| edited_code (string, 17–978k chars) | original_code (string, 17–978k chars) |
|---|---|
from typing import Dict, Optional, Tuple, List
import aiosqlite
from src.consensus.sub_block_record import SubBlockRecord
from src.types.header_block import HeaderBlock
from src.util.ints import uint32, uint64
from src.wallet.block_record import HeaderBlockRecord
from src.types.sized_bytes import bytes32
class WalletBlockStore:
"""
This object handles HeaderBlocks and SubBlocks stored in DB used by wallet.
"""
db: aiosqlite.Connection
@classmethod
async def create(cls, connection: aiosqlite.Connection):
self = cls()
self.db = connection
await self.db.execute(
"CREATE TABLE IF NOT EXISTS header_blocks(header_hash text PRIMARY KEY, sub_height int, height int,"
" timestamp int, block blob)"
)
await self.db.execute("CREATE INDEX IF NOT EXISTS header_hash on header_blocks(header_hash)")
await self.db.execute("CREATE INDEX IF NOT EXISTS timestamp on header_blocks(timestamp)")
await self.db.execute("CREATE INDEX IF NOT EXISTS sub_height on header_blocks(sub_height)")
await self.db.execute("CREATE INDEX IF NOT EXISTS height on header_blocks(height)")
# Sub block records
await self.db.execute(
"CREATE TABLE IF NOT EXISTS sub_block_records(header_hash "
"text PRIMARY KEY, prev_hash text, sub_height bigint, height int, weight bigint, total_iters text,"
"sub_block blob, is_peak tinyint)"
)
# Height index so we can look up in order of height for sync purposes
await self.db.execute("CREATE INDEX IF NOT EXISTS sub_block_height on sub_block_records(sub_height)")
await self.db.execute("CREATE INDEX IF NOT EXISTS height on sub_block_records(height)")
await self.db.execute("CREATE INDEX IF NOT EXISTS hh on sub_block_records(header_hash)")
await self.db.execute("CREATE INDEX IF NOT EXISTS peak on sub_block_records(is_peak)")
await self.db.commit()
await self.db.commit()
return self
async def _clear_database(self):
cursor_2 = await self.db.execute("DELETE FROM header_blocks")
await cursor_2.close()
await self.db.commit()
async def rollback_lca_to_block(self, block_index):
# TODO
pass
async def add_block_record(self, block_record: HeaderBlockRecord, sub_block: SubBlockRecord):
"""
Adds a block record to the database. This block record is assumed to be connected
to the chain, but it may or may not be in the LCA path.
"""
if block_record.header.foliage_block is not None:
timestamp = block_record.header.foliage_block.timestamp
else:
timestamp = uint64(0)
cursor = await self.db.execute(
"INSERT OR REPLACE INTO header_blocks VALUES(?, ?, ?, ?, ?)",
(
block_record.header_hash.hex(),
block_record.sub_block_height,
sub_block.height,
timestamp,
bytes(block_record),
),
)
await cursor.close()
cursor_2 = await self.db.execute(
"INSERT OR REPLACE INTO sub_block_records VALUES(?, ?, ?, ?, ?, ?, ?, ?)",
(
block_record.header.header_hash.hex(),
block_record.header.prev_header_hash.hex(),
block_record.header.sub_block_height,
block_record.header.height,
block_record.header.weight.to_bytes(128 // 8, "big", signed=False).hex(),
block_record.header.total_iters.to_bytes(128 // 8, "big", signed=False).hex(),
bytes(sub_block),
False,
),
)
await cursor_2.close()
await self.db.commit()
async def get_header_block(self, header_hash: bytes32) -> Optional[HeaderBlock]:
"""Gets a block record from the database, if present"""
cursor = await self.db.execute("SELECT * from header_blocks WHERE header_hash=?", (header_hash.hex(),))
row = await cursor.fetchone()
await cursor.close()
if row is not None:
hbr = HeaderBlockRecord.from_bytes(row[4])
return hbr.header
else:
return None
async def get_header_block_at(self, sub_heights: List[uint32]) -> List[HeaderBlock]:
if len(sub_heights) == 0:
return []
heights_db = tuple(sub_heights)
formatted_str = f'SELECT block from header_blocks WHERE sub_height in ({'?,' * (len(heights_db) - 1)}?)'
cursor = await self.db.execute(formatted_str, heights_db)
rows = await cursor.fetchall()
await cursor.close()
return [HeaderBlock.from_bytes(row[0]) for row in rows]
async def get_header_block_record(self, header_hash: bytes32) -> Optional[HeaderBlockRecord]:
"""Gets a block record from the database, if present"""
cursor = await self.db.execute("SELECT * from header_blocks WHERE header_hash=?", (header_hash.hex(),))
row = await cursor.fetchone()
await cursor.close()
if row is not None:
hbr = HeaderBlockRecord.from_bytes(row[4])
return hbr
else:
return None
async def get_sub_block_record(self, header_hash: bytes32) -> Optional[SubBlockRecord]:
cursor = await self.db.execute(
"SELECT sub_block from sub_block_records WHERE header_hash=?",
(header_hash.hex(),),
)
row = await cursor.fetchone()
await cursor.close()
if row is not None:
return SubBlockRecord.from_bytes(row[0])
return None
async def get_sub_block_records(
self,
) -> Tuple[Dict[bytes32, SubBlockRecord], Optional[bytes32]]:
"""
Returns a dictionary with all sub blocks, as well as the header hash of the peak,
if present.
"""
cursor = await self.db.execute("SELECT * from sub_block_records")
rows = await cursor.fetchall()
await cursor.close()
ret: Dict[bytes32, SubBlockRecord] = {}
peak: Optional[bytes32] = None
for row in rows:
header_hash = bytes.fromhex(row[0])
ret[header_hash] = SubBlockRecord.from_bytes(row[6])
if row[7]:
assert peak is None # Sanity check, only one peak
peak = header_hash
return ret, peak
async def set_peak(self, header_hash: bytes32) -> None:
cursor_1 = await self.db.execute("UPDATE sub_block_records SET is_peak=0 WHERE is_peak=1")
await cursor_1.close()
cursor_2 = await self.db.execute(
"UPDATE sub_block_records SET is_peak=1 WHERE header_hash=?",
(header_hash.hex(),),
)
await cursor_2.close()
await self.db.commit()
|
from typing import Dict, Optional, Tuple, List
import aiosqlite
from src.consensus.sub_block_record import SubBlockRecord
from src.types.header_block import HeaderBlock
from src.util.ints import uint32, uint64
from src.wallet.block_record import HeaderBlockRecord
from src.types.sized_bytes import bytes32
class WalletBlockStore:
"""
This object handles HeaderBlocks and SubBlocks stored in DB used by wallet.
"""
db: aiosqlite.Connection
@classmethod
async def create(cls, connection: aiosqlite.Connection):
self = cls()
self.db = connection
await self.db.execute(
"CREATE TABLE IF NOT EXISTS header_blocks(header_hash text PRIMARY KEY, sub_height int, height int,"
" timestamp int, block blob)"
)
await self.db.execute("CREATE INDEX IF NOT EXISTS header_hash on header_blocks(header_hash)")
await self.db.execute("CREATE INDEX IF NOT EXISTS timestamp on header_blocks(timestamp)")
await self.db.execute("CREATE INDEX IF NOT EXISTS sub_height on header_blocks(sub_height)")
await self.db.execute("CREATE INDEX IF NOT EXISTS height on header_blocks(height)")
# Sub block records
await self.db.execute(
"CREATE TABLE IF NOT EXISTS sub_block_records(header_hash "
"text PRIMARY KEY, prev_hash text, sub_height bigint, height int, weight bigint, total_iters text,"
"sub_block blob, is_peak tinyint)"
)
# Height index so we can look up in order of height for sync purposes
await self.db.execute("CREATE INDEX IF NOT EXISTS sub_block_height on sub_block_records(sub_height)")
await self.db.execute("CREATE INDEX IF NOT EXISTS height on sub_block_records(height)")
await self.db.execute("CREATE INDEX IF NOT EXISTS hh on sub_block_records(header_hash)")
await self.db.execute("CREATE INDEX IF NOT EXISTS peak on sub_block_records(is_peak)")
await self.db.commit()
await self.db.commit()
return self
async def _clear_database(self):
cursor_2 = await self.db.execute("DELETE FROM header_blocks")
await cursor_2.close()
await self.db.commit()
async def rollback_lca_to_block(self, block_index):
# TODO
pass
async def add_block_record(self, block_record: HeaderBlockRecord, sub_block: SubBlockRecord):
"""
Adds a block record to the database. This block record is assumed to be connected
to the chain, but it may or may not be in the LCA path.
"""
if block_record.header.foliage_block is not None:
timestamp = block_record.header.foliage_block.timestamp
else:
timestamp = uint64(0)
cursor = await self.db.execute(
"INSERT OR REPLACE INTO header_blocks VALUES(?, ?, ?, ?, ?)",
(
block_record.header_hash.hex(),
block_record.sub_block_height,
sub_block.height,
timestamp,
bytes(block_record),
),
)
await cursor.close()
cursor_2 = await self.db.execute(
"INSERT OR REPLACE INTO sub_block_records VALUES(?, ?, ?, ?, ?, ?, ?, ?)",
(
block_record.header.header_hash.hex(),
block_record.header.prev_header_hash.hex(),
block_record.header.sub_block_height,
block_record.header.height,
block_record.header.weight.to_bytes(128 // 8, "big", signed=False).hex(),
block_record.header.total_iters.to_bytes(128 // 8, "big", signed=False).hex(),
bytes(sub_block),
False,
),
)
await cursor_2.close()
await self.db.commit()
async def get_header_block(self, header_hash: bytes32) -> Optional[HeaderBlock]:
"""Gets a block record from the database, if present"""
cursor = await self.db.execute("SELECT * from header_blocks WHERE header_hash=?", (header_hash.hex(),))
row = await cursor.fetchone()
await cursor.close()
if row is not None:
hbr = HeaderBlockRecord.from_bytes(row[4])
return hbr.header
else:
return None
async def get_header_block_at(self, sub_heights: List[uint32]) -> List[HeaderBlock]:
if len(sub_heights) == 0:
return []
heights_db = tuple(sub_heights)
formatted_str = f'SELECT block from header_blocks WHERE sub_height in ({"?," * (len(heights_db) - 1)}?)'
cursor = await self.db.execute(formatted_str, heights_db)
rows = await cursor.fetchall()
await cursor.close()
return [HeaderBlock.from_bytes(row[0]) for row in rows]
async def get_header_block_record(self, header_hash: bytes32) -> Optional[HeaderBlockRecord]:
"""Gets a block record from the database, if present"""
cursor = await self.db.execute("SELECT * from header_blocks WHERE header_hash=?", (header_hash.hex(),))
row = await cursor.fetchone()
await cursor.close()
if row is not None:
hbr = HeaderBlockRecord.from_bytes(row[4])
return hbr
else:
return None
async def get_sub_block_record(self, header_hash: bytes32) -> Optional[SubBlockRecord]:
cursor = await self.db.execute(
"SELECT sub_block from sub_block_records WHERE header_hash=?",
(header_hash.hex(),),
)
row = await cursor.fetchone()
await cursor.close()
if row is not None:
return SubBlockRecord.from_bytes(row[0])
return None
async def get_sub_block_records(
self,
) -> Tuple[Dict[bytes32, SubBlockRecord], Optional[bytes32]]:
"""
Returns a dictionary with all sub blocks, as well as the header hash of the peak,
if present.
"""
cursor = await self.db.execute("SELECT * from sub_block_records")
rows = await cursor.fetchall()
await cursor.close()
ret: Dict[bytes32, SubBlockRecord] = {}
peak: Optional[bytes32] = None
for row in rows:
header_hash = bytes.fromhex(row[0])
ret[header_hash] = SubBlockRecord.from_bytes(row[6])
if row[7]:
assert peak is None # Sanity check, only one peak
peak = header_hash
return ret, peak
async def set_peak(self, header_hash: bytes32) -> None:
cursor_1 = await self.db.execute("UPDATE sub_block_records SET is_peak=0 WHERE is_peak=1")
await cursor_1.close()
cursor_2 = await self.db.execute(
"UPDATE sub_block_records SET is_peak=1 WHERE header_hash=?",
(header_hash.hex(),),
)
await cursor_2.close()
await self.db.commit()
|
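The `WalletBlockStore` pair above is easier to follow with a call sequence in mind. Below is a minimal usage sketch, assuming the class is importable from `src.wallet.wallet_block_store` (that module path is an assumption) and that `HeaderBlockRecord`/`SubBlockRecord` instances come from elsewhere in the wallet code; an in-memory SQLite database is used purely for illustration.

```python
# Minimal sketch, not part of the dataset row above. The import path is an
# assumption; constructing HeaderBlockRecord / SubBlockRecord is out of scope.
import aiosqlite

from src.wallet.wallet_block_store import WalletBlockStore  # assumed path


async def demo(header_block_record, sub_block_record, header_hash):
    connection = await aiosqlite.connect(":memory:")  # illustration only
    store = await WalletBlockStore.create(connection)

    # Persist one block plus its sub-block record, then read both back.
    await store.add_block_record(header_block_record, sub_block_record)
    header_block = await store.get_header_block(header_hash)
    sub_blocks, peak = await store.get_sub_block_records()

    await store.set_peak(header_hash)  # mark this header hash as the peak
    await connection.close()
    return header_block, sub_blocks, peak
```

Driven with `asyncio.run(demo(...))`, this exercises the insert, lookup, and peak-tracking paths in one pass.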
import json
from airiam.terraform.entity_terraformers.BaseEntityTransformer import BaseEntityTransformer
class IAMPolicyDocumentTransformer(BaseEntityTransformer):
def __init__(self, entity_json: dict, policy_name, principal_name=None):
policy_document_name = f"{policy_name}_document"
if principal_name:
policy_document_name = f"{principal_name}_{policy_document_name}"
super().__init__('data.aws_iam_policy_document', policy_document_name, entity_json)
def _generate_hcl2_code(self, entity_json) -> str:
statements = IAMPolicyDocumentTransformer.force_list(entity_json['Statement'])
if 'Principal' in statements[0]:
statements = self.transform_assume_policy_statements(statements)
else:
statements = self.transform_execution_policy(statements)
code = f"""data "aws_iam_policy_document" "{self._safe_name}" {{
version = "{entity_json.get('Version', '2012-10-17')}"
{statements}}}"""
return code
@staticmethod
def transform_execution_policy(statements):
statement_block = ""
for statement in statements:
sid_string = ""
if statement.get('Sid', '') != '':
sid_string = f"sid = \"{statement["Sid"]}\"\n "
actions = IAMPolicyDocumentTransformer.force_list(statement.get('Action'))
if 'Action' in statement:
action_str = f"actions = {json.dumps(actions)}"
else:
actions = IAMPolicyDocumentTransformer.force_list(statement.get('NotAction'))
action_str = f"not_actions = {json.dumps(actions)}"
condition_block = IAMPolicyDocumentTransformer.transform_conditions(statement)
resources_list_str = json.dumps(IAMPolicyDocumentTransformer.force_list(statement.get('Resource'))).replace('${', '$\\u0024{')
statement_block += f""" statement {{
{sid_string}effect = "{statement['Effect']}"
{action_str}
resources = {resources_list_str}
{condition_block}
}}
"""
return statement_block
@staticmethod
def transform_assume_policy_statements(statements):
statement_block = ""
for statement in statements:
sid_string = ""
if statement.get('Sid', '') != '':
sid_string = f"sid = \"{statement["Sid"]}\"\n "
condition_block = IAMPolicyDocumentTransformer.transform_conditions(statement)
statement_block += f""" statement {{
{sid_string}effect = "{statement['Effect']}"
actions = {json.dumps(IAMPolicyDocumentTransformer.force_list(statement['Action']))}
principals {{
type = "{list(statement['Principal'].keys())[0]}"
identifiers = {json.dumps(IAMPolicyDocumentTransformer.force_list(statement['Principal'][list(statement['Principal'].keys())[0]]))}
}}
{condition_block}}}
"""
return statement_block
@staticmethod
def transform_conditions(statement):
condition_block = ""
if 'Condition' in statement:
for test, items in statement['Condition'].items():
for variable, values in items.items():
values_str = json.dumps(IAMPolicyDocumentTransformer.force_list(values)).replace('${', '$\\u0024{')
condition_block += f"""
condition {{
test = "{test}"
variable = "{variable}"
values = {values_str}
}}
"""
return condition_block
@staticmethod
def force_list(x):
if isinstance(x, list):
return x
return [x]
def entities_to_import(self) -> list:
return []
|
import json
from airiam.terraform.entity_terraformers.BaseEntityTransformer import BaseEntityTransformer
class IAMPolicyDocumentTransformer(BaseEntityTransformer):
def __init__(self, entity_json: dict, policy_name, principal_name=None):
policy_document_name = f"{policy_name}_document"
if principal_name:
policy_document_name = f"{principal_name}_{policy_document_name}"
super().__init__('data.aws_iam_policy_document', policy_document_name, entity_json)
def _generate_hcl2_code(self, entity_json) -> str:
statements = IAMPolicyDocumentTransformer.force_list(entity_json['Statement'])
if 'Principal' in statements[0]:
statements = self.transform_assume_policy_statements(statements)
else:
statements = self.transform_execution_policy(statements)
code = f"""data "aws_iam_policy_document" "{self._safe_name}" {{
version = "{entity_json.get('Version', '2012-10-17')}"
{statements}}}"""
return code
@staticmethod
def transform_execution_policy(statements):
statement_block = ""
for statement in statements:
sid_string = ""
if statement.get('Sid', '') != '':
sid_string = f"sid = \"{statement['Sid']}\"\n "
actions = IAMPolicyDocumentTransformer.force_list(statement.get('Action'))
if 'Action' in statement:
action_str = f"actions = {json.dumps(actions)}"
else:
actions = IAMPolicyDocumentTransformer.force_list(statement.get('NotAction'))
action_str = f"not_actions = {json.dumps(actions)}"
condition_block = IAMPolicyDocumentTransformer.transform_conditions(statement)
resources_list_str = json.dumps(IAMPolicyDocumentTransformer.force_list(statement.get('Resource'))).replace('${', '$\\u0024{')
statement_block += f""" statement {{
{sid_string}effect = "{statement['Effect']}"
{action_str}
resources = {resources_list_str}
{condition_block}
}}
"""
return statement_block
@staticmethod
def transform_assume_policy_statements(statements):
statement_block = ""
for statement in statements:
sid_string = ""
if statement.get('Sid', '') != '':
sid_string = f"sid = \"{statement['Sid']}\"\n "
condition_block = IAMPolicyDocumentTransformer.transform_conditions(statement)
statement_block += f""" statement {{
{sid_string}effect = "{statement['Effect']}"
actions = {json.dumps(IAMPolicyDocumentTransformer.force_list(statement['Action']))}
principals {{
type = "{list(statement['Principal'].keys())[0]}"
identifiers = {json.dumps(IAMPolicyDocumentTransformer.force_list(statement['Principal'][list(statement['Principal'].keys())[0]]))}
}}
{condition_block}}}
"""
return statement_block
@staticmethod
def transform_conditions(statement):
condition_block = ""
if 'Condition' in statement:
for test, items in statement['Condition'].items():
for variable, values in items.items():
values_str = json.dumps(IAMPolicyDocumentTransformer.force_list(values)).replace('${', '$\\u0024{')
condition_block += f"""
condition {{
test = "{test}"
variable = "{variable}"
values = {values_str}
}}
"""
return condition_block
@staticmethod
def force_list(x):
if isinstance(x, list):
return x
return [x]
def entities_to_import(self) -> list:
return []
|
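Since the transformer's public entry point goes through `BaseEntityTransformer` (not shown), the sketch below only exercises the static helpers that are fully visible above, on a made-up execution-policy statement; the import path mirrors the file header above and is an assumption.

```python
# Hedged sketch: feeds a sample IAM statement through the static helpers above.
from airiam.terraform.entity_terraformers.IAMPolicyDocumentTransformer import (  # assumed path
    IAMPolicyDocumentTransformer,
)

sample_statement = {
    "Sid": "AllowRead",
    "Effect": "Allow",
    "Action": ["s3:GetObject", "s3:ListBucket"],
    "Resource": "arn:aws:s3:::example-bucket/*",
    "Condition": {"StringEquals": {"aws:PrincipalTag/team": "data"}},
}

# force_list normalizes scalars to lists; transform_execution_policy renders one
# HCL `statement { ... }` block per input dict, including the condition block.
print(IAMPolicyDocumentTransformer.transform_execution_policy([sample_statement]))
```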
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from copy import deepcopy
from typing import Any, Dict, List, Optional
import kopf
from orbit_controller import ORBIT_API_GROUP, ORBIT_API_VERSION, dynamic_client
from orbit_controller.utils import imagereplication_utils
CONFIG: Dict[str, Any]
@kopf.on.startup()
def configure(settings: kopf.OperatorSettings, logger: kopf.Logger, **_: Any) -> None:
settings.admission.server = kopf.WebhookServer(
cafile="/certs/ca.crt",
certfile="/certs/tls.crt",
pkeyfile="/certs/tls.key",
port=443,
)
settings.persistence.progress_storage = kopf.MultiProgressStorage(
[
kopf.AnnotationsProgressStorage(prefix="orbit.aws"),
kopf.StatusProgressStorage(field="status.orbit-aws"),
]
)
settings.persistence.finalizer = "imagereplication-pod-webhook.orbit.aws/kopf-finalizer"
settings.posting.level = logging.getLevelName(os.environ.get("EVENT_LOG_LEVEL", "INFO"))
global CONFIG
CONFIG = imagereplication_utils.get_config()
logger.info("CONFIG: %s", CONFIG)
def _check_replication_status(value: str, **_: Any) -> bool:
return value not in ["Failed", "MaxAttemptsExceeded"]
@kopf.index( # type: ignore
ORBIT_API_GROUP,
ORBIT_API_VERSION,
"imagereplications",
field="status.replication.replicationStatus",
value=_check_replication_status,
)
def imagereplications_idx(namespace: str, name: str, spec: kopf.Spec, status: kopf.Status, **_: Any) -> Dict[str, Any]:
replication_status = status.get("replication", {}).get("replicationStatus", None)
return {
spec["destination"]: {
"namespace": namespace,
"name": name,
"source": spec["source"],
"replicationStatus": replication_status,
}
}
@kopf.on.mutate("pods", id="update-pod-images") # type: ignore
def update_pod_images(
spec: kopf.Spec,
patch: kopf.Patch,
dryrun: bool,
logger: kopf.Logger,
imagereplications_idx: kopf.Index[str, str],
**_: Any,
) -> kopf.Patch:
if dryrun:
logger.debug("DryRun - Skip Pod Mutation")
return patch
annotations = {}
init_containers: List[Dict[str, Any]] = []
containers: List[Dict[str, Any]] = []
replications = {}
def process_containers(
src_containers: Optional[List[Dict[str, Any]]], dest_containers: List[Dict[str, Any]]
) -> None:
for container in src_containers if src_containers else []:
image = container.get("image", "")
desired_image = imagereplication_utils.get_desired_image(image=image, config=CONFIG)
if image != desired_image:
container_copy = deepcopy(container)
container_copy["image"] = desired_image
dest_containers.append(container_copy)
replications[image] = desired_image
annotations[f"original-container-image~1{container["name"]}"] = image
process_containers(spec.get("initContainers", []), init_containers)
process_containers(spec.get("containers", []), containers)
if replications:
client = dynamic_client()
for source, destination in replications.items():
if not imagereplications_idx.get(destination, []):
imagereplication_utils.create_imagereplication(
namespace="orbit-system",
source=source,
destination=destination,
client=client,
logger=logger,
)
else:
logger.debug("Skipping ImageReplication Creation")
if annotations:
patch["metadata"] = {"annotations": annotations}
patch["spec"] = {}
if init_containers:
patch["spec"]["initContainers"] = init_containers
if containers:
patch["spec"]["containers"] = containers
logger.debug("Patch: %s", str(patch))
return patch
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from copy import deepcopy
from typing import Any, Dict, List, Optional
import kopf
from orbit_controller import ORBIT_API_GROUP, ORBIT_API_VERSION, dynamic_client
from orbit_controller.utils import imagereplication_utils
CONFIG: Dict[str, Any]
@kopf.on.startup()
def configure(settings: kopf.OperatorSettings, logger: kopf.Logger, **_: Any) -> None:
settings.admission.server = kopf.WebhookServer(
cafile="/certs/ca.crt",
certfile="/certs/tls.crt",
pkeyfile="/certs/tls.key",
port=443,
)
settings.persistence.progress_storage = kopf.MultiProgressStorage(
[
kopf.AnnotationsProgressStorage(prefix="orbit.aws"),
kopf.StatusProgressStorage(field="status.orbit-aws"),
]
)
settings.persistence.finalizer = "imagereplication-pod-webhook.orbit.aws/kopf-finalizer"
settings.posting.level = logging.getLevelName(os.environ.get("EVENT_LOG_LEVEL", "INFO"))
global CONFIG
CONFIG = imagereplication_utils.get_config()
logger.info("CONFIG: %s", CONFIG)
def _check_replication_status(value: str, **_: Any) -> bool:
return value not in ["Failed", "MaxAttemptsExceeded"]
@kopf.index( # type: ignore
ORBIT_API_GROUP,
ORBIT_API_VERSION,
"imagereplications",
field="status.replication.replicationStatus",
value=_check_replication_status,
)
def imagereplications_idx(namespace: str, name: str, spec: kopf.Spec, status: kopf.Status, **_: Any) -> Dict[str, Any]:
replication_status = status.get("replication", {}).get("replicationStatus", None)
return {
spec["destination"]: {
"namespace": namespace,
"name": name,
"source": spec["source"],
"replicationStatus": replication_status,
}
}
@kopf.on.mutate("pods", id="update-pod-images") # type: ignore
def update_pod_images(
spec: kopf.Spec,
patch: kopf.Patch,
dryrun: bool,
logger: kopf.Logger,
imagereplications_idx: kopf.Index[str, str],
**_: Any,
) -> kopf.Patch:
if dryrun:
logger.debug("DryRun - Skip Pod Mutation")
return patch
annotations = {}
init_containers: List[Dict[str, Any]] = []
containers: List[Dict[str, Any]] = []
replications = {}
def process_containers(
src_containers: Optional[List[Dict[str, Any]]], dest_containers: List[Dict[str, Any]]
) -> None:
for container in src_containers if src_containers else []:
image = container.get("image", "")
desired_image = imagereplication_utils.get_desired_image(image=image, config=CONFIG)
if image != desired_image:
container_copy = deepcopy(container)
container_copy["image"] = desired_image
dest_containers.append(container_copy)
replications[image] = desired_image
annotations[f"original-container-image~1{container['name']}"] = image
process_containers(spec.get("initContainers", []), init_containers)
process_containers(spec.get("containers", []), containers)
if replications:
client = dynamic_client()
for source, destination in replications.items():
if not imagereplications_idx.get(destination, []):
imagereplication_utils.create_imagereplication(
namespace="orbit-system",
source=source,
destination=destination,
client=client,
logger=logger,
)
else:
logger.debug("Skipping ImageReplication Creation")
if annotations:
patch["metadata"] = {"annotations": annotations}
patch["spec"] = {}
if init_containers:
patch["spec"]["initContainers"] = init_containers
if containers:
patch["spec"]["containers"] = containers
logger.debug("Patch: %s", str(patch))
return patch
|
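The mutating webhook above depends on `imagereplication_utils.get_desired_image`, which is not shown here, so the sketch below substitutes a hypothetical registry-prefix rewrite purely to illustrate the shape of the patch the handler assembles (rewritten container list plus `original-container-image~1<name>` annotations); it is not the real utility.

```python
# Illustration only: a hypothetical registry-prefix rewrite stands in for
# imagereplication_utils.get_desired_image to show the patch structure.
from copy import deepcopy
from typing import Any, Dict, List


def rewrite_containers(containers: List[Dict[str, Any]], registry: str) -> Dict[str, Any]:
    patched: List[Dict[str, Any]] = []
    annotations: Dict[str, str] = {}
    for container in containers:
        image = container.get("image", "")
        desired = image if image.startswith(registry) else f"{registry}/{image}"
        if desired != image:
            copy = deepcopy(container)
            copy["image"] = desired
            patched.append(copy)
            # "~1" is the JSON-Pointer escape for "/" in the annotation key,
            # mirroring the handler above.
            annotations[f"original-container-image~1{container['name']}"] = image
    return {"metadata": {"annotations": annotations}, "spec": {"containers": patched}}


example_patch = rewrite_containers(
    [{"name": "app", "image": "public.ecr.aws/nginx/nginx:latest"}],
    registry="123456789012.dkr.ecr.us-east-1.amazonaws.com",  # hypothetical registry
)
```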
import typing as ty
import numpy as np
import scipy.special
import sklearn.metrics as skm
from . import util
def calculate_metrics(
task_type: str,
y: np.ndarray,
prediction: np.ndarray,
classification_mode: str,
y_info: ty.Optional[ty.Dict[str, ty.Any]],
) -> ty.Dict[str, float]:
if task_type == util.REGRESSION:
del classification_mode
rmse = skm.mean_squared_error(y, prediction) ** 0.5 # type: ignore[code]
if y_info:
if y_info['policy'] == 'mean_std':
rmse *= y_info['std']
else:
assert False
return {'rmse': rmse, 'score': -rmse}
else:
assert task_type in (util.BINCLASS, util.MULTICLASS)
labels = None
if classification_mode == 'probs':
probs = prediction
elif classification_mode == 'logits':
probs = (
scipy.special.expit(prediction)
if task_type == util.BINCLASS
else scipy.special.softmax(prediction, axis=1)
)
else:
assert classification_mode == 'labels'
probs = None
labels = prediction
if labels is None:
labels = (
np.round(probs).astype('int64')
if task_type == util.BINCLASS
else probs.argmax(axis=1) # type: ignore[code]
)
result = skm.classification_report(y, labels, output_dict=True) # type: ignore[code]
if task_type == util.BINCLASS:
result['roc_auc'] = skm.roc_auc_score(y, probs) # type: ignore[code]
result['score'] = result['accuracy'] # type: ignore[code]
return result # type: ignore[code]
def make_summary(metrics: ty.Dict[str, ty.Any]) -> str:
precision = 3
summary = {}
for k, v in metrics.items():
if k.isdigit():
continue
k = {
'score': 'SCORE',
'accuracy': 'acc',
'roc_auc': 'roc_auc',
'macro avg': 'm',
'weighted avg': 'w',
}.get(k, k)
if isinstance(v, float):
v = round(v, precision)
summary[k] = v
else:
v = {
{'precision': 'p', 'recall': 'r', 'f1-score': 'f1', 'support': 's'}.get(
x, x
): round(v[x], precision)
for x in v
}
for item in v.items():
summary[k + item[0]] = item[1]
s = [f'score = {summary.pop('SCORE'):.3f}']
for k, v in summary.items():
if k not in ['mp', 'mr', 'wp', 'wr']: # just to save screen space
s.append(f'{k} = {v}')
return ' | '.join(s)
|
import typing as ty
import numpy as np
import scipy.special
import sklearn.metrics as skm
from . import util
def calculate_metrics(
task_type: str,
y: np.ndarray,
prediction: np.ndarray,
classification_mode: str,
y_info: ty.Optional[ty.Dict[str, ty.Any]],
) -> ty.Dict[str, float]:
if task_type == util.REGRESSION:
del classification_mode
rmse = skm.mean_squared_error(y, prediction) ** 0.5 # type: ignore[code]
if y_info:
if y_info['policy'] == 'mean_std':
rmse *= y_info['std']
else:
assert False
return {'rmse': rmse, 'score': -rmse}
else:
assert task_type in (util.BINCLASS, util.MULTICLASS)
labels = None
if classification_mode == 'probs':
probs = prediction
elif classification_mode == 'logits':
probs = (
scipy.special.expit(prediction)
if task_type == util.BINCLASS
else scipy.special.softmax(prediction, axis=1)
)
else:
assert classification_mode == 'labels'
probs = None
labels = prediction
if labels is None:
labels = (
np.round(probs).astype('int64')
if task_type == util.BINCLASS
else probs.argmax(axis=1) # type: ignore[code]
)
result = skm.classification_report(y, labels, output_dict=True) # type: ignore[code]
if task_type == util.BINCLASS:
result['roc_auc'] = skm.roc_auc_score(y, probs) # type: ignore[code]
result['score'] = result['accuracy'] # type: ignore[code]
return result # type: ignore[code]
def make_summary(metrics: ty.Dict[str, ty.Any]) -> str:
precision = 3
summary = {}
for k, v in metrics.items():
if k.isdigit():
continue
k = {
'score': 'SCORE',
'accuracy': 'acc',
'roc_auc': 'roc_auc',
'macro avg': 'm',
'weighted avg': 'w',
}.get(k, k)
if isinstance(v, float):
v = round(v, precision)
summary[k] = v
else:
v = {
{'precision': 'p', 'recall': 'r', 'f1-score': 'f1', 'support': 's'}.get(
x, x
): round(v[x], precision)
for x in v
}
for item in v.items():
summary[k + item[0]] = item[1]
s = [f'score = {summary.pop("SCORE"):.3f}']
for k, v in summary.items():
if k not in ['mp', 'mr', 'wp', 'wr']: # just to save screen space
s.append(f'{k} = {v}')
return ' | '.join(s)
|
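A small usage sketch for the metrics pair above: the `util` constants (`util.BINCLASS` and friends) live in a sibling module that is not shown, so sklearn is called directly to build the `classification_report`-style dictionary that `make_summary` (the function defined above) formats.

```python
# Hedged sketch: builds the dict shape that make_summary expects, mirroring the
# binary-classification branch of calculate_metrics above.
import numpy as np
import sklearn.metrics as skm

y_true = np.array([0, 1, 1, 0, 1])
y_pred = np.array([0, 1, 0, 0, 1])

report = skm.classification_report(y_true, y_pred, output_dict=True)
report['score'] = report['accuracy']           # mirrors calculate_metrics above
report['roc_auc'] = skm.roc_auc_score(y_true, y_pred)

print(make_summary(report))  # e.g. "score = 0.800 | acc = 0.8 | roc_auc = 0.833 | ..."
```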
# -*- coding: utf-8 -*-
"""
pybit
------------------------
pybit is a lightweight and high-performance API connector for the
RESTful and WebSocket APIs of the Bybit exchange.
Documentation can be found at
https://github.com/verata-veritatis/pybit
:copyright: (c) 2020-2021 verata-veritatis
:license: MIT License
"""
import time
import hmac
import json
import logging
import threading
import requests
import websocket
from datetime import datetime as dt
from concurrent.futures import ThreadPoolExecutor
from .exceptions import FailedRequestError, InvalidRequestError
# Requests will use simplejson if available.
try:
from simplejson.errors import JSONDecodeError
except ImportError:
from json.decoder import JSONDecodeError
# Versioning.
VERSION = '1.1.18'
class HTTP:
"""
Connector for Bybit's HTTP API.
:param endpoint: The endpoint URL of the HTTP API, e.g.
'https://api-testnet.bybit.com'.
:type endpoint: str
:param api_key: Your API key. Required for authenticated endpoints. Defaults
to None.
:type api_key: str
:param api_secret: Your API secret key. Required for authenticated
endpoints. Defaults to None.
:type api_secret: str
:param logging_level: The logging level of the built-in logger. Defaults to
logging.INFO. Options are CRITICAL (50), ERROR (40), WARNING (30),
INFO (20), DEBUG (10), or NOTSET (0).
:type logging_level: Union[int, logging.level]
:param log_requests: Whether or not pybit should log each HTTP request.
:type log_requests: bool
:param request_timeout: The timeout of each API request in seconds. Defaults
to 10 seconds.
:type request_timeout: int
:param recv_window: How long an HTTP request is valid in ms. Default is
5000.
:type recv_window: int
:param force_retry: Whether or not pybit should retry a timed-out request.
:type force_retry: bool
:param retry_codes: A list of non-fatal status codes to retry on.
:type retry_codes: set
:param ignore_codes: A list of non-fatal status codes to ignore.
:type ignore_codes: set
:param max_retries: The number of times to re-attempt a request.
:type max_retries: int
:param retry_delay: Seconds between retries for returned error or timed-out
requests. Default is 3 seconds.
:type retry_delay: int
:param referral_id: An optional referer ID can be added to each request for
identification.
:type referral_id: str
:returns: pybit.HTTP session.
"""
def __init__(self, endpoint=None, api_key=None, api_secret=None,
logging_level=logging.INFO, log_requests=False,
request_timeout=10, recv_window=5000, force_retry=False,
retry_codes=None, ignore_codes=None, max_retries=3,
retry_delay=3, referral_id=None):
"""Initializes the HTTP class."""
# Set the endpoint.
if endpoint is None:
self.endpoint = 'https://api.bybit.com'
else:
self.endpoint = endpoint
# Setup logger.
self.logger = logging.getLogger(__name__)
if len(logging.root.handlers) == 0:
# No handler is set on the root logger -> add a handler just for this logger so we don't interfere with logging configured outside.
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
)
handler.setLevel(logging_level)
self.logger.addHandler(handler)
self.logger.debug('Initializing HTTP session.')
self.log_requests = log_requests
# Set API keys.
self.api_key = api_key
self.api_secret = api_secret
# Set timeout.
self.timeout = request_timeout
self.recv_window = recv_window
self.force_retry = force_retry
self.max_retries = max_retries
self.retry_delay = retry_delay
# Set whitelist of non-fatal Bybit status codes to retry on.
if retry_codes is None:
self.retry_codes = {10002, 10006, 30034, 30035, 130035, 130150}
else:
self.retry_codes = retry_codes
# Set whitelist of non-fatal Bybit status codes to ignore.
if ignore_codes is None:
self.ignore_codes = set()
else:
self.ignore_codes = ignore_codes
# Initialize requests session.
self.client = requests.Session()
self.client.headers.update(
{
'User-Agent': 'pybit-' + VERSION,
'Content-Type': 'application/json',
'Accept': 'application/json',
}
)
# Add referral ID to header.
if referral_id:
self.client.headers.update({'Referer': referral_id})
def _exit(self):
"""Closes the request session."""
self.client.close()
self.logger.debug('HTTP session closed.')
def orderbook(self, **kwargs):
"""
Get the orderbook.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-orderbook.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/public/orderBook/L2',
query=kwargs
)
def query_kline(self, **kwargs):
"""
Get kline.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-querykline.
:returns: Request results as dictionary.
"""
# Replace query param 'from_time' since 'from' keyword is reserved.
# Temporary workaround until Bybit updates official request params
if 'from_time' in kwargs:
kwargs['from'] = kwargs.pop('from_time')
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/public/linear/kline'
else:
suffix = '/v2/public/kline/list'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs
)
def latest_information_for_symbol(self, **kwargs):
"""
Get the latest information for symbol.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-latestsymbolinfo.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/public/tickers',
query=kwargs
)
def public_trading_records(self, **kwargs):
"""
Get recent trades. You can find a complete history of trades on Bybit
at https://public.bybit.com/.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-latestsymbolinfo.
:returns: Request results as dictionary.
"""
# Replace query param 'from_id' since 'from' keyword is reserved.
# Temporary workaround until Bybit updates official request params
if 'from_id' in kwargs:
kwargs['from'] = kwargs.pop('from_id')
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/public/linear/recent-trading-records'
else:
suffix = '/v2/public/trading-records'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs
)
def query_symbol(self):
"""
Get symbol info.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/public/symbols'
)
def liquidated_orders(self, **kwargs):
"""
Retrieve the liquidated orders. The query range is the last seven days
of data.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-query_liqrecords.
:returns: Request results as dictionary.
"""
# Replace query param 'from_id' since 'from' keyword is reserved.
# Temporary workaround until Bybit updates official request params
if 'from_id' in kwargs:
kwargs['from'] = kwargs.pop('from_id')
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/public/liq-records',
query=kwargs
)
def query_mark_price_kline(self, **kwargs):
"""
Query mark price kline (like query_kline but for mark price).
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-markpricekline.
:returns: Request results as dictionary.
"""
# Replace query param 'from_time' since 'from' keyword is reserved.
# Temporary workaround until Bybit updates official request params
if 'from_time' in kwargs:
kwargs['from'] = kwargs.pop('from_time')
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/public/linear/mark-price-kline'
else:
suffix = '/v2/public/mark-price-kline'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs
)
def open_interest(self, **kwargs):
"""
Gets the total amount of unsettled contracts. In other words, the total
number of contracts held in open positions.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-marketopeninterest.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/public/open-interest',
query=kwargs
)
def latest_big_deal(self, **kwargs):
"""
Obtain filled orders worth more than 500,000 USD within the last 24h.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-marketbigdeal.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/public/big-deal',
query=kwargs
)
def long_short_ratio(self, **kwargs):
"""
Gets the Bybit long-short ratio.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-marketaccountratio.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/public/account-ratio',
query=kwargs
)
def place_active_order(self, **kwargs):
"""
Places an active order. For more information, see
https://bybit-exchange.github.io/docs/inverse/#t-activeorders.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-activeorders.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/order/create'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/order/create'
else:
suffix = '/v2/private/order/create'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def place_active_order_bulk(self, orders: list, max_in_parallel=10):
"""
Places multiple active orders in bulk using multithreading. For more
information on place_active_order, see
https://bybit-exchange.github.io/docs/inverse/#t-activeorders.
:param list orders: A list of orders and their parameters.
:param max_in_parallel: The number of requests to be sent in parallel.
Note that you are limited to 50 requests per second.
:returns: Future request result dictionaries as a list.
"""
with ThreadPoolExecutor(max_workers=max_in_parallel) as executor:
executions = [
executor.submit(
self.place_active_order,
**order
) for order in orders
]
executor.shutdown()
return [execution.result() for execution in executions]
def get_active_order(self, **kwargs):
"""
Gets an active order. For more information, see
https://bybit-exchange.github.io/docs/inverse/#t-getactive.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-getactive.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/order/list'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/v2/private/order/list'
else:
suffix = '/futures/private/order/list'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def cancel_active_order(self, **kwargs):
"""
Cancels an active order. For more information, see
https://bybit-exchange.github.io/docs/inverse/#t-cancelactive.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-cancelactive.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/order/cancel'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/order/cancel'
else:
suffix = '/v2/private/order/cancel'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def cancel_active_order_bulk(self, orders: list, max_in_parallel=10):
"""
Cancels multiple active orders in bulk using multithreading. For more
information on cancel_active_order, see
https://bybit-exchange.github.io/docs/inverse/#t-activeorders.
:param list orders: A list of orders and their parameters.
:param max_in_parallel: The number of requests to be sent in parallel.
Note that you are limited to 50 requests per second.
:returns: Future request result dictionaries as a list.
"""
with ThreadPoolExecutor(max_workers=max_in_parallel) as executor:
executions = [
executor.submit(
self.cancel_active_order,
**order
) for order in orders
]
executor.shutdown()
return [execution.result() for execution in executions]
def cancel_all_active_orders(self, **kwargs):
"""
Cancel all active orders that are unfilled or partially filled. Fully
filled orders cannot be cancelled.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-cancelallactive.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/order/cancel-all'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/order/cancelAll'
else:
suffix = '/v2/private/order/cancelAll'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def replace_active_order(self, **kwargs):
"""
Replace order can modify/amend your active orders.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-replaceactive.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/order/replace'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/order/replace'
else:
suffix = '/v2/private/order/replace'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def replace_active_order_bulk(self, orders: list, max_in_parallel=10):
"""
Replaces multiple active orders in bulk using multithreading. For more
information on replace_active_order, see
https://bybit-exchange.github.io/docs/inverse/#t-replaceactive.
:param list orders: A list of orders and their parameters.
:param max_in_parallel: The number of requests to be sent in parallel.
Note that you are limited to 50 requests per second.
:returns: Future request result dictionaries as a list.
"""
with ThreadPoolExecutor(max_workers=max_in_parallel) as executor:
executions = [
executor.submit(
self.replace_active_order,
**order
) for order in orders
]
executor.shutdown()
return [execution.result() for execution in executions]
def query_active_order(self, **kwargs):
"""
Query real-time active order information.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-queryactive.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/order/search'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/order'
else:
suffix = '/v2/private/order'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def place_conditional_order(self, **kwargs):
"""
Places a conditional order. For more information, see
https://bybit-exchange.github.io/docs/inverse/#t-placecond.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-placecond.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/stop-order/create'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/stop-order/create'
else:
suffix = '/v2/private/stop-order/create'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def place_conditional_order_bulk(self, orders: list, max_in_parallel=10):
"""
Places multiple conditional orders in bulk using multithreading. For
more information on place_active_order, see
https://bybit-exchange.github.io/docs/inverse/#t-placecond.
:param orders: A list of orders and their parameters.
:param max_in_parallel: The number of requests to be sent in parallel.
Note that you are limited to 50 requests per second.
:returns: Future request result dictionaries as a list.
"""
with ThreadPoolExecutor(max_workers=max_in_parallel) as executor:
executions = [
executor.submit(
self.place_conditional_order,
**order
) for order in orders
]
executor.shutdown()
return [execution.result() for execution in executions]
def get_conditional_order(self, **kwargs):
"""
Gets a conditional order. For more information, see
https://bybit-exchange.github.io/docs/inverse/#t-getcond.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-getcond.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/stop-order/list'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/stop-order/list'
else:
suffix = '/v2/private/stop-order/list'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def cancel_conditional_order(self, **kwargs):
"""
Cancels a conditional order. For more information, see
https://bybit-exchange.github.io/docs/inverse/#t-cancelcond.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-cancelcond.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/stop-order/cancel'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/stop-order/cancel'
else:
suffix = '/v2/private/stop-order/cancel'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def cancel_conditional_order_bulk(self, orders: list, max_in_parallel=10):
"""
Cancels multiple conditional orders in bulk using multithreading. For
more information on cancel_active_order, see
https://bybit-exchange.github.io/docs/inverse/#t-cancelcond.
:param list orders: A list of orders and their parameters.
:param max_in_parallel: The number of requests to be sent in parallel.
Note that you are limited to 50 requests per second.
:returns: Future request result dictionaries as a list.
"""
with ThreadPoolExecutor(max_workers=max_in_parallel) as executor:
executions = [
executor.submit(
self.cancel_conditional_order,
**order
) for order in orders
]
executor.shutdown()
return [execution.result() for execution in executions]
def cancel_all_conditional_orders(self, **kwargs):
"""
Cancel all conditional orders that are unfilled or partially filled.
Fully filled orders cannot be cancelled.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-cancelallcond.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/stop-order/cancel-all'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/stop-order/cancelAll'
else:
suffix = '/v2/private/stop-order/cancelAll'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def replace_conditional_order(self, **kwargs):
"""
Replace conditional order can modify/amend your conditional orders.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-replacecond.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/stop-order/replace'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/stop-order/replace'
else:
suffix = '/v2/private/stop-order/replace'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def replace_conditional_order_bulk(self, orders: list, max_in_parallel=10):
"""
Replaces multiple conditional orders in bulk using multithreading. For
more information on replace_active_order, see
https://bybit-exchange.github.io/docs/inverse/#t-replacecond.
:param list orders: A list of orders and their parameters.
:param max_in_parallel: The number of requests to be sent in parallel.
Note that you are limited to 50 requests per second.
:returns: Future request result dictionaries as a list.
"""
with ThreadPoolExecutor(max_workers=max_in_parallel) as executor:
executions = [
executor.submit(
self.replace_conditional_order,
**order
) for order in orders
]
executor.shutdown()
return [execution.result() for execution in executions]
def query_conditional_order(self, **kwargs):
"""
Query real-time conditional order information.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-querycond.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/stop-order/search'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/stop-order'
else:
suffix = '/v2/private/stop-order'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def my_position(self, **kwargs):
"""
Get my position list.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-myposition.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/position/list'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/position/list'
else:
suffix = '/v2/private/position/list'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def set_auto_add_margin(self, **kwargs):
"""
For linear markets only. Set auto add margin, or Auto-Margin
Replenishment.
:param kwargs: See
https://bybit-exchange.github.io/docs/linear/#t-setautoaddmargin.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='POST',
path=self.endpoint + '/private/linear/position/set-auto-add-margin',
query=kwargs,
auth=True
)
def set_leverage(self, **kwargs):
"""
Change user leverage.
:param kwargs: See
https://bybit-exchange.github.io/docs/linear/#t-setleverage.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/position/set-leverage'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/position/leverage/save'
else:
suffix = '/v2/private/position/leverage/save'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def cross_isolated_margin_switch(self, **kwargs):
"""
For linear markets only. Switch Cross/Isolated; a leverage value must be
set when switching from Cross to Isolated.
:param kwargs: See
https://bybit-exchange.github.io/docs/linear/#t-marginswitch.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/position/switch-isolated'
else:
suffix = '/futures/private/position/switch-mode'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def position_mode_switch(self, **kwargs):
"""
For futures markets only. Switch Cross/Isolated; must set leverage
value when switching from Cross to Isolated.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse_futures/#t-marginswitch.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='POST',
path=self.endpoint + '/futures/private/position/switch-mode',
query=kwargs,
auth=True
)
def change_margin(self, **kwargs):
"""
Update margin.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-changemargin.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/position/change-position-margin'
else:
suffix = '/v2/private/position/change-position-margin'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def set_trading_stop(self, **kwargs):
"""
Set take profit, stop loss, and trailing stop for your open position.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-tradingstop.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/position/trading-stop'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/position/trading-stop'
else:
suffix = '/v2/private/position/trading-stop'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def add_reduce_margin(self, **kwargs):
"""
For linear markets only. Add margin.
:param kwargs: See
https://bybit-exchange.github.io/docs/linear/#t-addmargin.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/private/linear/position/add-margin',
query=kwargs,
auth=True
)
def user_leverage(self, **kwargs):
"""
ABANDONED! Please use my_position instead. Fetches user leverage by
fetching user position.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-getleverage.
:returns: Request results as dictionary.
"""
self.logger.warning('This endpoint is deprecated and will be removed. Use my_position()')
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/private/position/list',
query=kwargs,
auth=True
)
def change_user_leverage(self, **kwargs):
"""
Change user leverage.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-changeleverage.
:returns: Request results as dictionary.
"""
self.logger.warning('This endpoint is deprecated and will be removed. Use set_leverage()')
return self._submit_request(
method='POST',
path=self.endpoint + '/user/leverage/save',
query=kwargs,
auth=True
)
def user_trade_records(self, **kwargs):
"""
Get user's trading records. The results are ordered in ascending order
(the first item is the oldest).
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-usertraderecords.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/trade/execution/list'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/execution/list'
else:
suffix = '/v2/private/execution/list'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def closed_profit_and_loss(self, **kwargs):
"""
Get user's closed profit and loss records. The results are ordered in
descending order (the first item is the latest).
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-closedprofitandloss.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/trade/closed-pnl/list'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/trade/closed-pnl/list'
else:
suffix = '/v2/private/trade/closed-pnl/list'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def get_risk_limit(self, is_linear=False):
"""
Get risk limit.
:param is_linear: True for linear, False for inverse. Defaults to
False.
:returns: Request results as dictionary.
"""
if is_linear:
suffix = '/public/linear/risk-limit'
else:
suffix = '/open-api/wallet/risk-limit/list'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
auth=True
)
def set_risk_limit(self, **kwargs):
"""
Set risk limit.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-setrisklimit.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='POST',
path=self.endpoint + '/open-api/wallet/risk-limit',
query=kwargs,
auth=True
)
def get_the_last_funding_rate(self, **kwargs):
"""
The funding rate is generated every 8 hours at 00:00 UTC, 08:00 UTC and
16:00 UTC. For example, if a request is sent at 12:00 UTC, the funding
rate generated earlier that day at 08:00 UTC will be sent.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-fundingrate.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/public/linear/funding/prev-funding-rate'
else:
suffix = '/v2/private/funding/prev-funding-rate'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs
)
def my_last_funding_fee(self, **kwargs):
"""
Funding settlement occurs every 8 hours at 00:00 UTC, 08:00 UTC and
16:00 UTC. The current interval's fund fee settlement is based on the
previous interval's fund rate. For example, at 16:00, the settlement is
based on the fund rate generated at 8:00. The fund rate generated at
16:00 will be used at 0:00 the next day.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-mylastfundingfee.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/funding/prev-funding'
else:
suffix = '/v2/private/funding/prev-funding'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def predicted_funding_rate(self, **kwargs):
"""
Get predicted funding rate and my funding fee.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-predictedfunding.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/funding/predicted-funding'
else:
suffix = '/v2/private/funding/predicted-funding'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def api_key_info(self):
"""
Get user's API key info.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/private/account/api-key',
auth=True
)
def lcp_info(self, **kwargs):
"""
Get user's LCP (data refreshes once an hour). Only supports inverse
perpetual at present. See
https://bybit-exchange.github.io/docs/inverse/#t-liquidity to learn
more.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-lcp.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/private/account/lcp',
query=kwargs,
auth=True
)
def get_wallet_balance(self, **kwargs):
"""
Get wallet balance info.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-balance.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/private/wallet/balance',
query=kwargs,
auth=True
)
def wallet_fund_records(self, **kwargs):
"""
Get wallet fund records. This endpoint also shows exchanges from the
Asset Exchange, where the types for the exchange are
ExchangeOrderWithdraw and ExchangeOrderDeposit.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-walletrecords.
:returns: Request results as dictionary.
"""
# Replace query param 'from_id' since 'from' keyword is reserved.
# Temporary workaround until Bybit updates official request params
if 'from_id' in kwargs:
kwargs['from'] = kwargs.pop('from_id')
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/private/wallet/fund/records',
query=kwargs,
auth=True
)
def withdraw_records(self, **kwargs):
"""
Get withdrawal records.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-withdrawrecords.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/private/wallet/withdraw/list',
query=kwargs,
auth=True
)
def asset_exchange_records(self, **kwargs):
"""
Get asset exchange records.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-assetexchangerecords.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/private/exchange-order/list',
query=kwargs,
auth=True
)
def server_time(self):
"""
Get Bybit server time.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/public/time'
)
def announcement(self):
"""
Get Bybit OpenAPI announcements in the last 30 days by reverse order.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/public/announcement'
)
'''
Additional Methods
These methods use two or more requests to perform a specific
function and are exclusive to pybit.
'''
def close_position(self, symbol):
"""
Closes your open position. Makes two requests (position, order).
Parameters
------------------------
symbol : str
Required parameter. The symbol of the market as a string,
e.g. 'BTCUSD'.
"""
# First we fetch the user's position.
try:
r = self.my_position(symbol=symbol)['result']
# If there is no returned position, we want to handle that.
except KeyError:
return self.logger.error('No position detected.')
# Next we generate a list of market orders
orders = [
{
'symbol': symbol,
'order_type': 'Market',
'side': 'Buy' if p['side'] == 'Sell' else 'Sell',
'qty': p['size'],
'time_in_force': 'ImmediateOrCancel',
'reduce_only': True,
'close_on_trigger': True
} for p in (r if isinstance(r, list) else [r]) if p['size'] > 0
]
if len(orders) == 0:
return self.logger.error('No position detected.')
# Submit a market order against each open position for the same qty.
return self.place_active_order_bulk(orders)
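    # Example (a minimal sketch): assuming an HTTP session has already been
    # created with API keys, closing an open inverse position could look like
    #
    #     session.close_position(symbol='BTCUSD')
    #
    # which internally calls my_position() and then submits reduce-only
    # market orders via place_active_order_bulk(), as implemented above.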
'''
Internal methods; signature and request submission.
For more information about the request signature, see
https://bybit-exchange.github.io/docs/inverse/#t-authentication.
'''
def _auth(self, method, params, recv_window):
"""
Generates authentication signature per Bybit API specifications.
Notes
-------------------
Since the POST method requires a JSONified dict, we need to ensure
the signature uses lowercase booleans instead of Python's
capitalized booleans. This is done in the bug fix below.
"""
api_key = self.api_key
api_secret = self.api_secret
if api_key is None or api_secret is None:
raise PermissionError('Authenticated endpoints require keys.')
# Append required parameters.
params['api_key'] = api_key
params['recv_window'] = recv_window
params['timestamp'] = int(time.time() * 10 ** 3)
# Sort dictionary alphabetically to create querystring.
_val = '&'.join(
[str(k) + '=' + str(v) for k, v in sorted(params.items()) if
(k != 'sign') and (v is not None)]
)
# Bug fix. Replaces all capitalized booleans with lowercase.
if method == 'POST':
_val = _val.replace('True', 'true').replace('False', 'false')
# Return signature.
return str(hmac.new(
bytes(api_secret, 'utf-8'),
bytes(_val, 'utf-8'), digestmod='sha256'
).hexdigest())
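    # Example (illustrative only, with made-up values): for a GET request with
    # params {'symbol': 'BTCUSD'}, the string that gets signed looks like
    #
    #     api_key=abc123&recv_window=5000&symbol=BTCUSD&timestamp=1609459200000
    #
    # and the resulting HMAC-SHA256 hex digest is later sent as the 'sign'
    # query parameter by _submit_request().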
def _submit_request(self, method=None, path=None, query=None, auth=False):
"""
Submits the request to the API.
Notes
-------------------
We use the params argument for the GET method, and data argument for
the POST method. Dicts passed to the data argument must be
JSONified prior to submitting request.
"""
# Store original recv_window.
recv_window = self.recv_window
# Bug fix: change floating whole numbers to integers to prevent
# auth signature errors.
if query is not None:
for i in query.keys():
if isinstance(query[i], float) and query[i] == int(query[i]):
query[i] = int(query[i])
# Send request and return headers with body. Retry if failed.
retries_attempted = self.max_retries
req_params = None
while True:
retries_attempted -= 1
if retries_attempted < 0:
raise FailedRequestError(
request=f'{method} {path}: {req_params}',
message='Bad Request. Retries exceeded maximum.',
status_code=400,
time=dt.utcnow().strftime("%H:%M:%S")
)
retries_remaining = f'{retries_attempted} retries remain.'
# Authenticate if we are using a private endpoint.
if auth:
# Prepare signature.
signature = self._auth(
method=method,
params=query,
recv_window=recv_window,
)
# Sort the dictionary alphabetically.
query = dict(sorted(query.items(), key=lambda x: x))
# Append the signature to the dictionary.
query['sign'] = signature
# Define parameters and log the request.
if query is not None:
req_params = {k: v for k, v in query.items() if
v is not None}
else:
req_params = {}
# Log the request.
if self.log_requests:
self.logger.debug(f'Request -> {method} {path}: {req_params}')
# Prepare request; use 'params' for GET and 'data' for POST.
if method == 'GET':
r = self.client.prepare_request(
requests.Request(method, path, params=req_params)
)
else:
r = self.client.prepare_request(
requests.Request(method, path, data=json.dumps(req_params))
)
# Attempt the request.
try:
s = self.client.send(r, timeout=self.timeout)
# If requests fires an error, retry.
except (
requests.exceptions.ReadTimeout,
requests.exceptions.SSLError,
requests.exceptions.ConnectionError
) as e:
if self.force_retry:
self.logger.error(f'{e}. {retries_remaining}')
time.sleep(self.retry_delay)
continue
else:
raise e
# Convert response to dictionary, or raise if requests error.
try:
s_json = s.json()
# If we have trouble converting, handle the error and retry.
except JSONDecodeError as e:
if self.force_retry:
self.logger.error(f'{e}. {retries_remaining}')
time.sleep(self.retry_delay)
continue
else:
raise FailedRequestError(
request=f'{method} {path}: {req_params}',
message='Conflict. Could not decode JSON.',
status_code=409,
time=dt.utcnow().strftime("%H:%M:%S")
)
# If Bybit returns an error, raise.
if s_json['ret_code']:
# Generate error message.
error_msg = (
                    f'{s_json["ret_msg"]} (ErrCode: {s_json["ret_code"]})'
)
# Set default retry delay.
err_delay = self.retry_delay
# Retry non-fatal whitelisted error requests.
if s_json['ret_code'] in self.retry_codes:
# 10002, recv_window error; add 2.5 seconds and retry.
if s_json['ret_code'] == 10002:
error_msg += '. Added 2.5 seconds to recv_window'
recv_window += 2500
# 10006, ratelimit error; wait until rate_limit_reset_ms
# and retry.
elif s_json['ret_code'] == 10006:
self.logger.error(
f'{error_msg}. Ratelimited on current request. '
f'Sleeping, then trying again. Request: {path}'
)
# Calculate how long we need to wait.
limit_reset = s_json['rate_limit_reset_ms'] / 1000
reset_str = time.strftime(
'%X', time.localtime(limit_reset)
)
err_delay = int(limit_reset) - int(time.time())
error_msg = (
f'Ratelimit will reset at {reset_str}. '
f'Sleeping for {err_delay} seconds'
)
# Log the error.
self.logger.error(f'{error_msg}. {retries_remaining}')
time.sleep(err_delay)
continue
elif s_json['ret_code'] in self.ignore_codes:
pass
else:
raise InvalidRequestError(
request=f'{method} {path}: {req_params}',
message=s_json["ret_msg"],
status_code=s_json["ret_code"],
time=dt.utcnow().strftime("%H:%M:%S")
)
else:
return s_json
class WebSocket:
"""
Connector for Bybit's WebSocket API.
"""
def __init__(self, endpoint, api_key=None, api_secret=None,
subscriptions=None, logging_level=logging.INFO,
max_data_length=200, ping_interval=30, ping_timeout=10,
restart_on_error=True, purge_on_fetch=True,
trim_data=True):
"""
Initializes the websocket session.
:param endpoint: Required parameter. The endpoint of the remote
websocket.
:param api_key: Your API key. Required for authenticated endpoints.
Defaults to None.
:param api_secret: Your API secret key. Required for authenticated
endpoints. Defaults to None.
:param subscriptions: A list of desired topics to subscribe to. See API
documentation for more information. Defaults to an empty list, which
will raise an error.
:param logging_level: The logging level of the built-in logger. Defaults
to logging.INFO. Options are CRITICAL (50), ERROR (40),
WARNING (30), INFO (20), DEBUG (10), or NOTSET (0).
:param max_data_length: The maximum number of rows for the stored
dataset. A smaller number will prevent performance or memory issues.
:param ping_interval: The number of seconds between each automated ping.
:param ping_timeout: The number of seconds to wait for 'pong' before an
Exception is raised.
:param restart_on_error: Whether or not the connection should restart on
error.
:param purge_on_fetch: Whether or not stored data should be purged each
fetch. For example, if the user subscribes to the 'trade' topic, and
fetches, should the data show all trade history up to the maximum
length or only get the data since the last fetch?
        :param trim_data: Whether returned messages should be trimmed to just
            their 'data' value.
:returns: WebSocket session.
"""
if not subscriptions:
raise Exception('Subscription list cannot be empty!')
# Require symbol on 'trade' topic.
if 'trade' in subscriptions:
raise Exception('\'trade\' requires a ticker, e.g. '
'\'trade.BTCUSD\'.')
# Require currency on 'insurance' topic.
if 'insurance' in subscriptions:
raise Exception('\'insurance\' requires a currency, e.g. '
'\'insurance.BTC\'.')
# Require timeframe and ticker on 'klineV2' topic.
if 'klineV2' in subscriptions:
raise Exception('\'klineV2\' requires a timeframe and ticker, e.g.'
' \'klineV2.5.BTCUSD\'.')
# set websocket name for logging purposes
self.wsName = 'Authenticated' if api_key else 'Non-Authenticated'
# Setup logger.
self.logger = logging.getLogger(__name__)
if len(logging.root.handlers) == 0:
            # No handler is set on the root logger, so attach one to this
            # logger only to avoid interfering with logging configured outside.
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
)
handler.setLevel(logging_level)
self.logger.addHandler(handler)
self.logger.debug(f'Initializing {self.wsName} WebSocket.')
# Ensure authentication for private topics.
if any(i in subscriptions for i in [
'position',
'execution',
'order',
'stop_order',
'wallet'
]) and api_key is None:
raise PermissionError('You must be authorized to use '
'private topics!')
# Set endpoint.
self.endpoint = endpoint
# Set API keys.
self.api_key = api_key
self.api_secret = api_secret
# Set topic subscriptions for WebSocket.
self.subscriptions = subscriptions
self.max_length = max_data_length
# Set ping settings.
self.ping_interval = ping_interval
self.ping_timeout = ping_timeout
# Other optional data handling settings.
self.handle_error = restart_on_error
self.purge = purge_on_fetch
self.trim = trim_data
        # Set initial state, initialize dictionary and connect.
self._reset()
self._connect(self.endpoint)
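    # Example (a minimal sketch; the endpoint URL is an assumption -- check
    # Bybit's docs for the current realtime endpoints):
    #
    #     ws = WebSocket(
    #         endpoint='wss://stream.bybit.com/realtime',
    #         subscriptions=['klineV2.1.BTCUSD', 'instrument_info.100ms.BTCUSD']
    #     )
    #
    # Private topics such as 'position', 'order' or 'wallet' additionally
    # require api_key and api_secret.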
def fetch(self, topic):
"""
Fetches data from the subscribed topic.
:param topic: Required parameter. The subscribed topic to poll.
:returns: Filtered data as dict.
"""
# If topic isn't a string.
if not isinstance(topic, str):
self.logger.error('Topic argument must be a string.')
return
# If the topic given isn't in the initial subscribed list.
if topic not in self.subscriptions:
self.logger.error(f'You aren\'t subscribed to the {topic} topic.')
return
# Pop all trade or execution data on each poll.
        # Don't pop 'order' or 'stop_order' data, as we would lose valuable state.
if topic.startswith((
'trade',
'execution'
)) and not topic.startswith('orderBook'):
data = self.data[topic].copy()
if self.purge:
self.data[topic] = []
return data
else:
try:
return self.data[topic]
except KeyError:
return []
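    # Example (sketch; topic names follow Bybit's WebSocket docs and must
    # match the subscription strings exactly):
    #
    #     orderbook = ws.fetch('orderBookL2_25.BTCUSD')
    #     trades = ws.fetch('trade.BTCUSD')  # purged after each fetch by default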
def ping(self):
"""
Pings the remote server to test the connection. The status of the
connection can be monitored using ws.ping().
"""
self.ws.send(json.dumps({'op': 'ping'}))
def exit(self):
"""
Closes the websocket connection.
"""
self.ws.close()
while self.ws.sock:
continue
self.exited = True
def _auth(self):
"""
Authorize websocket connection.
"""
# Generate expires.
expires = int((time.time() + 1) * 1000)
# Generate signature.
_val = f'GET/realtime{expires}'
signature = str(hmac.new(
bytes(self.api_secret, 'utf-8'),
bytes(_val, 'utf-8'), digestmod='sha256'
).hexdigest())
# Authenticate with API.
self.ws.send(
json.dumps({
'op': 'auth',
'args': [self.api_key, expires, signature]
})
)
def _connect(self, url):
"""
Open websocket in a thread.
"""
self.ws = websocket.WebSocketApp(
url=url,
on_message=lambda ws, msg: self._on_message(msg),
            on_close=lambda ws, *args: self._on_close(),
            on_open=lambda ws: self._on_open(),
on_error=lambda ws, err: self._on_error(err)
)
# Setup the thread running WebSocketApp.
self.wst = threading.Thread(target=lambda: self.ws.run_forever(
ping_interval=self.ping_interval,
ping_timeout=self.ping_timeout
))
# Configure as daemon; start.
self.wst.daemon = True
self.wst.start()
        # Attempt to connect for up to 10 seconds (polling once per second).
retries = 10
while retries > 0 and (not self.ws.sock or not self.ws.sock.connected):
retries -= 1
time.sleep(1)
# If connection was not successful, raise error.
if retries <= 0:
self.exit()
raise websocket.WebSocketTimeoutException('Connection failed.')
# If given an api_key, authenticate.
if self.api_key and self.api_secret:
self._auth()
# Check if subscriptions is a list.
if isinstance(self.subscriptions, str):
self.subscriptions = [self.subscriptions]
# Subscribe to the requested topics.
self.ws.send(
json.dumps({
'op': 'subscribe',
'args': self.subscriptions
})
)
# Initialize the topics.
for topic in self.subscriptions:
if topic not in self.data:
self.data[topic] = {}
@staticmethod
def _find_index(source, target, key):
"""
Find the index in source list of the targeted ID.
"""
return next(i for i, j in enumerate(source) if j[key] == target[key])
def _on_message(self, message):
"""
Parse incoming messages. Similar structure to the
official WS connector.
"""
# Load dict of message.
msg_json = json.loads(message)
# If 'success' exists
if 'success' in msg_json:
if msg_json['success']:
# If 'request' exists.
if 'request' in msg_json:
                    # If we get successful auth, notify user.
if msg_json['request']['op'] == 'auth':
self.logger.debug('Authorization successful.')
self.auth = True
# If we get successful subscription, notify user.
if msg_json['request']['op'] == 'subscribe':
sub = msg_json['request']['args']
self.logger.debug(f'Subscription to {sub} successful.')
else:
response = msg_json['ret_msg']
if 'unknown topic' in response:
self.logger.error('Couldn\'t subscribe to topic.'
f' Error: {response}.')
                # If we get unsuccessful auth, notify user.
elif msg_json['request']['op'] == 'auth':
self.logger.debug('Authorization failed. Please check your '
'API keys and restart.')
elif 'topic' in msg_json:
topic = msg_json['topic']
# If incoming 'orderbookL2' data.
if 'orderBook' in topic:
# Make updates according to delta response.
if 'delta' in msg_json['type']:
# Delete.
for entry in msg_json['data']['delete']:
index = self._find_index(self.data[topic], entry, 'id')
self.data[topic].pop(index)
# Update.
for entry in msg_json['data']['update']:
index = self._find_index(self.data[topic], entry, 'id')
self.data[topic][index] = entry
# Insert.
for entry in msg_json['data']['insert']:
self.data[topic].append(entry)
# Record the initial snapshot.
elif 'snapshot' in msg_json['type']:
self.data[topic] = msg_json['data']
# For incoming 'order' and 'stop_order' data.
elif any(i in topic for i in ['order', 'stop_order']):
# record incoming data
for i in msg_json['data']:
try:
# update existing entries
# temporary workaround for field anomaly in stop_order data
ord_id = topic + '_id' if i['symbol'].endswith('USDT') else 'order_id'
index = self._find_index(self.data[topic], i, ord_id)
self.data[topic][index] = i
except StopIteration:
# Keep appending or create new list if not already created.
try:
self.data[topic].append(i)
except AttributeError:
self.data[topic] = msg_json['data']
# For incoming 'trade' and 'execution' data.
elif any(i in topic for i in ['trade', 'execution']):
# Keep appending or create new list if not already created.
try:
for i in msg_json['data']:
self.data[topic].append(i)
except AttributeError:
self.data[topic] = msg_json['data']
# If list is too long, pop the first entry.
if len(self.data[topic]) > self.max_length:
self.data[topic].pop(0)
# If incoming 'insurance', 'klineV2', or 'wallet' data.
elif any(i in topic for i in ['insurance', 'klineV2', 'wallet',
'candle']):
# Record incoming data.
self.data[topic] = msg_json['data'][0] if self.trim else msg_json
# If incoming 'instrument_info' data.
elif 'instrument_info' in topic:
# Make updates according to delta response.
if 'delta' in msg_json['type']:
for i in msg_json['data']['update'][0]:
self.data[topic][i] = msg_json['data']['update'][0][i]
# Record the initial snapshot.
elif 'snapshot' in msg_json['type']:
self.data[topic] = msg_json['data'] if self.trim else msg_json
# If incoming 'position' data.
elif 'position' in topic:
# Record incoming position data.
for p in msg_json['data']:
# linear (USDT) positions have Buy|Sell side and
# updates contain all USDT positions.
# For linear tickers...
if p['symbol'].endswith('USDT'):
try:
self.data[topic][p['symbol']][p['side']] = p
# if side key hasn't been created yet...
except KeyError:
self.data[topic][p['symbol']] = {p['side']: p}
# For non-linear tickers...
else:
self.data[topic][p['symbol']] = p
def _on_error(self, error):
"""
Exit on errors and raise exception, or attempt reconnect.
"""
if not self.exited:
self.logger.error(f'WebSocket {self.wsName} encountered error: {error}.')
self.exit()
# Reconnect.
if self.handle_error:
self._reset()
self._connect(self.endpoint)
def _on_open(self):
"""
Log WS open.
"""
self.logger.debug(f'WebSocket {self.wsName} opened.')
def _on_close(self):
"""
Log WS close.
"""
self.logger.debug(f'WebSocket {self.wsName} closed.')
def _reset(self):
"""
Set state booleans and initialize dictionary.
"""
self.exited = False
self.auth = False
self.data = {}
|
# -*- coding: utf-8 -*-
"""
pybit
------------------------
pybit is a lightweight and high-performance API connector for the
RESTful and WebSocket APIs of the Bybit exchange.
Documentation can be found at
https://github.com/verata-veritatis/pybit
:copyright: (c) 2020-2021 verata-veritatis
:license: MIT License
"""
import time
import hmac
import json
import logging
import threading
import requests
import websocket
from datetime import datetime as dt
from concurrent.futures import ThreadPoolExecutor
from .exceptions import FailedRequestError, InvalidRequestError
# Requests will use simplejson if available.
try:
from simplejson.errors import JSONDecodeError
except ImportError:
from json.decoder import JSONDecodeError
# Versioning.
VERSION = '1.1.18'
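# Example (a minimal sketch, not part of the library): typical usage is to
# import the two connectors directly, assuming this module is installed as
# the 'pybit' package:
#
#     from pybit import HTTP, WebSocket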
class HTTP:
"""
Connector for Bybit's HTTP API.
:param endpoint: The endpoint URL of the HTTP API, e.g.
'https://api-testnet.bybit.com'.
:type endpoint: str
:param api_key: Your API key. Required for authenticated endpoints. Defaults
to None.
:type api_key: str
:param api_secret: Your API secret key. Required for authenticated
endpoints. Defaults to None.
:type api_secret: str
:param logging_level: The logging level of the built-in logger. Defaults to
logging.INFO. Options are CRITICAL (50), ERROR (40), WARNING (30),
INFO (20), DEBUG (10), or NOTSET (0).
:type logging_level: Union[int, logging.level]
:param log_requests: Whether or not pybit should log each HTTP request.
:type log_requests: bool
:param request_timeout: The timeout of each API request in seconds. Defaults
to 10 seconds.
:type request_timeout: int
:param recv_window: How long an HTTP request is valid in ms. Default is
5000.
:type recv_window: int
:param force_retry: Whether or not pybit should retry a timed-out request.
:type force_retry: bool
    :param retry_codes: A set of non-fatal status codes to retry on.
    :type retry_codes: set
    :param ignore_codes: A set of non-fatal status codes to ignore.
    :type ignore_codes: set
:param max_retries: The number of times to re-attempt a request.
:type max_retries: int
:param retry_delay: Seconds between retries for returned error or timed-out
requests. Default is 3 seconds.
:type retry_delay: int
:param referral_id: An optional referer ID can be added to each request for
identification.
:type referral_id: str
:returns: pybit.HTTP session.
"""
def __init__(self, endpoint=None, api_key=None, api_secret=None,
logging_level=logging.INFO, log_requests=False,
request_timeout=10, recv_window=5000, force_retry=False,
retry_codes=None, ignore_codes=None, max_retries=3,
retry_delay=3, referral_id=None):
"""Initializes the HTTP class."""
# Set the endpoint.
if endpoint is None:
self.endpoint = 'https://api.bybit.com'
else:
self.endpoint = endpoint
# Setup logger.
self.logger = logging.getLogger(__name__)
if len(logging.root.handlers) == 0:
            # No handler is set on the root logger, so attach one to this
            # logger only to avoid interfering with logging configured outside.
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
)
handler.setLevel(logging_level)
self.logger.addHandler(handler)
self.logger.debug('Initializing HTTP session.')
self.log_requests = log_requests
# Set API keys.
self.api_key = api_key
self.api_secret = api_secret
# Set timeout.
self.timeout = request_timeout
self.recv_window = recv_window
self.force_retry = force_retry
self.max_retries = max_retries
self.retry_delay = retry_delay
# Set whitelist of non-fatal Bybit status codes to retry on.
if retry_codes is None:
self.retry_codes = {10002, 10006, 30034, 30035, 130035, 130150}
else:
self.retry_codes = retry_codes
# Set whitelist of non-fatal Bybit status codes to ignore.
if ignore_codes is None:
self.ignore_codes = set()
else:
self.ignore_codes = ignore_codes
# Initialize requests session.
self.client = requests.Session()
self.client.headers.update(
{
'User-Agent': 'pybit-' + VERSION,
'Content-Type': 'application/json',
'Accept': 'application/json',
}
)
# Add referral ID to header.
if referral_id:
self.client.headers.update({'Referer': referral_id})
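    # Example (a minimal sketch): creating a session against the testnet
    # endpoint mentioned in the class docstring. The key values are
    # placeholders.
    #
    #     session = HTTP(
    #         endpoint='https://api-testnet.bybit.com',
    #         api_key='...',
    #         api_secret='...',
    #         request_timeout=15
    #     )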
def _exit(self):
"""Closes the request session."""
self.client.close()
self.logger.debug('HTTP session closed.')
def orderbook(self, **kwargs):
"""
Get the orderbook.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-orderbook.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/public/orderBook/L2',
query=kwargs
)
def query_kline(self, **kwargs):
"""
Get kline.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-querykline.
:returns: Request results as dictionary.
"""
# Replace query param 'from_time' since 'from' keyword is reserved.
# Temporary workaround until Bybit updates official request params
if 'from_time' in kwargs:
kwargs['from'] = kwargs.pop('from_time')
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/public/linear/kline'
else:
suffix = '/v2/public/kline/list'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs
)
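    # Example (sketch): 'from_time' is translated to Bybit's reserved 'from'
    # parameter by the workaround above. Parameter names follow the linked
    # docs; the values are placeholders.
    #
    #     session.query_kline(symbol='BTCUSD', interval='15',
    #                         from_time=1600000000)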
def latest_information_for_symbol(self, **kwargs):
"""
Get the latest information for symbol.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-latestsymbolinfo.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/public/tickers',
query=kwargs
)
def public_trading_records(self, **kwargs):
"""
Get recent trades. You can find a complete history of trades on Bybit
at https://public.bybit.com/.
:param kwargs: See
            https://bybit-exchange.github.io/docs/inverse/#t-publictradingrecords.
:returns: Request results as dictionary.
"""
# Replace query param 'from_id' since 'from' keyword is reserved.
# Temporary workaround until Bybit updates official request params
if 'from_id' in kwargs:
kwargs['from'] = kwargs.pop('from_id')
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/public/linear/recent-trading-records'
else:
suffix = '/v2/public/trading-records'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs
)
def query_symbol(self):
"""
Get symbol info.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/public/symbols'
)
def liquidated_orders(self, **kwargs):
"""
Retrieve the liquidated orders. The query range is the last seven days
of data.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-query_liqrecords.
:returns: Request results as dictionary.
"""
# Replace query param 'from_id' since 'from' keyword is reserved.
# Temporary workaround until Bybit updates official request params
if 'from_id' in kwargs:
kwargs['from'] = kwargs.pop('from_id')
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/public/liq-records',
query=kwargs
)
def query_mark_price_kline(self, **kwargs):
"""
Query mark price kline (like query_kline but for mark price).
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-markpricekline.
:returns: Request results as dictionary.
"""
# Replace query param 'from_time' since 'from' keyword is reserved.
# Temporary workaround until Bybit updates official request params
if 'from_time' in kwargs:
kwargs['from'] = kwargs.pop('from_time')
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/public/linear/mark-price-kline'
else:
suffix = '/v2/public/mark-price-kline'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs
)
def open_interest(self, **kwargs):
"""
Gets the total amount of unsettled contracts. In other words, the total
number of contracts held in open positions.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-marketopeninterest.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/public/open-interest',
query=kwargs
)
def latest_big_deal(self, **kwargs):
"""
Obtain filled orders worth more than 500,000 USD within the last 24h.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-marketbigdeal.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/public/big-deal',
query=kwargs
)
def long_short_ratio(self, **kwargs):
"""
Gets the Bybit long-short ratio.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-marketaccountratio.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/public/account-ratio',
query=kwargs
)
def place_active_order(self, **kwargs):
"""
Places an active order. For more information, see
https://bybit-exchange.github.io/docs/inverse/#t-activeorders.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-activeorders.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/order/create'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/order/create'
else:
suffix = '/v2/private/order/create'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
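    # Example (sketch): a limit order on the inverse perpetual. Parameter
    # names follow the linked active-order docs; the values are placeholders.
    #
    #     session.place_active_order(
    #         symbol='BTCUSD',
    #         side='Buy',
    #         order_type='Limit',
    #         qty=1,
    #         price=20000,
    #         time_in_force='GoodTillCancel'
    #     )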
def place_active_order_bulk(self, orders: list, max_in_parallel=10):
"""
Places multiple active orders in bulk using multithreading. For more
information on place_active_order, see
https://bybit-exchange.github.io/docs/inverse/#t-activeorders.
:param list orders: A list of orders and their parameters.
:param max_in_parallel: The number of requests to be sent in parallel.
Note that you are limited to 50 requests per second.
:returns: Future request result dictionaries as a list.
"""
with ThreadPoolExecutor(max_workers=max_in_parallel) as executor:
executions = [
executor.submit(
self.place_active_order,
**order
) for order in orders
]
executor.shutdown()
return [execution.result() for execution in executions]
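    # Example (sketch): each dict is forwarded to place_active_order() as
    # kwargs, so the same parameter names apply.
    #
    #     orders = [
    #         {'symbol': 'BTCUSD', 'side': 'Buy', 'order_type': 'Market',
    #          'qty': 1, 'time_in_force': 'ImmediateOrCancel'},
    #         {'symbol': 'BTCUSD', 'side': 'Sell', 'order_type': 'Market',
    #          'qty': 1, 'time_in_force': 'ImmediateOrCancel'}
    #     ]
    #     results = session.place_active_order_bulk(orders, max_in_parallel=5)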
def get_active_order(self, **kwargs):
"""
Gets an active order. For more information, see
https://bybit-exchange.github.io/docs/inverse/#t-getactive.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-getactive.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/order/list'
        elif kwargs.get('symbol', '')[-2:].isdigit():
            suffix = '/futures/private/order/list'
        else:
            suffix = '/v2/private/order/list'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def cancel_active_order(self, **kwargs):
"""
Cancels an active order. For more information, see
https://bybit-exchange.github.io/docs/inverse/#t-cancelactive.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-cancelactive.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/order/cancel'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/order/cancel'
else:
suffix = '/v2/private/order/cancel'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def cancel_active_order_bulk(self, orders: list, max_in_parallel=10):
"""
Cancels multiple active orders in bulk using multithreading. For more
information on cancel_active_order, see
https://bybit-exchange.github.io/docs/inverse/#t-activeorders.
:param list orders: A list of orders and their parameters.
:param max_in_parallel: The number of requests to be sent in parallel.
Note that you are limited to 50 requests per second.
:returns: Future request result dictionaries as a list.
"""
with ThreadPoolExecutor(max_workers=max_in_parallel) as executor:
executions = [
executor.submit(
self.cancel_active_order,
**order
) for order in orders
]
executor.shutdown()
return [execution.result() for execution in executions]
def cancel_all_active_orders(self, **kwargs):
"""
Cancel all active orders that are unfilled or partially filled. Fully
filled orders cannot be cancelled.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-cancelallactive.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/order/cancel-all'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/order/cancelAll'
else:
suffix = '/v2/private/order/cancelAll'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def replace_active_order(self, **kwargs):
"""
Replace order can modify/amend your active orders.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-replaceactive.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/order/replace'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/order/replace'
else:
suffix = '/v2/private/order/replace'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def replace_active_order_bulk(self, orders: list, max_in_parallel=10):
"""
Replaces multiple active orders in bulk using multithreading. For more
information on replace_active_order, see
https://bybit-exchange.github.io/docs/inverse/#t-replaceactive.
:param list orders: A list of orders and their parameters.
:param max_in_parallel: The number of requests to be sent in parallel.
Note that you are limited to 50 requests per second.
:returns: Future request result dictionaries as a list.
"""
with ThreadPoolExecutor(max_workers=max_in_parallel) as executor:
executions = [
executor.submit(
self.replace_active_order,
**order
) for order in orders
]
executor.shutdown()
return [execution.result() for execution in executions]
def query_active_order(self, **kwargs):
"""
Query real-time active order information.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-queryactive.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/order/search'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/order'
else:
suffix = '/v2/private/order'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def place_conditional_order(self, **kwargs):
"""
Places a conditional order. For more information, see
https://bybit-exchange.github.io/docs/inverse/#t-placecond.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-placecond.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/stop-order/create'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/stop-order/create'
else:
suffix = '/v2/private/stop-order/create'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def place_conditional_order_bulk(self, orders: list, max_in_parallel=10):
"""
Places multiple conditional orders in bulk using multithreading. For
more information on place_active_order, see
https://bybit-exchange.github.io/docs/inverse/#t-placecond.
:param orders: A list of orders and their parameters.
:param max_in_parallel: The number of requests to be sent in parallel.
Note that you are limited to 50 requests per second.
:returns: Future request result dictionaries as a list.
"""
with ThreadPoolExecutor(max_workers=max_in_parallel) as executor:
executions = [
executor.submit(
self.place_conditional_order,
**order
) for order in orders
]
executor.shutdown()
return [execution.result() for execution in executions]
def get_conditional_order(self, **kwargs):
"""
Gets a conditional order. For more information, see
https://bybit-exchange.github.io/docs/inverse/#t-getcond.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-getcond.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/stop-order/list'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/stop-order/list'
else:
suffix = '/v2/private/stop-order/list'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def cancel_conditional_order(self, **kwargs):
"""
Cancels a conditional order. For more information, see
https://bybit-exchange.github.io/docs/inverse/#t-cancelcond.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-cancelcond.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/stop-order/cancel'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/stop-order/cancel'
else:
suffix = '/v2/private/stop-order/cancel'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def cancel_conditional_order_bulk(self, orders: list, max_in_parallel=10):
"""
Cancels multiple conditional orders in bulk using multithreading. For
more information on cancel_active_order, see
https://bybit-exchange.github.io/docs/inverse/#t-cancelcond.
:param list orders: A list of orders and their parameters.
:param max_in_parallel: The number of requests to be sent in parallel.
Note that you are limited to 50 requests per second.
:returns: Future request result dictionaries as a list.
"""
with ThreadPoolExecutor(max_workers=max_in_parallel) as executor:
executions = [
executor.submit(
self.cancel_conditional_order,
**order
) for order in orders
]
executor.shutdown()
return [execution.result() for execution in executions]
def cancel_all_conditional_orders(self, **kwargs):
"""
Cancel all conditional orders that are unfilled or partially filled.
Fully filled orders cannot be cancelled.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-cancelallcond.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/stop-order/cancel-all'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/stop-order/cancelAll'
else:
suffix = '/v2/private/stop-order/cancelAll'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def replace_conditional_order(self, **kwargs):
"""
Replace conditional order can modify/amend your conditional orders.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-replacecond.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/stop-order/replace'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/stop-order/replace'
else:
suffix = '/v2/private/stop-order/replace'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def replace_conditional_order_bulk(self, orders: list, max_in_parallel=10):
"""
Replaces multiple conditional orders in bulk using multithreading. For
more information on replace_active_order, see
https://bybit-exchange.github.io/docs/inverse/#t-replacecond.
:param list orders: A list of orders and their parameters.
:param max_in_parallel: The number of requests to be sent in parallel.
Note that you are limited to 50 requests per second.
:returns: Future request result dictionaries as a list.
"""
with ThreadPoolExecutor(max_workers=max_in_parallel) as executor:
executions = [
executor.submit(
self.replace_conditional_order,
**order
) for order in orders
]
executor.shutdown()
return [execution.result() for execution in executions]
def query_conditional_order(self, **kwargs):
"""
Query real-time conditional order information.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-querycond.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/stop-order/search'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/stop-order'
else:
suffix = '/v2/private/stop-order'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def my_position(self, **kwargs):
"""
Get my position list.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-myposition.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/position/list'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/position/list'
else:
suffix = '/v2/private/position/list'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
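    # Example (sketch): the symbol decides the API family -- 'BTCUSDT' routes
    # to the linear endpoint, a symbol ending in digits (a dated futures
    # contract) to inverse futures, and anything else to the inverse
    # perpetual endpoint.
    #
    #     positions = session.my_position(symbol='BTCUSD')['result']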
def set_auto_add_margin(self, **kwargs):
"""
For linear markets only. Set auto add margin, or Auto-Margin
Replenishment.
:param kwargs: See
https://bybit-exchange.github.io/docs/linear/#t-setautoaddmargin.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='POST',
path=self.endpoint + '/private/linear/position/set-auto-add-margin',
query=kwargs,
auth=True
)
def set_leverage(self, **kwargs):
"""
Change user leverage.
:param kwargs: See
https://bybit-exchange.github.io/docs/linear/#t-setleverage.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/position/set-leverage'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/position/leverage/save'
else:
suffix = '/v2/private/position/leverage/save'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
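    # Example (sketch): for linear symbols, Bybit's set-leverage docs use
    # buy_leverage and sell_leverage; treat the exact parameter names as an
    # assumption and check the linked docs. The values are placeholders.
    #
    #     session.set_leverage(symbol='BTCUSDT', buy_leverage=5,
    #                          sell_leverage=5)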
def cross_isolated_margin_switch(self, **kwargs):
"""
        For linear markets only. Switch Cross/Isolated; a leverage value must
        be set when switching from Cross to Isolated.
:param kwargs: See
https://bybit-exchange.github.io/docs/linear/#t-marginswitch.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/position/switch-isolated'
else:
suffix = '/futures/private/position/switch-mode'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def position_mode_switch(self, **kwargs):
"""
For futures markets only. Switch Cross/Isolated; must set leverage
value when switching from Cross to Isolated.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse_futures/#t-marginswitch.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='POST',
path=self.endpoint + '/futures/private/position/switch-mode',
query=kwargs,
auth=True
)
def change_margin(self, **kwargs):
"""
Update margin.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-changemargin.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/position/change-position-margin'
else:
suffix = '/v2/private/position/change-position-margin'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def set_trading_stop(self, **kwargs):
"""
Set take profit, stop loss, and trailing stop for your open position.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-tradingstop.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/position/trading-stop'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/position/trading-stop'
else:
suffix = '/v2/private/position/trading-stop'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def add_reduce_margin(self, **kwargs):
"""
For linear markets only. Add margin.
:param kwargs: See
https://bybit-exchange.github.io/docs/linear/#t-addmargin.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/private/linear/position/add-margin',
query=kwargs,
auth=True
)
def user_leverage(self, **kwargs):
"""
        Deprecated. Please use my_position() instead. Fetches user leverage by
        fetching the user's position.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-getleverage.
:returns: Request results as dictionary.
"""
self.logger.warning('This endpoint is deprecated and will be removed. Use my_position()')
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/private/position/list',
query=kwargs,
auth=True
)
def change_user_leverage(self, **kwargs):
"""
Change user leverage.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-changeleverage.
:returns: Request results as dictionary.
"""
self.logger.warning('This endpoint is deprecated and will be removed. Use set_leverage()')
return self._submit_request(
method='POST',
path=self.endpoint + '/user/leverage/save',
query=kwargs,
auth=True
)
def user_trade_records(self, **kwargs):
"""
Get user's trading records. The results are ordered in ascending order
(the first item is the oldest).
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-usertraderecords.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/trade/execution/list'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/execution/list'
else:
suffix = '/v2/private/execution/list'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def closed_profit_and_loss(self, **kwargs):
"""
Get user's closed profit and loss records. The results are ordered in
descending order (the first item is the latest).
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-closedprofitandloss.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/trade/closed-pnl/list'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/trade/closed-pnl/list'
else:
suffix = '/v2/private/trade/closed-pnl/list'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def get_risk_limit(self, is_linear=False):
"""
Get risk limit.
:param is_linear: True for linear, False for inverse. Defaults to
False.
:returns: Request results as dictionary.
"""
if is_linear:
suffix = '/public/linear/risk-limit'
else:
suffix = '/open-api/wallet/risk-limit/list'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
auth=True
)
def set_risk_limit(self, **kwargs):
"""
Set risk limit.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-setrisklimit.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='POST',
path=self.endpoint + '/open-api/wallet/risk-limit',
query=kwargs,
auth=True
)
def get_the_last_funding_rate(self, **kwargs):
"""
The funding rate is generated every 8 hours at 00:00 UTC, 08:00 UTC and
16:00 UTC. For example, if a request is sent at 12:00 UTC, the funding
rate generated earlier that day at 08:00 UTC will be sent.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-fundingrate.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/public/linear/funding/prev-funding-rate'
else:
suffix = '/v2/private/funding/prev-funding-rate'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs
)
def my_last_funding_fee(self, **kwargs):
"""
Funding settlement occurs every 8 hours at 00:00 UTC, 08:00 UTC and
16:00 UTC. The current interval's fund fee settlement is based on the
previous interval's fund rate. For example, at 16:00, the settlement is
based on the fund rate generated at 8:00. The fund rate generated at
16:00 will be used at 0:00 the next day.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-mylastfundingfee.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/funding/prev-funding'
else:
suffix = '/v2/private/funding/prev-funding'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def predicted_funding_rate(self, **kwargs):
"""
Get predicted funding rate and my funding fee.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-predictedfunding.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/funding/predicted-funding'
else:
suffix = '/v2/private/funding/predicted-funding'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def api_key_info(self):
"""
Get user's API key info.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/private/account/api-key',
auth=True
)
def lcp_info(self, **kwargs):
"""
Get user's LCP (data refreshes once an hour). Only supports inverse
perpetual at present. See
https://bybit-exchange.github.io/docs/inverse/#t-liquidity to learn
more.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-lcp.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/private/account/lcp',
query=kwargs,
auth=True
)
def get_wallet_balance(self, **kwargs):
"""
Get wallet balance info.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-balance.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/private/wallet/balance',
query=kwargs,
auth=True
)
def wallet_fund_records(self, **kwargs):
"""
Get wallet fund records. This endpoint also shows exchanges from the
Asset Exchange, where the types for the exchange are
ExchangeOrderWithdraw and ExchangeOrderDeposit.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-walletrecords.
:returns: Request results as dictionary.
"""
# Replace query param 'from_id' since 'from' keyword is reserved.
# Temporary workaround until Bybit updates official request params
if 'from_id' in kwargs:
kwargs['from'] = kwargs.pop('from_id')
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/private/wallet/fund/records',
query=kwargs,
auth=True
)
def withdraw_records(self, **kwargs):
"""
Get withdrawal records.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-withdrawrecords.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/private/wallet/withdraw/list',
query=kwargs,
auth=True
)
def asset_exchange_records(self, **kwargs):
"""
Get asset exchange records.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-assetexchangerecords.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/private/exchange-order/list',
query=kwargs,
auth=True
)
def server_time(self):
"""
Get Bybit server time.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/public/time'
)
def announcement(self):
"""
Get Bybit OpenAPI announcements in the last 30 days by reverse order.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/public/announcement'
)
'''
Additional Methods
These methods use two or more requests to perform a specific
function and are exclusive to pybit.
'''
def close_position(self, symbol):
"""
Closes your open position. Makes two requests (position, order).
Parameters
------------------------
symbol : str
Required parameter. The symbol of the market as a string,
e.g. 'BTCUSD'.
"""
# First we fetch the user's position.
try:
r = self.my_position(symbol=symbol)['result']
# If there is no returned position, we want to handle that.
except KeyError:
return self.logger.error('No position detected.')
# Next we generate a list of market orders
orders = [
{
'symbol': symbol,
'order_type': 'Market',
'side': 'Buy' if p['side'] == 'Sell' else 'Sell',
'qty': p['size'],
'time_in_force': 'ImmediateOrCancel',
'reduce_only': True,
'close_on_trigger': True
} for p in (r if isinstance(r, list) else [r]) if p['size'] > 0
]
if len(orders) == 0:
return self.logger.error('No position detected.')
# Submit a market order against each open position for the same qty.
return self.place_active_order_bulk(orders)
'''
Internal methods; signature and request submission.
For more information about the request signature, see
https://bybit-exchange.github.io/docs/inverse/#t-authentication.
'''
def _auth(self, method, params, recv_window):
"""
Generates authentication signature per Bybit API specifications.
Notes
-------------------
Since the POST method requires a JSONified dict, we need to ensure
the signature uses lowercase booleans instead of Python's
capitalized booleans. This is done in the bug fix below.
"""
api_key = self.api_key
api_secret = self.api_secret
if api_key is None or api_secret is None:
raise PermissionError('Authenticated endpoints require keys.')
# Append required parameters.
params['api_key'] = api_key
params['recv_window'] = recv_window
params['timestamp'] = int(time.time() * 10 ** 3)
# Sort dictionary alphabetically to create querystring.
_val = '&'.join(
[str(k) + '=' + str(v) for k, v in sorted(params.items()) if
(k != 'sign') and (v is not None)]
)
# Bug fix. Replaces all capitalized booleans with lowercase.
if method == 'POST':
_val = _val.replace('True', 'true').replace('False', 'false')
# Return signature.
return str(hmac.new(
bytes(api_secret, 'utf-8'),
bytes(_val, 'utf-8'), digestmod='sha256'
).hexdigest())
def _submit_request(self, method=None, path=None, query=None, auth=False):
"""
Submits the request to the API.
Notes
-------------------
We use the params argument for the GET method, and data argument for
the POST method. Dicts passed to the data argument must be
JSONified prior to submitting request.
"""
# Store original recv_window.
recv_window = self.recv_window
# Bug fix: change floating whole numbers to integers to prevent
# auth signature errors.
if query is not None:
for i in query.keys():
if isinstance(query[i], float) and query[i] == int(query[i]):
query[i] = int(query[i])
# Send request and return headers with body. Retry if failed.
retries_attempted = self.max_retries
req_params = None
while True:
retries_attempted -= 1
if retries_attempted < 0:
raise FailedRequestError(
request=f'{method} {path}: {req_params}',
message='Bad Request. Retries exceeded maximum.',
status_code=400,
time=dt.utcnow().strftime("%H:%M:%S")
)
retries_remaining = f'{retries_attempted} retries remain.'
# Authenticate if we are using a private endpoint.
if auth:
# Prepare signature.
signature = self._auth(
method=method,
params=query,
recv_window=recv_window,
)
# Sort the dictionary alphabetically.
query = dict(sorted(query.items(), key=lambda x: x))
# Append the signature to the dictionary.
query['sign'] = signature
# Define parameters and log the request.
if query is not None:
req_params = {k: v for k, v in query.items() if
v is not None}
else:
req_params = {}
# Log the request.
if self.log_requests:
self.logger.debug(f'Request -> {method} {path}: {req_params}')
# Prepare request; use 'params' for GET and 'data' for POST.
if method == 'GET':
r = self.client.prepare_request(
requests.Request(method, path, params=req_params)
)
else:
r = self.client.prepare_request(
requests.Request(method, path, data=json.dumps(req_params))
)
# Attempt the request.
try:
s = self.client.send(r, timeout=self.timeout)
# If requests fires an error, retry.
except (
requests.exceptions.ReadTimeout,
requests.exceptions.SSLError,
requests.exceptions.ConnectionError
) as e:
if self.force_retry:
self.logger.error(f'{e}. {retries_remaining}')
time.sleep(self.retry_delay)
continue
else:
raise e
# Convert response to dictionary, or raise if requests error.
try:
s_json = s.json()
# If we have trouble converting, handle the error and retry.
except JSONDecodeError as e:
if self.force_retry:
self.logger.error(f'{e}. {retries_remaining}')
time.sleep(self.retry_delay)
continue
else:
raise FailedRequestError(
request=f'{method} {path}: {req_params}',
message='Conflict. Could not decode JSON.',
status_code=409,
time=dt.utcnow().strftime("%H:%M:%S")
)
# If Bybit returns an error, raise.
if s_json['ret_code']:
# Generate error message.
error_msg = (
f'{s_json["ret_msg"]} (ErrCode: {s_json["ret_code"]})'
)
# Set default retry delay.
err_delay = self.retry_delay
# Retry non-fatal whitelisted error requests.
if s_json['ret_code'] in self.retry_codes:
# 10002, recv_window error; add 2.5 seconds and retry.
if s_json['ret_code'] == 10002:
error_msg += '. Added 2.5 seconds to recv_window'
recv_window += 2500
# 10006, ratelimit error; wait until rate_limit_reset_ms
# and retry.
elif s_json['ret_code'] == 10006:
self.logger.error(
f'{error_msg}. Ratelimited on current request. '
f'Sleeping, then trying again. Request: {path}'
)
# Calculate how long we need to wait.
limit_reset = s_json['rate_limit_reset_ms'] / 1000
reset_str = time.strftime(
'%X', time.localtime(limit_reset)
)
err_delay = int(limit_reset) - int(time.time())
error_msg = (
f'Ratelimit will reset at {reset_str}. '
f'Sleeping for {err_delay} seconds'
)
# Log the error.
self.logger.error(f'{error_msg}. {retries_remaining}')
time.sleep(err_delay)
continue
elif s_json['ret_code'] in self.ignore_codes:
pass
else:
raise InvalidRequestError(
request=f'{method} {path}: {req_params}',
message=s_json["ret_msg"],
status_code=s_json["ret_code"],
time=dt.utcnow().strftime("%H:%M:%S")
)
else:
return s_json
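# --- Illustrative sketch (assumptions noted) --------------------------------
# Private requests above are signed by _auth(): the query is sorted
# alphabetically, serialized, HMAC-SHA256'd with the API secret, and the hex
# digest is attached as 'sign' before the request is sent. The standalone
# helper below is a minimal sketch of that flow; the exact 'k=v&k=v'
# serialization and the api_key/timestamp/recv_window fields are assumptions
# inferred from the connector, and 'key'/'secret' are placeholder credentials.
def _example_sign_params(api_key, api_secret, params, recv_window=5000):
    signed = dict(params)
    signed['api_key'] = api_key
    signed['recv_window'] = recv_window
    signed['timestamp'] = int(time.time() * 1000)
    # Sort alphabetically so client and server rebuild the identical string.
    param_str = '&'.join(f'{k}={v}' for k, v in sorted(signed.items()))
    # Booleans are serialized lowercase, mirroring _auth() above.
    param_str = param_str.replace('True', 'true').replace('False', 'false')
    signed['sign'] = hmac.new(
        bytes(api_secret, 'utf-8'),
        bytes(param_str, 'utf-8'),
        digestmod='sha256',
    ).hexdigest()
    return signed
# e.g. _example_sign_params('key', 'secret', {'symbol': 'BTCUSD'}) returns a
# dict ready to be sent as GET params or as a JSON POST body.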
class WebSocket:
"""
Connector for Bybit's WebSocket API.
"""
def __init__(self, endpoint, api_key=None, api_secret=None,
subscriptions=None, logging_level=logging.INFO,
max_data_length=200, ping_interval=30, ping_timeout=10,
restart_on_error=True, purge_on_fetch=True,
trim_data=True):
"""
Initializes the websocket session.
:param endpoint: Required parameter. The endpoint of the remote
websocket.
:param api_key: Your API key. Required for authenticated endpoints.
Defaults to None.
:param api_secret: Your API secret key. Required for authenticated
endpoints. Defaults to None.
:param subscriptions: A list of desired topics to subscribe to. See API
documentation for more information. Defaults to an empty list, which
will raise an error.
:param logging_level: The logging level of the built-in logger. Defaults
to logging.INFO. Options are CRITICAL (50), ERROR (40),
WARNING (30), INFO (20), DEBUG (10), or NOTSET (0).
:param max_data_length: The maximum number of rows for the stored
dataset. A smaller number will prevent performance or memory issues.
:param ping_interval: The number of seconds between each automated ping.
:param ping_timeout: The number of seconds to wait for 'pong' before an
Exception is raised.
:param restart_on_error: Whether or not the connection should restart on
error.
:param purge_on_fetch: Whether or not stored data should be purged each
fetch. For example, if the user subscribes to the 'trade' topic, and
fetches, should the data show all trade history up to the maximum
length or only get the data since the last fetch?
        :param trim_data: Whether the returned data should be trimmed down
            to only the 'data' payload.
:returns: WebSocket session.
"""
if not subscriptions:
raise Exception('Subscription list cannot be empty!')
# Require symbol on 'trade' topic.
if 'trade' in subscriptions:
raise Exception('\'trade\' requires a ticker, e.g. '
'\'trade.BTCUSD\'.')
# Require currency on 'insurance' topic.
if 'insurance' in subscriptions:
raise Exception('\'insurance\' requires a currency, e.g. '
'\'insurance.BTC\'.')
# Require timeframe and ticker on 'klineV2' topic.
if 'klineV2' in subscriptions:
raise Exception('\'klineV2\' requires a timeframe and ticker, e.g.'
' \'klineV2.5.BTCUSD\'.')
        # Set the websocket name for logging purposes.
self.wsName = 'Authenticated' if api_key else 'Non-Authenticated'
# Setup logger.
self.logger = logging.getLogger(__name__)
if len(logging.root.handlers) == 0:
            # No handler is set on the root logger; add one just for this
            # logger so we don't disturb logging configured outside.
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
)
handler.setLevel(logging_level)
self.logger.addHandler(handler)
self.logger.debug(f'Initializing {self.wsName} WebSocket.')
# Ensure authentication for private topics.
if any(i in subscriptions for i in [
'position',
'execution',
'order',
'stop_order',
'wallet'
]) and api_key is None:
raise PermissionError('You must be authorized to use '
'private topics!')
# Set endpoint.
self.endpoint = endpoint
# Set API keys.
self.api_key = api_key
self.api_secret = api_secret
# Set topic subscriptions for WebSocket.
self.subscriptions = subscriptions
self.max_length = max_data_length
# Set ping settings.
self.ping_interval = ping_interval
self.ping_timeout = ping_timeout
# Other optional data handling settings.
self.handle_error = restart_on_error
self.purge = purge_on_fetch
self.trim = trim_data
        # Set initial state, initialize dictionary and connect.
self._reset()
self._connect(self.endpoint)
def fetch(self, topic):
"""
Fetches data from the subscribed topic.
:param topic: Required parameter. The subscribed topic to poll.
:returns: Filtered data as dict.
"""
# If topic isn't a string.
if not isinstance(topic, str):
self.logger.error('Topic argument must be a string.')
return
# If the topic given isn't in the initial subscribed list.
if topic not in self.subscriptions:
self.logger.error(f'You aren\'t subscribed to the {topic} topic.')
return
# Pop all trade or execution data on each poll.
        # Don't pop order or stop_order data, as we would lose valuable state.
if topic.startswith((
'trade',
'execution'
)) and not topic.startswith('orderBook'):
data = self.data[topic].copy()
if self.purge:
self.data[topic] = []
return data
else:
try:
return self.data[topic]
except KeyError:
return []
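    # Example (placeholder topics): after subscribing to
    # ['orderBookL2_25.BTCUSD', 'trade.BTCUSD'], fetch('trade.BTCUSD') returns
    # (and, when purge_on_fetch is set, clears) the buffered trades, while
    # fetch('orderBookL2_25.BTCUSD') returns the maintained book untouched.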
def ping(self):
"""
        Pings the remote server to test the connection. The connection
        status can be checked via the underlying socket's connected flag
        (self.ws.sock.connected), as done in _connect().
"""
self.ws.send(json.dumps({'op': 'ping'}))
def exit(self):
"""
Closes the websocket connection.
"""
self.ws.close()
while self.ws.sock:
continue
self.exited = True
def _auth(self):
"""
Authorize websocket connection.
"""
# Generate expires.
expires = int((time.time() + 1) * 1000)
# Generate signature.
_val = f'GET/realtime{expires}'
signature = str(hmac.new(
bytes(self.api_secret, 'utf-8'),
bytes(_val, 'utf-8'), digestmod='sha256'
).hexdigest())
# Authenticate with API.
self.ws.send(
json.dumps({
'op': 'auth',
'args': [self.api_key, expires, signature]
})
)
def _connect(self, url):
"""
Open websocket in a thread.
"""
self.ws = websocket.WebSocketApp(
url=url,
on_message=lambda ws, msg: self._on_message(msg),
            # Register the open/close callbacks rather than calling them here.
            on_close=lambda ws, *args: self._on_close(),
            on_open=lambda ws: self._on_open(),
on_error=lambda ws, err: self._on_error(err)
)
# Setup the thread running WebSocketApp.
self.wst = threading.Thread(target=lambda: self.ws.run_forever(
ping_interval=self.ping_interval,
ping_timeout=self.ping_timeout
))
# Configure as daemon; start.
self.wst.daemon = True
self.wst.start()
# Attempt to connect for X seconds.
retries = 10
while retries > 0 and (not self.ws.sock or not self.ws.sock.connected):
retries -= 1
time.sleep(1)
# If connection was not successful, raise error.
if retries <= 0:
self.exit()
raise websocket.WebSocketTimeoutException('Connection failed.')
# If given an api_key, authenticate.
if self.api_key and self.api_secret:
self._auth()
# Check if subscriptions is a list.
if isinstance(self.subscriptions, str):
self.subscriptions = [self.subscriptions]
# Subscribe to the requested topics.
self.ws.send(
json.dumps({
'op': 'subscribe',
'args': self.subscriptions
})
)
# Initialize the topics.
for topic in self.subscriptions:
if topic not in self.data:
self.data[topic] = {}
@staticmethod
def _find_index(source, target, key):
"""
Find the index in source list of the targeted ID.
"""
return next(i for i, j in enumerate(source) if j[key] == target[key])
def _on_message(self, message):
"""
Parse incoming messages. Similar structure to the
official WS connector.
"""
# Load dict of message.
msg_json = json.loads(message)
# If 'success' exists
if 'success' in msg_json:
if msg_json['success']:
# If 'request' exists.
if 'request' in msg_json:
                    # If we get successful auth, notify user.
if msg_json['request']['op'] == 'auth':
self.logger.debug('Authorization successful.')
self.auth = True
# If we get successful subscription, notify user.
if msg_json['request']['op'] == 'subscribe':
sub = msg_json['request']['args']
self.logger.debug(f'Subscription to {sub} successful.')
else:
response = msg_json['ret_msg']
if 'unknown topic' in response:
self.logger.error('Couldn\'t subscribe to topic.'
f' Error: {response}.')
                # If we get unsuccessful auth, notify user.
elif msg_json['request']['op'] == 'auth':
self.logger.debug('Authorization failed. Please check your '
'API keys and restart.')
elif 'topic' in msg_json:
topic = msg_json['topic']
# If incoming 'orderbookL2' data.
if 'orderBook' in topic:
# Make updates according to delta response.
if 'delta' in msg_json['type']:
# Delete.
for entry in msg_json['data']['delete']:
index = self._find_index(self.data[topic], entry, 'id')
self.data[topic].pop(index)
# Update.
for entry in msg_json['data']['update']:
index = self._find_index(self.data[topic], entry, 'id')
self.data[topic][index] = entry
# Insert.
for entry in msg_json['data']['insert']:
self.data[topic].append(entry)
# Record the initial snapshot.
elif 'snapshot' in msg_json['type']:
self.data[topic] = msg_json['data']
# For incoming 'order' and 'stop_order' data.
elif any(i in topic for i in ['order', 'stop_order']):
# record incoming data
for i in msg_json['data']:
try:
# update existing entries
# temporary workaround for field anomaly in stop_order data
ord_id = topic + '_id' if i['symbol'].endswith('USDT') else 'order_id'
index = self._find_index(self.data[topic], i, ord_id)
self.data[topic][index] = i
except StopIteration:
# Keep appending or create new list if not already created.
try:
self.data[topic].append(i)
except AttributeError:
self.data[topic] = msg_json['data']
# For incoming 'trade' and 'execution' data.
elif any(i in topic for i in ['trade', 'execution']):
# Keep appending or create new list if not already created.
try:
for i in msg_json['data']:
self.data[topic].append(i)
except AttributeError:
self.data[topic] = msg_json['data']
# If list is too long, pop the first entry.
if len(self.data[topic]) > self.max_length:
self.data[topic].pop(0)
# If incoming 'insurance', 'klineV2', or 'wallet' data.
elif any(i in topic for i in ['insurance', 'klineV2', 'wallet',
'candle']):
# Record incoming data.
self.data[topic] = msg_json['data'][0] if self.trim else msg_json
# If incoming 'instrument_info' data.
elif 'instrument_info' in topic:
# Make updates according to delta response.
if 'delta' in msg_json['type']:
for i in msg_json['data']['update'][0]:
self.data[topic][i] = msg_json['data']['update'][0][i]
# Record the initial snapshot.
elif 'snapshot' in msg_json['type']:
self.data[topic] = msg_json['data'] if self.trim else msg_json
# If incoming 'position' data.
elif 'position' in topic:
# Record incoming position data.
for p in msg_json['data']:
# linear (USDT) positions have Buy|Sell side and
# updates contain all USDT positions.
# For linear tickers...
if p['symbol'].endswith('USDT'):
try:
self.data[topic][p['symbol']][p['side']] = p
# if side key hasn't been created yet...
except KeyError:
self.data[topic][p['symbol']] = {p['side']: p}
# For non-linear tickers...
else:
self.data[topic][p['symbol']] = p
def _on_error(self, error):
"""
Exit on errors and raise exception, or attempt reconnect.
"""
if not self.exited:
self.logger.error(f'WebSocket {self.wsName} encountered error: {error}.')
self.exit()
# Reconnect.
if self.handle_error:
self._reset()
self._connect(self.endpoint)
def _on_open(self):
"""
Log WS open.
"""
self.logger.debug(f'WebSocket {self.wsName} opened.')
def _on_close(self):
"""
Log WS close.
"""
self.logger.debug(f'WebSocket {self.wsName} closed.')
def _reset(self):
"""
Set state booleans and initialize dictionary.
"""
self.exited = False
self.auth = False
self.data = {}
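# --- Usage sketch (assumptions noted) ---------------------------------------
# A minimal way to drive the WebSocket class above. The endpoint URL is an
# assumed public stream address and 'trade.BTCUSD' an assumed topic name;
# both follow the per-symbol format required by the checks in __init__.
if __name__ == '__main__':
    ws = WebSocket(
        endpoint='wss://stream.bybit.com/realtime',  # assumed endpoint
        subscriptions=['trade.BTCUSD'],              # bare 'trade' would raise
    )
    time.sleep(5)                    # give the stream a moment to deliver data
    print(ws.fetch('trade.BTCUSD'))  # drained on each fetch (purge_on_fetch)
    ws.exit()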
|
import logging
from pajbot.managers.db import DBManager
from pajbot.models.command import Command
from pajbot.models.user import User
from pajbot.modules import BaseModule
from pajbot.modules import ModuleSetting
from pajbot.utils import time_since
log = logging.getLogger(__name__)
class TopModule(BaseModule):
ID = __name__.split(".")[-1]
NAME = "Top commands"
DESCRIPTION = "Commands that show the top X users of something"
CATEGORY = "Feature"
SETTINGS = [
ModuleSetting(
key="num_top",
label="How many people we should list",
type="number",
required=True,
placeholder="min 1, max 5",
default=3,
constraints={"min_value": 1, "max_value": 5},
),
ModuleSetting(
key="enable_topchatters",
label="Enable the !topchatters command (most messages)",
type="boolean",
required=True,
default=False,
),
ModuleSetting(
key="enable_topwatchers",
label="Enable the !topwatchers command (most time spent watching the stream)",
type="boolean",
required=True,
default=False,
),
ModuleSetting(
key="enable_topoffline",
label="Enable the !topoffline command (most time spent in offline chat)",
type="boolean",
required=True,
default=False,
),
ModuleSetting(
key="enable_toppoints",
label="Enable the !toppoints command (most points)",
type="boolean",
required=True,
default=False,
),
]
def top_chatters(self, bot, **rest):
data = []
with DBManager.create_session_scope() as db_session:
for user in db_session.query(User).filter_by(ignored=False).order_by(User.num_lines.desc()).limit(self.settings["num_top"]):
data.append(f"{user} ({user.num_lines})")
        bot.say(f"Top {self.settings['num_top']} chatters: {', '.join(data)}")
def top_watchers(self, bot, **rest):
data = []
with DBManager.create_session_scope() as db_session:
for user in (
db_session.query(User).filter_by(ignored=False).order_by(User.time_in_chat_online.desc()).limit(self.settings["num_top"])
):
                data.append(f"{user} ({time_since(user.time_in_chat_online.total_seconds(), 0, time_format='short')})")
        bot.say(f"Top {self.settings['num_top']} watchers: {', '.join(data)}")
def top_offline(self, bot, **rest):
data = []
with DBManager.create_session_scope() as db_session:
for user in (
db_session.query(User).filter_by(ignored=False).order_by(User.time_in_chat_offline.desc()).limit(self.settings["num_top"])
):
                data.append(f"{user} ({time_since(user.time_in_chat_offline.total_seconds(), 0, time_format='short')})")
        bot.say(f"Top {self.settings['num_top']} offline chatters: {', '.join(data)}")
def top_points(self, bot, **rest):
data = []
with DBManager.create_session_scope() as db_session:
for user in db_session.query(User).filter_by(ignored=False).order_by(User.points.desc()).limit(self.settings["num_top"]):
data.append(f"{user} ({user.points})")
        bot.say(f"Top {self.settings['num_top']} banks: {', '.join(data)}")
def load_commands(self, **options):
if self.settings["enable_topchatters"]:
self.commands["topchatters"] = Command.raw_command(self.top_chatters)
if self.settings["enable_topwatchers"]:
self.commands["topwatchers"] = Command.raw_command(self.top_watchers)
if self.settings["enable_topoffline"]:
self.commands["topoffline"] = Command.raw_command(self.top_offline)
if self.settings["enable_toppoints"]:
self.commands["toppoints"] = Command.raw_command(self.top_points)
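# --- Refactor sketch (not wired in) ------------------------------------------
# The four top_* handlers above differ only in the ORDER BY column and in how
# each user's value is rendered. A shared helper could look roughly like the
# sketch below; `column` and `fmt` are illustrative parameters, not existing
# pajbot API.
def _top_users_sketch(settings, db_session, column, fmt):
    """Return 'user (value)' strings for the top-N users ordered by `column`."""
    rows = (
        db_session.query(User)
        .filter_by(ignored=False)
        .order_by(column.desc())
        .limit(settings["num_top"])
    )
    return [f"{user} ({fmt(user)})" for user in rows]
# e.g. inside top_points(): data = _top_users_sketch(self.settings, db_session,
#                                                    User.points, lambda u: u.points)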
|
import logging
from pajbot.managers.db import DBManager
from pajbot.models.command import Command
from pajbot.models.user import User
from pajbot.modules import BaseModule
from pajbot.modules import ModuleSetting
from pajbot.utils import time_since
log = logging.getLogger(__name__)
class TopModule(BaseModule):
ID = __name__.split(".")[-1]
NAME = "Top commands"
DESCRIPTION = "Commands that show the top X users of something"
CATEGORY = "Feature"
SETTINGS = [
ModuleSetting(
key="num_top",
label="How many people we should list",
type="number",
required=True,
placeholder="min 1, max 5",
default=3,
constraints={"min_value": 1, "max_value": 5},
),
ModuleSetting(
key="enable_topchatters",
label="Enable the !topchatters command (most messages)",
type="boolean",
required=True,
default=False,
),
ModuleSetting(
key="enable_topwatchers",
label="Enable the !topwatchers command (most time spent watching the stream)",
type="boolean",
required=True,
default=False,
),
ModuleSetting(
key="enable_topoffline",
label="Enable the !topoffline command (most time spent in offline chat)",
type="boolean",
required=True,
default=False,
),
ModuleSetting(
key="enable_toppoints",
label="Enable the !toppoints command (most points)",
type="boolean",
required=True,
default=False,
),
]
def top_chatters(self, bot, **rest):
data = []
with DBManager.create_session_scope() as db_session:
for user in db_session.query(User).filter_by(ignored=False).order_by(User.num_lines.desc()).limit(self.settings["num_top"]):
data.append(f"{user} ({user.num_lines})")
bot.say(f"Top {self.settings['num_top']} chatters: {', '.join(data)}")
def top_watchers(self, bot, **rest):
data = []
with DBManager.create_session_scope() as db_session:
for user in (
db_session.query(User).filter_by(ignored=False).order_by(User.time_in_chat_online.desc()).limit(self.settings["num_top"])
):
data.append(f"{user} ({time_since(user.time_in_chat_online.total_seconds(), 0, time_format='short')})")
bot.say(f"Top {self.settings['num_top']} watchers: {', '.join(data)}")
def top_offline(self, bot, **rest):
data = []
with DBManager.create_session_scope() as db_session:
for user in (
db_session.query(User).filter_by(ignored=False).order_by(User.time_in_chat_offline.desc()).limit(self.settings["num_top"])
):
data.append(f"{user} ({time_since(user.time_in_chat_offline.total_seconds(), 0, time_format='short')})")
bot.say(f"Top {self.settings['num_top']} offline chatters: {', '.join(data)}")
def top_points(self, bot, **rest):
data = []
with DBManager.create_session_scope() as db_session:
for user in db_session.query(User).filter_by(ignored=False).order_by(User.points.desc()).limit(self.settings["num_top"]):
data.append(f"{user} ({user.points})")
bot.say(f"Top {self.settings['num_top']} banks: {', '.join(data)}")
def load_commands(self, **options):
if self.settings["enable_topchatters"]:
self.commands["topchatters"] = Command.raw_command(self.top_chatters)
if self.settings["enable_topwatchers"]:
self.commands["topwatchers"] = Command.raw_command(self.top_watchers)
if self.settings["enable_topoffline"]:
self.commands["topoffline"] = Command.raw_command(self.top_offline)
if self.settings["enable_toppoints"]:
self.commands["toppoints"] = Command.raw_command(self.top_points)
|
import json
import os
import sqlite3
from datetime import datetime
from pathlib import Path
import asyncpg
from tabulate import tabulate
from typing import Dict
from openmaptiles.pgutils import get_postgis_version, get_vector_layers
from openmaptiles.sqlite_utils import query
from openmaptiles.sqltomvt import MvtGenerator
from openmaptiles.tileset import Tileset
from openmaptiles.utils import print_err, Bbox, print_tile, shorten_str
class KeyFinder:
"""Search mbtiles for frequently used duplicate tiles"""
def __init__(self,
mbtiles,
show_size=None,
show_examples=None,
outfile: str = None,
zoom=None,
min_dup_count=None,
verbose=False) -> None:
self.mbtiles = mbtiles
if min_dup_count is not None:
min_dup_count = int(min_dup_count)
if min_dup_count < 2:
raise ValueError(f"min_dup_count must be an integer ≥ 2")
self.min_dup_count = min_dup_count
else:
self.min_dup_count = 50 if zoom and zoom > 12 else 20
self.use_stdout = outfile == '-'
self.zoom = zoom
self.verbose = verbose
if outfile:
self.outfile = True if self.use_stdout else Path(outfile)
else:
self.outfile = None
self.show_size = self.verbose if show_size is None else show_size
self.show_examples = self.verbose if show_examples is None else show_examples
def run(self):
if self.outfile and not self.use_stdout:
with self.outfile.open("w"):
pass # create or truncate file, but don't write anything to it yet
with sqlite3.connect(self.mbtiles) as conn:
results = []
if self.show_size:
sql = "SELECT cnt, dups.tile_id, LENGTH(tile_data) FROM (" \
" SELECT tile_id, COUNT(*) AS cnt FROM map " \
" GROUP BY tile_id HAVING cnt >= ?" \
") dups JOIN images ON images.tile_id = dups.tile_id"
sql_opts = [self.min_dup_count]
if self.zoom:
sql += f" WHERE zoom_level=?"
sql_opts.append(self.zoom)
else:
sql_opts = []
sql = "SELECT COUNT(*) cnt, tile_id FROM map"
if self.zoom:
sql += f" WHERE zoom_level=?"
sql_opts.append(self.zoom)
sql += " GROUP BY tile_id HAVING cnt >= ?"
sql_opts.append(self.min_dup_count)
for vals in query(conn, sql, sql_opts):
results.append(vals)
results.sort(reverse=True)
size = None
examples = None
for vals in results:
if len(vals) == 3:
count, tile_id, size = vals
else:
count, tile_id = vals
if self.show_examples:
example_sql = "SELECT zoom_level, tile_column, tile_row FROM map " \
"WHERE tile_id = ? LIMIT 5"
examples = [f'{z}/{x}/{y}' for z, x, y in
query(conn, example_sql, [tile_id])]
if self.verbose:
res = f"{tile_id} x {count:,}"
if self.show_size:
res += f', {size:,} bytes'
if self.show_examples:
res += ', examples: ' + ', '.join(examples)
print_err(res)
results = [v[1] for v in results]
if self.use_stdout:
for v in results:
print(v)
elif self.outfile:
with self.outfile.open("a") as f:
f.writelines([str(v) + '\n' for v in results])
return results
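# --- Usage sketch ------------------------------------------------------------
# An assumed, minimal invocation of KeyFinder: scan one zoom level of an
# mbtiles file for tile_ids repeated at least min_dup_count times and stream
# them to stdout. 'tiles.mbtiles' is a placeholder path, not a shipped file.
def _example_find_duplicate_keys(path='tiles.mbtiles', zoom=14):
    finder = KeyFinder(path, zoom=zoom, outfile='-', verbose=True)
    return finder.run()  # list of duplicate tile_ids (also printed to stdout)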
class Imputer:
def __init__(self, mbtiles, keys, zoom, outfile: str = None,
verbose=False) -> None:
self.mbtiles = mbtiles
self.keys = {k: 0 for k in keys}
self.zoom = zoom
self.use_stdout = outfile == '-'
self.verbose = verbose or not self.use_stdout
if outfile:
self.outfile = True if self.use_stdout else Path(outfile)
else:
self.outfile = None
def run(self):
with sqlite3.connect(self.mbtiles) as conn:
limit_to_keys = not self.outfile
if self.outfile and not self.use_stdout:
with self.outfile.open("w"):
pass # create or truncate file, but don't write anything to it yet
keyed_tiles = 0
nokey_tiles = 0
cursor = conn.cursor()
key_stats = self.keys
for with_key, without_key in self.tile_batches(conn, limit_to_keys):
without_key.sort()
if with_key:
with_key.sort()
for val in with_key:
key_stats[val[3]] += 1
cursor.executemany(
'INSERT OR IGNORE INTO map'
'(zoom_level, tile_column, tile_row, tile_id)'
' VALUES(?,?,?,?)',
with_key)
keyed_tiles += cursor.rowcount
conn.commit()
if without_key:
if self.use_stdout:
for v in without_key:
print(v, end='')
else:
with self.outfile.open("a") as f:
f.writelines(without_key)
nokey_tiles += len(without_key)
if self.verbose:
for k, c in key_stats.items():
print_err(f"{k} - added {c:,}")
print_err(f'Total imputed tiles: {keyed_tiles:,}')
if nokey_tiles:
print_err(f'Total tiles need to be generated: {nokey_tiles:,}')
def tile_batches(self, conn: sqlite3.Connection, limit_to_keys=False):
"""Generate batches of tiles to be processed for the new zoom,
based on the previous zoom level. Each yield contains two batches:
one with "empty" tiles (those that match known keys),
and another with non-empty tiles (only if limit_to_keys is False).
The first batch can be inserted into mbtiles db as is.
The second batch will be used as a list of tiles to be generated.
"""
batch_size = 1000000
zoom = self.zoom
search_zoom = zoom - 1
sql = f"SELECT tile_column, tile_row, tile_id FROM map WHERE zoom_level=?"
sql_args = [search_zoom]
if limit_to_keys:
            sql += f" and tile_id IN ({','.join(('?' * len(self.keys)))})"
sql_args += self.keys
with_key = []
without_key = []
max_y = 2 ** search_zoom - 1
for x, y, key in query(conn, sql, sql_args):
if limit_to_keys or key in self.keys:
with_key.append((zoom, x * 2, y * 2, key))
with_key.append((zoom, x * 2 + 1, y * 2, key))
with_key.append((zoom, x * 2, y * 2 + 1, key))
with_key.append((zoom, x * 2 + 1, y * 2 + 1, key))
else:
# mbtiles uses inverted Y (starts at the bottom)
ry = max_y - y
without_key.append(f"{zoom}/{x * 2}/{ry * 2}\n")
without_key.append(f"{zoom}/{x * 2 + 1}/{ry * 2}\n")
without_key.append(f"{zoom}/{x * 2}/{ry * 2 + 1}\n")
without_key.append(f"{zoom}/{x * 2 + 1}/{ry * 2 + 1}\n")
if len(with_key) > batch_size or len(without_key) > batch_size:
yield with_key, without_key
with_key = []
without_key = []
if with_key or without_key:
yield with_key, without_key
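# --- Illustrative sketch ------------------------------------------------------
# tile_batches() above expands every zoom-1 parent tile into its four children
# at the target zoom, flipping Y only for the "still to be generated" list
# because mbtiles stores rows bottom-up (TMS). The helper below mirrors that
# arithmetic for a single parent; it is an illustration only and is not used
# by Imputer itself.
def _children_of(zoom, x, y, invert_y=False):
    """Return the four child tiles at `zoom` of parent (x, y) at zoom-1."""
    if invert_y:
        y = (2 ** (zoom - 1) - 1) - y  # TMS flip at the parent zoom level
    return [(zoom, cx, cy)
            for cx in (x * 2, x * 2 + 1)
            for cy in (y * 2, y * 2 + 1)]
# e.g. _children_of(15, 100, 200) ->
#      [(15, 200, 400), (15, 200, 401), (15, 201, 400), (15, 201, 401)]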
class Metadata:
def __init__(self, mbtiles: str, show_json: bool = False,
show_ranges: bool = False) -> None:
self.mbtiles = mbtiles
self.show_json = show_json
self.show_ranges = show_ranges
def print_all(self, file: str = None):
file = file or self.mbtiles
data = self._get_metadata(file)
if data:
width = max((len(v) for v in data.keys()))
for name, value in sorted(data.items(),
key=lambda v: v[0] if v[0] != 'json' else 'zz'):
print(f"{name:{width}} {self.validate(name, value)[0]}")
else:
print(f"There are no values present in {file} metadata table")
if self.show_ranges:
with sqlite3.connect(file) as conn:
sql = """\
SELECT zoom_level, COUNT(*) AS count,
MIN(tile_column) AS min_column, MAX(tile_column) AS max_column,
MIN(tile_row) AS min_row, MAX(tile_row) AS max_row
FROM map
GROUP BY zoom_level
"""
res = []
for z, cnt, min_x, max_x, min_y, max_y in sorted(query(conn, sql, [])):
res.append({
"Zoom": z,
"Tile count": f"{cnt:,}",
"Found tile ranges": f"{min_x},{min_y} x {max_x},{max_y}",
})
print("\n" + tabulate(res, headers="keys"))
def get_value(self, name):
with sqlite3.connect(self.mbtiles) as conn:
cursor = conn.cursor()
cursor.execute("SELECT value FROM metadata WHERE name=?", [name])
row = cursor.fetchone()
if row is None:
print_err(f"Metadata field '{name}' is not found")
exit(1)
print(row[0])
def set_value(self, name, value):
if value is not None:
_, is_valid = self.validate(name, value)
if not is_valid:
raise ValueError(f"Invalid {name}={value}")
with sqlite3.connect(self.mbtiles) as conn:
cursor = conn.cursor()
if value is None:
cursor.execute("DELETE FROM metadata WHERE name=?;", [name])
else:
cursor.execute(
"INSERT OR REPLACE INTO metadata(name, value) VALUES (?, ?);",
[name, value])
async def generate(self, tileset, reset, auto_minmax,
pghost, pgport, dbname, user, password):
ts = Tileset.parse(tileset)
print(
f'Connecting to PostgreSQL at {pghost}:{pgport}, db={dbname}, user={user}...')
try:
async with asyncpg.create_pool(
database=dbname, host=pghost, port=pgport, user=user,
password=password, min_size=1, max_size=1,
) as pool:
async with pool.acquire() as conn:
mvt = MvtGenerator(
ts,
postgis_ver=await get_postgis_version(conn),
zoom='$1', x='$2', y='$3',
)
json_data = dict(vector_layers=await get_vector_layers(conn, mvt))
except ConnectionError as err:
print(f"Unable to connect to Postgres database: {err}")
raise err
# Convert tileset to the metadata object according to mbtiles 1.3 spec
# https://github.com/mapbox/mbtiles-spec/blob/master/1.3/spec.md#content
metadata = dict(
# MUST
name=ts.name,
format="pbf",
json=json.dumps(json_data, ensure_ascii=False, separators=(',', ':')),
# SHOULD
bounds=",".join((str(v) for v in ts.bounds)),
center=",".join((str(v) for v in ts.center)),
minzoom=str(ts.minzoom),
maxzoom=str(ts.maxzoom),
# MAY
attribution=ts.attribution,
description=ts.description,
version=ts.version,
# EXTRAS
id=ts.id,
)
self._update_metadata(metadata, auto_minmax, reset, self.mbtiles,
ts.center[2])
def copy(self, target_mbtiles, reset, auto_minmax):
metadata = self._get_metadata(self.mbtiles)
self._update_metadata(metadata, auto_minmax, reset, target_mbtiles)
def show_tile(self, zoom, x, y, show_names, summary):
with sqlite3.connect(self.mbtiles) as conn:
sql = "SELECT tile_data FROM tiles " \
"WHERE zoom_level=? AND tile_column=? AND tile_row=?"
for row in query(conn, sql, [zoom, x, y]):
print_tile(row[0], show_names, summary, f"{zoom}/{x}/{y}")
break
else:
print(f"Tile {zoom}/{x}/{y} not found")
def _update_metadata(self, metadata, auto_minmax, reset, file, center_zoom=None):
def update_from_env(param, env_var):
val = os.environ.get(env_var)
if val is not None:
metadata[param] = val
update_from_env('name', 'METADATA_NAME')
update_from_env('minzoom', 'MIN_ZOOM')
update_from_env('maxzoom', 'MAX_ZOOM')
update_from_env('attribution', 'METADATA_ATTRIBUTION')
update_from_env('description', 'METADATA_DESCRIPTION')
update_from_env('version', 'METADATA_VERSION')
metadata['filesize'] = os.path.getsize(file)
bbox_str = os.environ.get('BBOX')
if bbox_str:
bbox = Bbox(bbox=bbox_str,
center_zoom=os.environ.get('CENTER_ZOOM', center_zoom))
metadata["bounds"] = bbox.bounds_str()
metadata["center"] = bbox.center_str()
with sqlite3.connect(file) as conn:
cursor = conn.cursor()
if auto_minmax:
cursor.execute("SELECT MIN(zoom_level), MAX(zoom_level) FROM map")
min_z, max_z = cursor.fetchone()
if min_z is None:
raise ValueError("Unable to get min/max zoom - tile data is empty")
metadata["minzoom"] = min_z
metadata["maxzoom"] = max_z
self._update_metadata_db(cursor, metadata, reset)
conn.commit()
print(f"New metadata values in {file}")
self.print_all(file=file)
@staticmethod
def _get_metadata(file) -> Dict[str, str]:
with sqlite3.connect(file) as conn:
return {k: v for k, v in
query(conn, "SELECT name, value FROM metadata", [])}
def _update_metadata_db(self, cursor, metadata, reset):
if reset:
# noinspection SqlWithoutWhere
cursor.execute("DELETE FROM metadata;")
for name, value in metadata.items():
_, is_valid = self.validate(name, value)
if not is_valid:
raise ValueError(f"Invalid {name}={value}")
cursor.execute(
"INSERT OR REPLACE INTO metadata(name, value) VALUES (?, ?);",
[name, value])
def validate(self, name, value):
is_valid = True
if name == 'mtime':
try:
val = datetime.fromtimestamp(int(value) / 1000.0)
value = f'{value} ({val.isoformat()})'
except ValueError:
is_valid = False
value = f'{value} (invalid)'
elif name in ('filesize', 'maskLevel', 'minzoom', 'maxzoom'):
try:
value = f'{int(value):,}'
except ValueError:
is_valid = False
value = f'{value} (invalid)'
elif name == 'json':
try:
val = json.loads(value)
if self.show_json:
value = f'(valid JSON value)'
else:
value = '(The value is a valid JSON, use --show-json for raw dump)'
res = []
for v in val["vector_layers"]:
desc = ""
if "description" in v:
desc = shorten_str(v["description"], 40)
fields = []
names = []
for fld in v["fields"].keys():
if fld.startswith("name:"):
names.append(fld[5:])
else:
fields.append(fld)
fields_str = ", ".join(v for v in fields)
if names:
                        fields_str += f", name:* ({shorten_str(','.join(names), 20)})"
res.append({
"layer": v["id"],
"minZ": v["minzoom"],
"maxZ": v["maxzoom"],
"fields": fields_str,
"description": desc
})
value += "\n\n" + tabulate(res, headers="keys")
if self.show_json:
value += "\n\n"
value += json.dumps(val, ensure_ascii=False, indent=2)
except ValueError:
is_valid = False
if self.show_json:
value = f'(invalid JSON value)\n{value}'
else:
value = f'(invalid JSON value, use --show-json to see it)'
return value, is_valid
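# --- Usage sketch ------------------------------------------------------------
# Assumed, minimal calls against the Metadata helpers above; 'tiles.mbtiles'
# and the attribution text are placeholders.
def _example_metadata(path='tiles.mbtiles'):
    md = Metadata(path, show_json=False, show_ranges=True)
    md.print_all()                                       # every metadata row + per-zoom tile ranges
    md.set_value('attribution', 'Example attribution')   # INSERT OR REPLACE into metadata
    md.get_value('attribution')                          # prints the stored value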
|
import json
import os
import sqlite3
from datetime import datetime
from pathlib import Path
import asyncpg
from tabulate import tabulate
from typing import Dict
from openmaptiles.pgutils import get_postgis_version, get_vector_layers
from openmaptiles.sqlite_utils import query
from openmaptiles.sqltomvt import MvtGenerator
from openmaptiles.tileset import Tileset
from openmaptiles.utils import print_err, Bbox, print_tile, shorten_str
class KeyFinder:
"""Search mbtiles for frequently used duplicate tiles"""
def __init__(self,
mbtiles,
show_size=None,
show_examples=None,
outfile: str = None,
zoom=None,
min_dup_count=None,
verbose=False) -> None:
self.mbtiles = mbtiles
if min_dup_count is not None:
min_dup_count = int(min_dup_count)
if min_dup_count < 2:
raise ValueError(f"min_dup_count must be an integer ≥ 2")
self.min_dup_count = min_dup_count
else:
self.min_dup_count = 50 if zoom and zoom > 12 else 20
self.use_stdout = outfile == '-'
self.zoom = zoom
self.verbose = verbose
if outfile:
self.outfile = True if self.use_stdout else Path(outfile)
else:
self.outfile = None
self.show_size = self.verbose if show_size is None else show_size
self.show_examples = self.verbose if show_examples is None else show_examples
def run(self):
if self.outfile and not self.use_stdout:
with self.outfile.open("w"):
pass # create or truncate file, but don't write anything to it yet
with sqlite3.connect(self.mbtiles) as conn:
results = []
if self.show_size:
sql = "SELECT cnt, dups.tile_id, LENGTH(tile_data) FROM (" \
" SELECT tile_id, COUNT(*) AS cnt FROM map " \
" GROUP BY tile_id HAVING cnt >= ?" \
") dups JOIN images ON images.tile_id = dups.tile_id"
sql_opts = [self.min_dup_count]
if self.zoom:
sql += f" WHERE zoom_level=?"
sql_opts.append(self.zoom)
else:
sql_opts = []
sql = "SELECT COUNT(*) cnt, tile_id FROM map"
if self.zoom:
sql += f" WHERE zoom_level=?"
sql_opts.append(self.zoom)
sql += " GROUP BY tile_id HAVING cnt >= ?"
sql_opts.append(self.min_dup_count)
for vals in query(conn, sql, sql_opts):
results.append(vals)
results.sort(reverse=True)
size = None
examples = None
for vals in results:
if len(vals) == 3:
count, tile_id, size = vals
else:
count, tile_id = vals
if self.show_examples:
example_sql = "SELECT zoom_level, tile_column, tile_row FROM map " \
"WHERE tile_id = ? LIMIT 5"
examples = [f'{z}/{x}/{y}' for z, x, y in
query(conn, example_sql, [tile_id])]
if self.verbose:
res = f"{tile_id} x {count:,}"
if self.show_size:
res += f', {size:,} bytes'
if self.show_examples:
res += ', examples: ' + ', '.join(examples)
print_err(res)
results = [v[1] for v in results]
if self.use_stdout:
for v in results:
print(v)
elif self.outfile:
with self.outfile.open("a") as f:
f.writelines([str(v) + '\n' for v in results])
return results
class Imputer:
def __init__(self, mbtiles, keys, zoom, outfile: str = None,
verbose=False) -> None:
self.mbtiles = mbtiles
self.keys = {k: 0 for k in keys}
self.zoom = zoom
self.use_stdout = outfile == '-'
self.verbose = verbose or not self.use_stdout
if outfile:
self.outfile = True if self.use_stdout else Path(outfile)
else:
self.outfile = None
def run(self):
with sqlite3.connect(self.mbtiles) as conn:
limit_to_keys = not self.outfile
if self.outfile and not self.use_stdout:
with self.outfile.open("w"):
pass # create or truncate file, but don't write anything to it yet
keyed_tiles = 0
nokey_tiles = 0
cursor = conn.cursor()
key_stats = self.keys
for with_key, without_key in self.tile_batches(conn, limit_to_keys):
without_key.sort()
if with_key:
with_key.sort()
for val in with_key:
key_stats[val[3]] += 1
cursor.executemany(
'INSERT OR IGNORE INTO map'
'(zoom_level, tile_column, tile_row, tile_id)'
' VALUES(?,?,?,?)',
with_key)
keyed_tiles += cursor.rowcount
conn.commit()
if without_key:
if self.use_stdout:
for v in without_key:
print(v, end='')
else:
with self.outfile.open("a") as f:
f.writelines(without_key)
nokey_tiles += len(without_key)
if self.verbose:
for k, c in key_stats.items():
print_err(f"{k} - added {c:,}")
print_err(f'Total imputed tiles: {keyed_tiles:,}')
if nokey_tiles:
print_err(f'Total tiles need to be generated: {nokey_tiles:,}')
def tile_batches(self, conn: sqlite3.Connection, limit_to_keys=False):
"""Generate batches of tiles to be processed for the new zoom,
based on the previous zoom level. Each yield contains two batches:
one with "empty" tiles (those that match known keys),
and another with non-empty tiles (only if limit_to_keys is False).
The first batch can be inserted into mbtiles db as is.
The second batch will be used as a list of tiles to be generated.
"""
batch_size = 1000000
zoom = self.zoom
search_zoom = zoom - 1
sql = f"SELECT tile_column, tile_row, tile_id FROM map WHERE zoom_level=?"
sql_args = [search_zoom]
if limit_to_keys:
sql += f" and tile_id IN ({','.join(('?' * len(self.keys)))})"
sql_args += self.keys
with_key = []
without_key = []
max_y = 2 ** search_zoom - 1
for x, y, key in query(conn, sql, sql_args):
if limit_to_keys or key in self.keys:
with_key.append((zoom, x * 2, y * 2, key))
with_key.append((zoom, x * 2 + 1, y * 2, key))
with_key.append((zoom, x * 2, y * 2 + 1, key))
with_key.append((zoom, x * 2 + 1, y * 2 + 1, key))
else:
# mbtiles uses inverted Y (starts at the bottom)
ry = max_y - y
without_key.append(f"{zoom}/{x * 2}/{ry * 2}\n")
without_key.append(f"{zoom}/{x * 2 + 1}/{ry * 2}\n")
without_key.append(f"{zoom}/{x * 2}/{ry * 2 + 1}\n")
without_key.append(f"{zoom}/{x * 2 + 1}/{ry * 2 + 1}\n")
if len(with_key) > batch_size or len(without_key) > batch_size:
yield with_key, without_key
with_key = []
without_key = []
if with_key or without_key:
yield with_key, without_key
class Metadata:
def __init__(self, mbtiles: str, show_json: bool = False,
show_ranges: bool = False) -> None:
self.mbtiles = mbtiles
self.show_json = show_json
self.show_ranges = show_ranges
def print_all(self, file: str = None):
file = file or self.mbtiles
data = self._get_metadata(file)
if data:
width = max((len(v) for v in data.keys()))
for name, value in sorted(data.items(),
key=lambda v: v[0] if v[0] != 'json' else 'zz'):
print(f"{name:{width}} {self.validate(name, value)[0]}")
else:
print(f"There are no values present in {file} metadata table")
if self.show_ranges:
with sqlite3.connect(file) as conn:
sql = """\
SELECT zoom_level, COUNT(*) AS count,
MIN(tile_column) AS min_column, MAX(tile_column) AS max_column,
MIN(tile_row) AS min_row, MAX(tile_row) AS max_row
FROM map
GROUP BY zoom_level
"""
res = []
for z, cnt, min_x, max_x, min_y, max_y in sorted(query(conn, sql, [])):
res.append({
"Zoom": z,
"Tile count": f"{cnt:,}",
"Found tile ranges": f"{min_x},{min_y} x {max_x},{max_y}",
})
print("\n" + tabulate(res, headers="keys"))
def get_value(self, name):
with sqlite3.connect(self.mbtiles) as conn:
cursor = conn.cursor()
cursor.execute("SELECT value FROM metadata WHERE name=?", [name])
row = cursor.fetchone()
if row is None:
print_err(f"Metadata field '{name}' is not found")
exit(1)
print(row[0])
def set_value(self, name, value):
if value is not None:
_, is_valid = self.validate(name, value)
if not is_valid:
raise ValueError(f"Invalid {name}={value}")
with sqlite3.connect(self.mbtiles) as conn:
cursor = conn.cursor()
if value is None:
cursor.execute("DELETE FROM metadata WHERE name=?;", [name])
else:
cursor.execute(
"INSERT OR REPLACE INTO metadata(name, value) VALUES (?, ?);",
[name, value])
async def generate(self, tileset, reset, auto_minmax,
pghost, pgport, dbname, user, password):
ts = Tileset.parse(tileset)
print(
f'Connecting to PostgreSQL at {pghost}:{pgport}, db={dbname}, user={user}...')
try:
async with asyncpg.create_pool(
database=dbname, host=pghost, port=pgport, user=user,
password=password, min_size=1, max_size=1,
) as pool:
async with pool.acquire() as conn:
mvt = MvtGenerator(
ts,
postgis_ver=await get_postgis_version(conn),
zoom='$1', x='$2', y='$3',
)
json_data = dict(vector_layers=await get_vector_layers(conn, mvt))
except ConnectionError as err:
print(f"Unable to connect to Postgres database: {err}")
raise err
# Convert tileset to the metadata object according to mbtiles 1.3 spec
# https://github.com/mapbox/mbtiles-spec/blob/master/1.3/spec.md#content
metadata = dict(
# MUST
name=ts.name,
format="pbf",
json=json.dumps(json_data, ensure_ascii=False, separators=(',', ':')),
# SHOULD
bounds=",".join((str(v) for v in ts.bounds)),
center=",".join((str(v) for v in ts.center)),
minzoom=str(ts.minzoom),
maxzoom=str(ts.maxzoom),
# MAY
attribution=ts.attribution,
description=ts.description,
version=ts.version,
# EXTRAS
id=ts.id,
)
self._update_metadata(metadata, auto_minmax, reset, self.mbtiles,
ts.center[2])
def copy(self, target_mbtiles, reset, auto_minmax):
metadata = self._get_metadata(self.mbtiles)
self._update_metadata(metadata, auto_minmax, reset, target_mbtiles)
def show_tile(self, zoom, x, y, show_names, summary):
with sqlite3.connect(self.mbtiles) as conn:
sql = "SELECT tile_data FROM tiles " \
"WHERE zoom_level=? AND tile_column=? AND tile_row=?"
for row in query(conn, sql, [zoom, x, y]):
print_tile(row[0], show_names, summary, f"{zoom}/{x}/{y}")
break
else:
print(f"Tile {zoom}/{x}/{y} not found")
def _update_metadata(self, metadata, auto_minmax, reset, file, center_zoom=None):
def update_from_env(param, env_var):
val = os.environ.get(env_var)
if val is not None:
metadata[param] = val
update_from_env('name', 'METADATA_NAME')
update_from_env('minzoom', 'MIN_ZOOM')
update_from_env('maxzoom', 'MAX_ZOOM')
update_from_env('attribution', 'METADATA_ATTRIBUTION')
update_from_env('description', 'METADATA_DESCRIPTION')
update_from_env('version', 'METADATA_VERSION')
metadata['filesize'] = os.path.getsize(file)
bbox_str = os.environ.get('BBOX')
if bbox_str:
bbox = Bbox(bbox=bbox_str,
center_zoom=os.environ.get('CENTER_ZOOM', center_zoom))
metadata["bounds"] = bbox.bounds_str()
metadata["center"] = bbox.center_str()
with sqlite3.connect(file) as conn:
cursor = conn.cursor()
if auto_minmax:
cursor.execute("SELECT MIN(zoom_level), MAX(zoom_level) FROM map")
min_z, max_z = cursor.fetchone()
if min_z is None:
raise ValueError("Unable to get min/max zoom - tile data is empty")
metadata["minzoom"] = min_z
metadata["maxzoom"] = max_z
self._update_metadata_db(cursor, metadata, reset)
conn.commit()
print(f"New metadata values in {file}")
self.print_all(file=file)
@staticmethod
def _get_metadata(file) -> Dict[str, str]:
with sqlite3.connect(file) as conn:
return {k: v for k, v in
query(conn, "SELECT name, value FROM metadata", [])}
def _update_metadata_db(self, cursor, metadata, reset):
if reset:
# noinspection SqlWithoutWhere
cursor.execute("DELETE FROM metadata;")
for name, value in metadata.items():
_, is_valid = self.validate(name, value)
if not is_valid:
raise ValueError(f"Invalid {name}={value}")
cursor.execute(
"INSERT OR REPLACE INTO metadata(name, value) VALUES (?, ?);",
[name, value])
def validate(self, name, value):
is_valid = True
if name == 'mtime':
try:
val = datetime.fromtimestamp(int(value) / 1000.0)
value = f'{value} ({val.isoformat()})'
except ValueError:
is_valid = False
value = f'{value} (invalid)'
elif name in ('filesize', 'maskLevel', 'minzoom', 'maxzoom'):
try:
value = f'{int(value):,}'
except ValueError:
is_valid = False
value = f'{value} (invalid)'
elif name == 'json':
try:
val = json.loads(value)
if self.show_json:
value = f'(valid JSON value)'
else:
value = '(The value is a valid JSON, use --show-json for raw dump)'
res = []
for v in val["vector_layers"]:
desc = ""
if "description" in v:
desc = shorten_str(v["description"], 40)
fields = []
names = []
for fld in v["fields"].keys():
if fld.startswith("name:"):
names.append(fld[5:])
else:
fields.append(fld)
fields_str = ", ".join(v for v in fields)
if names:
fields_str += f", name:* ({shorten_str(','.join(names), 20)})"
res.append({
"layer": v["id"],
"minZ": v["minzoom"],
"maxZ": v["maxzoom"],
"fields": fields_str,
"description": desc
})
value += "\n\n" + tabulate(res, headers="keys")
if self.show_json:
value += "\n\n"
value += json.dumps(val, ensure_ascii=False, indent=2)
except ValueError:
is_valid = False
if self.show_json:
value = f'(invalid JSON value)\n{value}'
else:
value = f'(invalid JSON value, use --show-json to see it)'
return value, is_valid
|
import itertools
import logging
import os.path as osp
import tempfile
from collections import OrderedDict
import mmcv
import numpy as np
import pycocotools
from mmcv.utils import print_log
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from terminaltables import AsciiTable
from mmdet.core import eval_recalls
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class CocoDataset(CustomDataset):
CLASSES = ('schwarze_Schraube',)## check mark ##
def load_annotations(self, ann_file):
"""Load annotation from COCO style annotation file.
Args:
ann_file (str): Path of annotation file.
Returns:
list[dict]: Annotation info from COCO api.
"""
if not getattr(pycocotools, '__version__', '0') >= '12.0.2':
raise AssertionError(
'Incompatible version of pycocotools is installed. '
'Run pip uninstall pycocotools first. Then run pip '
'install mmpycocotools to install open-mmlab forked '
'pycocotools.')
self.coco = COCO(ann_file)
self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.img_ids = self.coco.get_img_ids()
data_infos = []
total_ann_ids = []
for i in self.img_ids:
info = self.coco.load_imgs([i])[0]
info['filename'] = info['file_name']
data_infos.append(info)
ann_ids = self.coco.get_ann_ids(img_ids=[i])
total_ann_ids.extend(ann_ids)
assert len(set(total_ann_ids)) == len(
total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
return data_infos
def get_ann_info(self, idx):
"""Get COCO annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return self._parse_ann_info(self.data_infos[idx], ann_info)
def get_cat_ids(self, idx):
"""Get COCO category ids by index.
Args:
idx (int): Index of data.
Returns:
list[int]: All categories in the image of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return [ann['category_id'] for ann in ann_info]
def _filter_imgs(self, min_size=32):
"""Filter images too small or without ground truths."""
valid_inds = []
# obtain images that contain annotation
ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
# obtain images that contain annotations of the required categories
ids_in_cat = set()
for i, class_id in enumerate(self.cat_ids):
ids_in_cat |= set(self.coco.cat_img_map[class_id])
# merge the image id sets of the two conditions and use the merged set
# to filter out images if self.filter_empty_gt=True
ids_in_cat &= ids_with_ann
valid_img_ids = []
for i, img_info in enumerate(self.data_infos):
img_id = self.img_ids[i]
if self.filter_empty_gt and img_id not in ids_in_cat:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
valid_img_ids.append(img_id)
self.img_ids = valid_img_ids
return valid_inds
def _parse_ann_info(self, img_info, ann_info):
"""Parse bbox and mask annotation.
Args:
ann_info (list[dict]): Annotation info of an image.
with_mask (bool): Whether to parse mask annotations.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,\
labels, masks, seg_map. "masks" are raw annotations and not \
decoded into binary masks.
"""
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
gt_masks_ann = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
if inter_w * inter_h == 0:
continue
if ann['area'] <= 0 or w < 1 or h < 1:
continue
if ann['category_id'] not in self.cat_ids:
continue
bbox = [x1, y1, x1 + w, y1 + h]
if ann.get('iscrowd', False):
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann['category_id']])
gt_masks_ann.append(ann.get('segmentation', None))
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
seg_map = img_info['filename'].replace('jpg', 'png')
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
bboxes_ignore=gt_bboxes_ignore,
masks=gt_masks_ann,
seg_map=seg_map)
return ann
def xyxy2xywh(self, bbox):
"""Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
evaluation.
Args:
bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
``xyxy`` order.
Returns:
list[float]: The converted bounding boxes, in ``xywh`` order.
"""
_bbox = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0],
_bbox[3] - _bbox[1],
]
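    # Example: self.xyxy2xywh(np.array([10., 20., 30., 60.])) == [10.0, 20.0, 20.0, 40.0]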
def _proposal2json(self, results):
"""Convert proposal results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
bboxes = results[idx]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = 1
json_results.append(data)
return json_results
def _det2json(self, results):
"""Convert detection results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
result = results[idx]
for label in range(len(result)):
bboxes = result[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
json_results.append(data)
return json_results
def _segm2json(self, results):
"""Convert instance segmentation results to COCO json style."""
bbox_json_results = []
segm_json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
det, seg = results[idx]
for label in range(len(det)):
# bbox results
bboxes = det[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
bbox_json_results.append(data)
# segm results
# some detectors use different scores for bbox and mask
if isinstance(seg, tuple):
segms = seg[0][label]
mask_score = seg[1][label]
else:
segms = seg[label]
mask_score = [bbox[4] for bbox in bboxes]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(mask_score[i])
data['category_id'] = self.cat_ids[label]
if isinstance(segms[i]['counts'], bytes):
segms[i]['counts'] = segms[i]['counts'].decode()
data['segmentation'] = segms[i]
segm_json_results.append(data)
return bbox_json_results, segm_json_results
def results2json(self, results, outfile_prefix):
"""Dump the detection results to a COCO style json file.
There are 3 types of results: proposals, bbox predictions, mask
predictions, and they have different data types. This method will
automatically recognize the type, and dump them to json files.
Args:
results (list[list | tuple | ndarray]): Testing results of the
dataset.
outfile_prefix (str): The filename prefix of the json files. If the
prefix is "somepath/xxx", the json files will be named
"somepath/xxx.bbox.json", "somepath/xxx.segm.json",
"somepath/xxx.proposal.json".
Returns:
dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
values are corresponding filenames.
"""
result_files = dict()
if isinstance(results[0], list):
json_results = self._det2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
mmcv.dump(json_results, result_files['bbox'])
elif isinstance(results[0], tuple):
json_results = self._segm2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
result_files['segm'] = f'{outfile_prefix}.segm.json'
mmcv.dump(json_results[0], result_files['bbox'])
mmcv.dump(json_results[1], result_files['segm'])
elif isinstance(results[0], np.ndarray):
json_results = self._proposal2json(results)
result_files['proposal'] = f'{outfile_prefix}.proposal.json'
mmcv.dump(json_results, result_files['proposal'])
else:
raise TypeError('invalid type of results')
return result_files
def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
gt_bboxes = []
for i in range(len(self.img_ids)):
ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
ann_info = self.coco.load_anns(ann_ids)
if len(ann_info) == 0:
gt_bboxes.append(np.zeros((0, 4)))
continue
bboxes = []
for ann in ann_info:
if ann.get('ignore', False) or ann['iscrowd']:
continue
x1, y1, w, h = ann['bbox']
bboxes.append([x1, y1, x1 + w, y1 + h])
bboxes = np.array(bboxes, dtype=np.float32)
if bboxes.shape[0] == 0:
bboxes = np.zeros((0, 4))
gt_bboxes.append(bboxes)
recalls = eval_recalls(
gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
ar = recalls.mean(axis=1)
return ar
def format_results(self, results, jsonfile_prefix=None, **kwargs):
"""Format the results to json (standard format for COCO evaluation).
Args:
results (list[tuple | numpy.ndarray]): Testing results of the
dataset.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
Returns:
tuple: (result_files, tmp_dir), result_files is a dict containing \
                the json filepaths, tmp_dir is the temporary directory created \
for saving json files when jsonfile_prefix is not specified.
"""
assert isinstance(results, list), 'results must be a list'
assert len(results) == len(self), (
'The length of results is not equal to the dataset len: {} != {}'.
format(len(results), len(self)))
if jsonfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
jsonfile_prefix = osp.join(tmp_dir.name, 'results')
else:
tmp_dir = None
result_files = self.results2json(results, jsonfile_prefix)
return result_files, tmp_dir
def evaluate(self,
results,
metric='bbox',
logger=None,
jsonfile_prefix=None,
classwise=False,
proposal_nums=(100, 300, 1000),
iou_thrs=None,
metric_items=None):
"""Evaluation in COCO protocol.
Args:
results (list[list | tuple]): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. Options are
'bbox', 'segm', 'proposal', 'proposal_fast'.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
            classwise (bool): Whether to evaluate the AP for each class.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thrs (Sequence[float], optional): IoU threshold used for
evaluating recalls/mAPs. If set to a list, the average of all
IoUs will also be computed. If not specified, [0.50, 0.55,
0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
Default: None.
metric_items (list[str] | str, optional): Metric items that will
be returned. If not specified, ``['AR@100', 'AR@300',
'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
'mAP_s', 'mAP_m', 'mAP_l']`` will be used when
``metric=='bbox' or metric=='segm'``.
Returns:
dict[str, float]: COCO style evaluation metric.
"""
metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
if iou_thrs is None:
iou_thrs = np.linspace(
.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
if metric_items is not None:
if not isinstance(metric_items, list):
metric_items = [metric_items]
result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
eval_results = OrderedDict()
cocoGt = self.coco
for metric in metrics:
msg = f'Evaluating {metric}...'
if logger is None:
msg = '\n' + msg
print_log(msg, logger=logger)
if metric == 'proposal_fast':
ar = self.fast_eval_recall(
results, proposal_nums, iou_thrs, logger='silent')
log_msg = []
for i, num in enumerate(proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
log_msg = ''.join(log_msg)
print_log(log_msg, logger=logger)
continue
if metric not in result_files:
raise KeyError(f'{metric} is not in results')
try:
cocoDt = cocoGt.loadRes(result_files[metric])
except IndexError:
print_log(
                    'The testing results of the whole dataset are empty.',
logger=logger,
level=logging.ERROR)
break
iou_type = 'bbox' if metric == 'proposal' else metric
cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
cocoEval.params.catIds = self.cat_ids
cocoEval.params.imgIds = self.img_ids
cocoEval.params.maxDets = list(proposal_nums)
cocoEval.params.iouThrs = iou_thrs
# mapping of cocoEval.stats
coco_metric_names = {
'mAP': 0,
'mAP_50': 1,
'mAP_75': 2,
'mAP_s': 3,
'mAP_m': 4,
'mAP_l': 5,
'AR@100': 6,
'AR@300': 7,
'AR@1000': 8,
'AR_s@1000': 9,
'AR_m@1000': 10,
'AR_l@1000': 11
}
if metric_items is not None:
for metric_item in metric_items:
if metric_item not in coco_metric_names:
raise KeyError(
f'metric item {metric_item} is not supported')
if metric == 'proposal':
cocoEval.params.useCats = 0
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if metric_items is None:
metric_items = [
'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
'AR_m@1000', 'AR_l@1000'
]
for item in metric_items:
val = float(
f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
eval_results[item] = val
else:
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if classwise: # Compute per-category AP
# Compute per-category AP
# from https://github.com/facebookresearch/detectron2/
precisions = cocoEval.eval['precision']
# precision: (iou, recall, cls, area range, max dets)
assert len(self.cat_ids) == precisions.shape[2]
results_per_category = []
for idx, catId in enumerate(self.cat_ids):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
nm = self.coco.loadCats(catId)[0]
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
if precision.size:
ap = np.mean(precision)
else:
ap = float('nan')
results_per_category.append(
                        (f'{nm["name"]}', f'{float(ap):0.3f}'))
num_columns = min(6, len(results_per_category) * 2)
results_flatten = list(
itertools.chain(*results_per_category))
headers = ['category', 'AP'] * (num_columns // 2)
results_2d = itertools.zip_longest(*[
results_flatten[i::num_columns]
for i in range(num_columns)
])
table_data = [headers]
table_data += [result for result in results_2d]
table = AsciiTable(table_data)
print_log('\n' + table.table, logger=logger)
if metric_items is None:
metric_items = [
'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
]
for metric_item in metric_items:
key = f'{metric}_{metric_item}'
val = float(
f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
)
eval_results[key] = val
ap = cocoEval.stats[:6]
eval_results[f'{metric}_mAP_copypaste'] = (
f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
f'{ap[4]:.3f} {ap[5]:.3f}')
if tmp_dir is not None:
tmp_dir.cleanup()
return eval_results
|
import itertools
import logging
import os.path as osp
import tempfile
from collections import OrderedDict
import mmcv
import numpy as np
import pycocotools
from mmcv.utils import print_log
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from terminaltables import AsciiTable
from mmdet.core import eval_recalls
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class CocoDataset(CustomDataset):
    CLASSES = ('schwarze_Schraube',)  # check mark
def load_annotations(self, ann_file):
"""Load annotation from COCO style annotation file.
Args:
ann_file (str): Path of annotation file.
Returns:
list[dict]: Annotation info from COCO api.
"""
if not getattr(pycocotools, '__version__', '0') >= '12.0.2':
raise AssertionError(
'Incompatible version of pycocotools is installed. '
'Run pip uninstall pycocotools first. Then run pip '
'install mmpycocotools to install open-mmlab forked '
'pycocotools.')
self.coco = COCO(ann_file)
self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.img_ids = self.coco.get_img_ids()
data_infos = []
total_ann_ids = []
for i in self.img_ids:
info = self.coco.load_imgs([i])[0]
info['filename'] = info['file_name']
data_infos.append(info)
ann_ids = self.coco.get_ann_ids(img_ids=[i])
total_ann_ids.extend(ann_ids)
assert len(set(total_ann_ids)) == len(
total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
return data_infos
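    # Note: each entry of the ``data_infos`` returned above is the raw COCO image
    # record plus a ``filename`` key copied from ``file_name``, e.g. (illustrative
    # values only) {'id': 1, 'file_name': '000001.jpg', 'filename': '000001.jpg',
    # 'width': 640, 'height': 480}.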
def get_ann_info(self, idx):
"""Get COCO annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return self._parse_ann_info(self.data_infos[idx], ann_info)
def get_cat_ids(self, idx):
"""Get COCO category ids by index.
Args:
idx (int): Index of data.
Returns:
list[int]: All categories in the image of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return [ann['category_id'] for ann in ann_info]
def _filter_imgs(self, min_size=32):
"""Filter images too small or without ground truths."""
valid_inds = []
# obtain images that contain annotation
ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
# obtain images that contain annotations of the required categories
ids_in_cat = set()
for i, class_id in enumerate(self.cat_ids):
ids_in_cat |= set(self.coco.cat_img_map[class_id])
# merge the image id sets of the two conditions and use the merged set
# to filter out images if self.filter_empty_gt=True
ids_in_cat &= ids_with_ann
valid_img_ids = []
for i, img_info in enumerate(self.data_infos):
img_id = self.img_ids[i]
if self.filter_empty_gt and img_id not in ids_in_cat:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
valid_img_ids.append(img_id)
self.img_ids = valid_img_ids
return valid_inds
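    # Note: ``valid_inds`` above indexes into ``self.data_infos`` while
    # ``self.img_ids`` is rewritten in place, keeping the two aligned after
    # filtering.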
def _parse_ann_info(self, img_info, ann_info):
"""Parse bbox and mask annotation.
Args:
            img_info (dict): Image info dict of the corresponding image.
            ann_info (list[dict]): Annotation info of the image.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,\
labels, masks, seg_map. "masks" are raw annotations and not \
decoded into binary masks.
"""
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
gt_masks_ann = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
if inter_w * inter_h == 0:
continue
if ann['area'] <= 0 or w < 1 or h < 1:
continue
if ann['category_id'] not in self.cat_ids:
continue
bbox = [x1, y1, x1 + w, y1 + h]
if ann.get('iscrowd', False):
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann['category_id']])
gt_masks_ann.append(ann.get('segmentation', None))
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
seg_map = img_info['filename'].replace('jpg', 'png')
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
bboxes_ignore=gt_bboxes_ignore,
masks=gt_masks_ann,
seg_map=seg_map)
return ann
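    # Note: COCO stores boxes as [x, y, w, h]; the loop above converts them to
    # [x1, y1, x2, y2] and collects ``iscrowd`` annotations separately in
    # ``bboxes_ignore``.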
def xyxy2xywh(self, bbox):
"""Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
evaluation.
Args:
bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
``xyxy`` order.
Returns:
list[float]: The converted bounding boxes, in ``xywh`` order.
"""
_bbox = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0],
_bbox[3] - _bbox[1],
]
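    # Worked example (illustrative): the ``xyxy`` box [10, 20, 50, 80] maps to
    # [10, 20, 40, 60] in ``xywh`` order, since width = 50 - 10 and
    # height = 80 - 20.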
def _proposal2json(self, results):
"""Convert proposal results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
bboxes = results[idx]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = 1
json_results.append(data)
return json_results
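    # Note: proposals are class-agnostic, so ``category_id`` is fixed to 1 above
    # purely to satisfy the COCO json schema.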
def _det2json(self, results):
"""Convert detection results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
result = results[idx]
for label in range(len(result)):
bboxes = result[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
json_results.append(data)
return json_results
def _segm2json(self, results):
"""Convert instance segmentation results to COCO json style."""
bbox_json_results = []
segm_json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
det, seg = results[idx]
for label in range(len(det)):
# bbox results
bboxes = det[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
bbox_json_results.append(data)
# segm results
# some detectors use different scores for bbox and mask
if isinstance(seg, tuple):
segms = seg[0][label]
mask_score = seg[1][label]
else:
segms = seg[label]
mask_score = [bbox[4] for bbox in bboxes]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(mask_score[i])
data['category_id'] = self.cat_ids[label]
if isinstance(segms[i]['counts'], bytes):
segms[i]['counts'] = segms[i]['counts'].decode()
data['segmentation'] = segms[i]
segm_json_results.append(data)
return bbox_json_results, segm_json_results
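    # Note: RLE ``counts`` may come back from pycocotools as ``bytes``; decoding
    # them to ``str`` above keeps the segmentation payload JSON-serializable.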
def results2json(self, results, outfile_prefix):
"""Dump the detection results to a COCO style json file.
There are 3 types of results: proposals, bbox predictions, mask
predictions, and they have different data types. This method will
automatically recognize the type, and dump them to json files.
Args:
results (list[list | tuple | ndarray]): Testing results of the
dataset.
outfile_prefix (str): The filename prefix of the json files. If the
prefix is "somepath/xxx", the json files will be named
"somepath/xxx.bbox.json", "somepath/xxx.segm.json",
"somepath/xxx.proposal.json".
Returns:
dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
values are corresponding filenames.
"""
result_files = dict()
if isinstance(results[0], list):
json_results = self._det2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
mmcv.dump(json_results, result_files['bbox'])
elif isinstance(results[0], tuple):
json_results = self._segm2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
result_files['segm'] = f'{outfile_prefix}.segm.json'
mmcv.dump(json_results[0], result_files['bbox'])
mmcv.dump(json_results[1], result_files['segm'])
elif isinstance(results[0], np.ndarray):
json_results = self._proposal2json(results)
result_files['proposal'] = f'{outfile_prefix}.proposal.json'
mmcv.dump(json_results, result_files['proposal'])
else:
raise TypeError('invalid type of results')
return result_files
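    # Usage sketch (hypothetical prefix): ``dataset.results2json(results,
    # 'work_dir/out')`` writes ``work_dir/out.bbox.json`` (plus ``out.segm.json``
    # for tuple results) and returns a dict mapping result types to those paths.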
def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
gt_bboxes = []
for i in range(len(self.img_ids)):
ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
ann_info = self.coco.load_anns(ann_ids)
if len(ann_info) == 0:
gt_bboxes.append(np.zeros((0, 4)))
continue
bboxes = []
for ann in ann_info:
if ann.get('ignore', False) or ann['iscrowd']:
continue
x1, y1, w, h = ann['bbox']
bboxes.append([x1, y1, x1 + w, y1 + h])
bboxes = np.array(bboxes, dtype=np.float32)
if bboxes.shape[0] == 0:
bboxes = np.zeros((0, 4))
gt_bboxes.append(bboxes)
recalls = eval_recalls(
gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
ar = recalls.mean(axis=1)
return ar
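    # Note: ``eval_recalls`` above returns an array of shape
    # (num_proposal_nums, num_iou_thrs); averaging over axis 1 yields one AR
    # value per proposal budget.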
def format_results(self, results, jsonfile_prefix=None, **kwargs):
"""Format the results to json (standard format for COCO evaluation).
Args:
results (list[tuple | numpy.ndarray]): Testing results of the
dataset.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
Returns:
tuple: (result_files, tmp_dir), result_files is a dict containing \
the json filepaths, tmp_dir is the temporal directory created \
for saving json files when jsonfile_prefix is not specified.
"""
assert isinstance(results, list), 'results must be a list'
assert len(results) == len(self), (
'The length of results is not equal to the dataset len: {} != {}'.
format(len(results), len(self)))
if jsonfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
jsonfile_prefix = osp.join(tmp_dir.name, 'results')
else:
tmp_dir = None
result_files = self.results2json(results, jsonfile_prefix)
return result_files, tmp_dir
def evaluate(self,
results,
metric='bbox',
logger=None,
jsonfile_prefix=None,
classwise=False,
proposal_nums=(100, 300, 1000),
iou_thrs=None,
metric_items=None):
"""Evaluation in COCO protocol.
Args:
results (list[list | tuple]): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. Options are
'bbox', 'segm', 'proposal', 'proposal_fast'.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
            classwise (bool): Whether to evaluate the AP for each class.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thrs (Sequence[float], optional): IoU threshold used for
evaluating recalls/mAPs. If set to a list, the average of all
IoUs will also be computed. If not specified, [0.50, 0.55,
0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
Default: None.
metric_items (list[str] | str, optional): Metric items that will
be returned. If not specified, ``['AR@100', 'AR@300',
'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
'mAP_s', 'mAP_m', 'mAP_l']`` will be used when
``metric=='bbox' or metric=='segm'``.
Returns:
dict[str, float]: COCO style evaluation metric.
"""
metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
if iou_thrs is None:
iou_thrs = np.linspace(
.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
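            # The linspace above reproduces the documented default thresholds
            # [0.50, 0.55, ..., 0.95] (10 values in steps of 0.05).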
if metric_items is not None:
if not isinstance(metric_items, list):
metric_items = [metric_items]
result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
eval_results = OrderedDict()
cocoGt = self.coco
for metric in metrics:
msg = f'Evaluating {metric}...'
if logger is None:
msg = '\n' + msg
print_log(msg, logger=logger)
if metric == 'proposal_fast':
ar = self.fast_eval_recall(
results, proposal_nums, iou_thrs, logger='silent')
log_msg = []
for i, num in enumerate(proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
log_msg = ''.join(log_msg)
print_log(log_msg, logger=logger)
continue
if metric not in result_files:
raise KeyError(f'{metric} is not in results')
try:
cocoDt = cocoGt.loadRes(result_files[metric])
except IndexError:
print_log(
                    'The testing results of the whole dataset are empty.',
logger=logger,
level=logging.ERROR)
break
iou_type = 'bbox' if metric == 'proposal' else metric
cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
cocoEval.params.catIds = self.cat_ids
cocoEval.params.imgIds = self.img_ids
cocoEval.params.maxDets = list(proposal_nums)
cocoEval.params.iouThrs = iou_thrs
# mapping of cocoEval.stats
coco_metric_names = {
'mAP': 0,
'mAP_50': 1,
'mAP_75': 2,
'mAP_s': 3,
'mAP_m': 4,
'mAP_l': 5,
'AR@100': 6,
'AR@300': 7,
'AR@1000': 8,
'AR_s@1000': 9,
'AR_m@1000': 10,
'AR_l@1000': 11
}
if metric_items is not None:
for metric_item in metric_items:
if metric_item not in coco_metric_names:
raise KeyError(
f'metric item {metric_item} is not supported')
if metric == 'proposal':
cocoEval.params.useCats = 0
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if metric_items is None:
metric_items = [
'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
'AR_m@1000', 'AR_l@1000'
]
for item in metric_items:
val = float(
f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
eval_results[item] = val
else:
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if classwise: # Compute per-category AP
# Compute per-category AP
# from https://github.com/facebookresearch/detectron2/
precisions = cocoEval.eval['precision']
# precision: (iou, recall, cls, area range, max dets)
assert len(self.cat_ids) == precisions.shape[2]
results_per_category = []
for idx, catId in enumerate(self.cat_ids):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
nm = self.coco.loadCats(catId)[0]
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
if precision.size:
ap = np.mean(precision)
else:
ap = float('nan')
results_per_category.append(
(f'{nm["name"]}', f'{float(ap):0.3f}'))
num_columns = min(6, len(results_per_category) * 2)
results_flatten = list(
itertools.chain(*results_per_category))
headers = ['category', 'AP'] * (num_columns // 2)
results_2d = itertools.zip_longest(*[
results_flatten[i::num_columns]
for i in range(num_columns)
])
table_data = [headers]
table_data += [result for result in results_2d]
table = AsciiTable(table_data)
print_log('\n' + table.table, logger=logger)
if metric_items is None:
metric_items = [
'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
]
for metric_item in metric_items:
key = f'{metric}_{metric_item}'
val = float(
f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
)
eval_results[key] = val
ap = cocoEval.stats[:6]
eval_results[f'{metric}_mAP_copypaste'] = (
f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
f'{ap[4]:.3f} {ap[5]:.3f}')
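            # ``<metric>_mAP_copypaste`` packs the six headline numbers
            # (mAP, mAP_50, mAP_75, mAP_s, mAP_m, mAP_l) into one space-separated
            # string for convenient copy-paste into reports.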
if tmp_dir is not None:
tmp_dir.cleanup()
return eval_results
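# Minimal usage sketch: the annotation path is a hypothetical placeholder and the
# results are dummy single-class bbox outputs, so this only illustrates the call
# pattern of ``evaluate`` rather than a real run.
if __name__ == '__main__':
    dataset = CocoDataset(
        ann_file='data/annotations/instances_val.json',  # hypothetical path
        pipeline=[])
    # One (N, 5) array of [x1, y1, x2, y2, score] rows per class per image;
    # empty arrays stand in for "no detections".
    dummy_results = [[np.zeros((0, 5), dtype=np.float32)]
                     for _ in range(len(dataset))]
    print(dataset.evaluate(dummy_results, metric='bbox'))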
|
import MySQLdb
import json
from datetime import timedelta, datetime
from unittest.mock import patch, Mock, ANY
import sqlparse
from django.contrib.auth import get_user_model
from django.test import TestCase
from common.config import SysConfig
from sql.engines import EngineBase
from sql.engines.goinception import GoInceptionEngine
from sql.engines.models import ResultSet, ReviewSet, ReviewResult
from sql.engines.mssql import MssqlEngine
from sql.engines.mysql import MysqlEngine
from sql.engines.redis import RedisEngine
from sql.engines.pgsql import PgSQLEngine
from sql.engines.oracle import OracleEngine
from sql.engines.mongo import MongoEngine
from sql.engines.inception import InceptionEngine, _repair_json_str
from sql.models import Instance, SqlWorkflow, SqlWorkflowContent
User = get_user_model()
class TestReviewSet(TestCase):
def test_review_set(self):
new_review_set = ReviewSet()
new_review_set.rows = [{'id': '1679123'}]
self.assertIn('1679123', new_review_set.json())
class TestEngineBase(TestCase):
@classmethod
def setUpClass(cls):
cls.u1 = User(username='some_user', display='用户1')
cls.u1.save()
cls.ins1 = Instance(instance_name='some_ins', type='master', db_type='mssql', host='some_host',
port=1366, user='ins_user', password='some_str')
cls.ins1.save()
cls.wf1 = SqlWorkflow.objects.create(
workflow_name='some_name',
group_id=1,
group_name='g1',
engineer=cls.u1.username,
engineer_display=cls.u1.display,
audit_auth_groups='some_group',
create_time=datetime.now() - timedelta(days=1),
status='workflow_finish',
is_backup=True,
instance=cls.ins1,
db_name='some_db',
syntax_type=1
)
cls.wfc1 = SqlWorkflowContent.objects.create(
workflow=cls.wf1,
sql_content='some_sql',
execute_result=json.dumps([{
'id': 1,
'sql': 'some_content'
}]))
@classmethod
def tearDownClass(cls):
cls.wfc1.delete()
cls.wf1.delete()
cls.ins1.delete()
cls.u1.delete()
def test_init_with_ins(self):
engine = EngineBase(instance=self.ins1)
self.assertEqual(self.ins1.instance_name, engine.instance_name)
self.assertEqual(self.ins1.user, engine.user)
class TestMssql(TestCase):
@classmethod
def setUpClass(cls):
cls.ins1 = Instance(instance_name='some_ins', type='slave', db_type='mssql', host='some_host',
port=1366, user='ins_user', password='some_str')
cls.ins1.save()
cls.engine = MssqlEngine(instance=cls.ins1)
cls.wf = SqlWorkflow.objects.create(
workflow_name='some_name',
group_id=1,
group_name='g1',
engineer_display='',
audit_auth_groups='some_group',
create_time=datetime.now() - timedelta(days=1),
status='workflow_finish',
is_backup=True,
instance=cls.ins1,
db_name='some_db',
syntax_type=1
)
SqlWorkflowContent.objects.create(workflow=cls.wf, sql_content='insert into some_tb values (1)')
@classmethod
def tearDownClass(cls):
cls.ins1.delete()
cls.wf.delete()
SqlWorkflowContent.objects.all().delete()
@patch('sql.engines.mssql.pyodbc.connect')
def testGetConnection(self, connect):
new_engine = MssqlEngine(instance=self.ins1)
new_engine.get_connection()
connect.assert_called_once()
@patch('sql.engines.mssql.pyodbc.connect')
def testQuery(self, connect):
cur = Mock()
connect.return_value.cursor = cur
cur.return_value.execute = Mock()
cur.return_value.fetchmany.return_value = (('v1', 'v2'),)
cur.return_value.description = (('k1', 'some_other_des'), ('k2', 'some_other_des'))
new_engine = MssqlEngine(instance=self.ins1)
query_result = new_engine.query(sql='some_str', limit_num=100)
cur.return_value.execute.assert_called()
cur.return_value.fetchmany.assert_called_once_with(100)
connect.return_value.close.assert_called_once()
self.assertIsInstance(query_result, ResultSet)
@patch.object(MssqlEngine, 'query')
def testAllDb(self, mock_query):
db_result = ResultSet()
db_result.rows = [('db_1',), ('db_2',)]
mock_query.return_value = db_result
new_engine = MssqlEngine(instance=self.ins1)
dbs = new_engine.get_all_databases()
self.assertEqual(dbs.rows, ['db_1', 'db_2'])
@patch.object(MssqlEngine, 'query')
def testAllTables(self, mock_query):
table_result = ResultSet()
table_result.rows = [('tb_1', 'some_des'), ('tb_2', 'some_des')]
mock_query.return_value = table_result
new_engine = MssqlEngine(instance=self.ins1)
tables = new_engine.get_all_tables('some_db')
mock_query.assert_called_once_with(db_name='some_db', sql=ANY)
self.assertEqual(tables.rows, ['tb_1', 'tb_2'])
@patch.object(MssqlEngine, 'query')
def testAllColumns(self, mock_query):
db_result = ResultSet()
db_result.rows = [('col_1', 'type'), ('col_2', 'type2')]
mock_query.return_value = db_result
new_engine = MssqlEngine(instance=self.ins1)
dbs = new_engine.get_all_columns_by_tb('some_db', 'some_tb')
self.assertEqual(dbs.rows, ['col_1', 'col_2'])
@patch.object(MssqlEngine, 'query')
def testDescribe(self, mock_query):
new_engine = MssqlEngine(instance=self.ins1)
new_engine.describe_table('some_db', 'some_db')
mock_query.assert_called_once()
def testQueryCheck(self):
new_engine = MssqlEngine(instance=self.ins1)
        # Spot-check just one function
banned_sql = 'select concat(phone,1) from user_table'
check_result = new_engine.query_check(db_name='some_db', sql=banned_sql)
self.assertTrue(check_result.get('bad_query'))
banned_sql = 'select phone from user_table where phone=concat(phone,1)'
check_result = new_engine.query_check(db_name='some_db', sql=banned_sql)
self.assertTrue(check_result.get('bad_query'))
sp_sql = "sp_helptext '[SomeName].[SomeAction]'"
check_result = new_engine.query_check(db_name='some_db', sql=sp_sql)
self.assertFalse(check_result.get('bad_query'))
self.assertEqual(check_result.get('filtered_sql'), sp_sql)
def test_filter_sql(self):
new_engine = MssqlEngine(instance=self.ins1)
        # Spot-check just one function
banned_sql = 'select user from user_table'
check_result = new_engine.filter_sql(sql=banned_sql, limit_num=10)
self.assertEqual(check_result, "select top 10 user from user_table")
def test_execute_check(self):
new_engine = MssqlEngine(instance=self.ins1)
test_sql = 'use database\ngo\nsome sql1\nGO\nsome sql2\n\r\nGo\nsome sql3\n\r\ngO\n'
check_result = new_engine.execute_check(db_name=None, sql=test_sql)
self.assertIsInstance(check_result, ReviewSet)
self.assertEqual(check_result.rows[1].__dict__['sql'], "use database\n")
self.assertEqual(check_result.rows[2].__dict__['sql'], "\nsome sql1\n")
self.assertEqual(check_result.rows[4].__dict__['sql'], "\nsome sql3\n\r\n")
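        # The assertions above rely on ``execute_check`` splitting the script on
        # standalone GO separators regardless of letter case, one batch per row.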
@patch('sql.engines.mssql.MssqlEngine.execute')
def test_execute_workflow(self, mock_execute):
mock_execute.return_value.error = None
new_engine = MssqlEngine(instance=self.ins1)
new_engine.execute_workflow(self.wf)
        # execute is called once per backup table, plus once for the actual statement
mock_execute.assert_called()
self.assertEqual(1, mock_execute.call_count)
@patch('sql.engines.mssql.MssqlEngine.get_connection')
def test_execute(self, mock_connect):
mock_cursor = Mock()
mock_connect.return_value.cursor = mock_cursor
new_engine = MssqlEngine(instance=self.ins1)
execute_result = new_engine.execute('some_db', 'some_sql')
        # Verify the result: no exception raised
self.assertIsNone(execute_result.error)
self.assertEqual('some_sql', execute_result.full_sql)
self.assertEqual(2, len(execute_result.rows))
mock_cursor.return_value.execute.assert_called()
mock_cursor.return_value.commit.assert_called()
mock_cursor.reset_mock()
        # Verify the exception path
mock_cursor.return_value.execute.side_effect = Exception('Boom! some exception!')
execute_result = new_engine.execute('some_db', 'some_sql')
self.assertIn('Boom! some exception!', execute_result.error)
self.assertEqual('some_sql', execute_result.full_sql)
self.assertEqual(2, len(execute_result.rows))
mock_cursor.return_value.commit.assert_not_called()
mock_cursor.return_value.rollback.assert_called()
class TestMysql(TestCase):
def setUp(self):
self.ins1 = Instance(instance_name='some_ins', type='slave', db_type='mysql', host='some_host',
port=1366, user='ins_user', password='some_str')
self.ins1.save()
self.sys_config = SysConfig()
self.wf = SqlWorkflow.objects.create(
workflow_name='some_name',
group_id=1,
group_name='g1',
engineer_display='',
audit_auth_groups='some_group',
create_time=datetime.now() - timedelta(days=1),
status='workflow_finish',
is_backup=True,
instance=self.ins1,
db_name='some_db',
syntax_type=1
)
SqlWorkflowContent.objects.create(workflow=self.wf)
def tearDown(self):
self.ins1.delete()
self.sys_config.purge()
SqlWorkflow.objects.all().delete()
SqlWorkflowContent.objects.all().delete()
@patch('MySQLdb.connect')
def test_engine_base_info(self, _conn):
new_engine = MysqlEngine(instance=self.ins1)
self.assertEqual(new_engine.name, 'MySQL')
self.assertEqual(new_engine.info, 'MySQL engine')
@patch('MySQLdb.connect')
def testGetConnection(self, connect):
new_engine = MysqlEngine(instance=self.ins1)
new_engine.get_connection()
connect.assert_called_once()
@patch('MySQLdb.connect')
def testQuery(self, connect):
cur = Mock()
connect.return_value.cursor = cur
cur.return_value.execute = Mock()
cur.return_value.fetchmany.return_value = (('v1', 'v2'),)
cur.return_value.description = (('k1', 'some_other_des'), ('k2', 'some_other_des'))
new_engine = MysqlEngine(instance=self.ins1)
query_result = new_engine.query(sql='some_str', limit_num=100)
cur.return_value.execute.assert_called()
cur.return_value.fetchmany.assert_called_once_with(size=100)
connect.return_value.close.assert_called_once()
self.assertIsInstance(query_result, ResultSet)
@patch.object(MysqlEngine, 'query')
def testAllDb(self, mock_query):
db_result = ResultSet()
db_result.rows = [('db_1',), ('db_2',)]
mock_query.return_value = db_result
new_engine = MysqlEngine(instance=self.ins1)
dbs = new_engine.get_all_databases()
self.assertEqual(dbs.rows, ['db_1', 'db_2'])
@patch.object(MysqlEngine, 'query')
def testAllTables(self, mock_query):
table_result = ResultSet()
table_result.rows = [('tb_1', 'some_des'), ('tb_2', 'some_des')]
mock_query.return_value = table_result
new_engine = MysqlEngine(instance=self.ins1)
tables = new_engine.get_all_tables('some_db')
mock_query.assert_called_once_with(db_name='some_db', sql=ANY)
self.assertEqual(tables.rows, ['tb_1', 'tb_2'])
@patch.object(MysqlEngine, 'query')
def testAllColumns(self, mock_query):
db_result = ResultSet()
db_result.rows = [('col_1', 'type'), ('col_2', 'type2')]
mock_query.return_value = db_result
new_engine = MysqlEngine(instance=self.ins1)
dbs = new_engine.get_all_columns_by_tb('some_db', 'some_tb')
self.assertEqual(dbs.rows, ['col_1', 'col_2'])
@patch.object(MysqlEngine, 'query')
def testDescribe(self, mock_query):
new_engine = MysqlEngine(instance=self.ins1)
new_engine.describe_table('some_db', 'some_db')
mock_query.assert_called_once()
def testQueryCheck(self):
new_engine = MysqlEngine(instance=self.ins1)
sql_without_limit = '-- 测试\n select user from usertable'
check_result = new_engine.query_check(db_name='some_db', sql=sql_without_limit)
self.assertEqual(check_result['filtered_sql'], 'select user from usertable')
def test_query_check_wrong_sql(self):
new_engine = MysqlEngine(instance=self.ins1)
wrong_sql = '-- 测试'
check_result = new_engine.query_check(db_name='some_db', sql=wrong_sql)
self.assertDictEqual(check_result,
{'msg': '不支持的查询语法类型!', 'bad_query': True, 'filtered_sql': '-- 测试', 'has_star': False})
def test_query_check_update_sql(self):
new_engine = MysqlEngine(instance=self.ins1)
update_sql = 'update user set id=0'
check_result = new_engine.query_check(db_name='some_db', sql=update_sql)
self.assertDictEqual(check_result,
{'msg': '不支持的查询语法类型!', 'bad_query': True, 'filtered_sql': 'update user set id=0',
'has_star': False})
def test_filter_sql_with_delimiter(self):
new_engine = MysqlEngine(instance=self.ins1)
sql_without_limit = 'select user from usertable;'
check_result = new_engine.filter_sql(sql=sql_without_limit, limit_num=100)
self.assertEqual(check_result, 'select user from usertable limit 100;')
def test_filter_sql_without_delimiter(self):
new_engine = MysqlEngine(instance=self.ins1)
sql_without_limit = 'select user from usertable'
check_result = new_engine.filter_sql(sql=sql_without_limit, limit_num=100)
self.assertEqual(check_result, 'select user from usertable limit 100;')
def test_filter_sql_with_limit(self):
new_engine = MysqlEngine(instance=self.ins1)
sql_without_limit = 'select user from usertable limit 10'
check_result = new_engine.filter_sql(sql=sql_without_limit, limit_num=1)
self.assertEqual(check_result, 'select user from usertable limit 1;')
def test_filter_sql_with_limit_min(self):
new_engine = MysqlEngine(instance=self.ins1)
sql_without_limit = 'select user from usertable limit 10'
check_result = new_engine.filter_sql(sql=sql_without_limit, limit_num=100)
self.assertEqual(check_result, 'select user from usertable limit 10;')
def test_filter_sql_with_limit_offset(self):
new_engine = MysqlEngine(instance=self.ins1)
sql_without_limit = 'select user from usertable limit 10 offset 100'
check_result = new_engine.filter_sql(sql=sql_without_limit, limit_num=1)
self.assertEqual(check_result, 'select user from usertable limit 1;')
def test_filter_sql_with_limit_nn(self):
new_engine = MysqlEngine(instance=self.ins1)
sql_without_limit = 'select user from usertable limit 10, 100'
check_result = new_engine.filter_sql(sql=sql_without_limit, limit_num=1)
self.assertEqual(check_result, 'select user from usertable limit 1;')
def test_filter_sql_upper(self):
new_engine = MysqlEngine(instance=self.ins1)
sql_without_limit = 'SELECT USER FROM usertable LIMIT 10, 100'
check_result = new_engine.filter_sql(sql=sql_without_limit, limit_num=1)
self.assertEqual(check_result, 'SELECT USER FROM usertable limit 1;')
def test_filter_sql_not_select(self):
new_engine = MysqlEngine(instance=self.ins1)
sql_without_limit = 'show create table usertable;'
check_result = new_engine.filter_sql(sql=sql_without_limit, limit_num=1)
self.assertEqual(check_result, 'show create table usertable;')
@patch('sql.engines.mysql.data_masking', return_value=ResultSet())
def test_query_masking(self, _data_masking):
query_result = ResultSet()
new_engine = MysqlEngine(instance=self.ins1)
masking_result = new_engine.query_masking(db_name='archery', sql='select 1', resultset=query_result)
self.assertIsInstance(masking_result, ResultSet)
@patch('sql.engines.mysql.data_masking', return_value=ResultSet())
def test_query_masking_not_select(self, _data_masking):
query_result = ResultSet()
new_engine = MysqlEngine(instance=self.ins1)
masking_result = new_engine.query_masking(db_name='archery', sql='explain select 1', resultset=query_result)
self.assertEqual(masking_result, query_result)
@patch('sql.engines.mysql.InceptionEngine')
def test_execute_check_select_sql(self, _inception_engine):
self.sys_config.set('inception', 'true')
sql = 'select * from user'
inc_row = ReviewResult(id=1,
errlevel=0,
stagestatus='Audit completed',
errormessage='None',
sql=sql,
affected_rows=0,
execute_time=0, )
row = ReviewResult(id=1, errlevel=2,
stagestatus='驳回不支持语句',
errormessage='仅支持DML和DDL语句,查询语句请使用SQL查询功能!',
sql=sql)
_inception_engine.return_value.execute_check.return_value = ReviewSet(full_sql=sql, rows=[inc_row])
new_engine = MysqlEngine(instance=self.ins1)
check_result = new_engine.execute_check(db_name='archery', sql=sql)
self.assertIsInstance(check_result, ReviewSet)
self.assertEqual(check_result.rows[0].__dict__, row.__dict__)
@patch('sql.engines.mysql.InceptionEngine')
def test_execute_check_critical_sql(self, _inception_engine):
self.sys_config.set('inception', 'true')
self.sys_config.set('critical_ddl_regex', '^|update')
self.sys_config.get_all_config()
sql = 'update user set id=1'
inc_row = ReviewResult(id=1,
errlevel=0,
stagestatus='Audit completed',
errormessage='None',
sql=sql,
affected_rows=0,
execute_time=0, )
row = ReviewResult(id=1, errlevel=2,
stagestatus='驳回高危SQL',
errormessage='禁止提交匹配' + '^|update' + '条件的语句!',
sql=sql)
_inception_engine.return_value.execute_check.return_value = ReviewSet(full_sql=sql, rows=[inc_row])
new_engine = MysqlEngine(instance=self.ins1)
check_result = new_engine.execute_check(db_name='archery', sql=sql)
self.assertIsInstance(check_result, ReviewSet)
self.assertEqual(check_result.rows[0].__dict__, row.__dict__)
@patch('sql.engines.mysql.InceptionEngine')
def test_execute_check_normal_sql(self, _inception_engine):
self.sys_config.set('inception', 'true')
sql = 'update user set id=1'
row = ReviewResult(id=1,
errlevel=0,
stagestatus='Audit completed',
errormessage='None',
sql=sql,
affected_rows=0,
execute_time=0, )
_inception_engine.return_value.execute_check.return_value = ReviewSet(full_sql=sql, rows=[row])
new_engine = MysqlEngine(instance=self.ins1)
check_result = new_engine.execute_check(db_name='archery', sql=sql)
self.assertIsInstance(check_result, ReviewSet)
self.assertEqual(check_result.rows[0].__dict__, row.__dict__)
@patch('sql.engines.mysql.InceptionEngine')
def test_execute_check_normal_sql_with_Exception(self, _inception_engine):
sql = 'update user set id=1'
_inception_engine.return_value.execute_check.side_effect = RuntimeError()
new_engine = MysqlEngine(instance=self.ins1)
with self.assertRaises(RuntimeError):
new_engine.execute_check(db_name=0, sql=sql)
@patch.object(MysqlEngine, 'query')
@patch('sql.engines.mysql.InceptionEngine')
def test_execute_workflow(self, _inception_engine, _query):
self.sys_config.set('inception', 'true')
sql = 'update user set id=1'
_inception_engine.return_value.execute.return_value = ReviewSet(full_sql=sql)
_query.return_value.rows = (('0',),)
new_engine = MysqlEngine(instance=self.ins1)
execute_result = new_engine.execute_workflow(self.wf)
self.assertIsInstance(execute_result, ReviewSet)
@patch('MySQLdb.connect.cursor.execute')
@patch('MySQLdb.connect.cursor')
@patch('MySQLdb.connect')
def test_execute(self, _connect, _cursor, _execute):
new_engine = MysqlEngine(instance=self.ins1)
execute_result = new_engine.execute(self.wf)
self.assertIsInstance(execute_result, ResultSet)
@patch('MySQLdb.connect')
def test_server_version(self, _connect):
_connect.return_value.get_server_info.return_value = '5.7.20-16log'
new_engine = MysqlEngine(instance=self.ins1)
server_version = new_engine.server_version
self.assertTupleEqual(server_version, (5, 7, 20))
@patch.object(MysqlEngine, 'query')
def test_get_variables_not_filter(self, _query):
new_engine = MysqlEngine(instance=self.ins1)
new_engine.get_variables()
_query.assert_called_once()
@patch('MySQLdb.connect')
@patch.object(MysqlEngine, 'query')
def test_get_variables_filter(self, _query, _connect):
_connect.return_value.get_server_info.return_value = '5.7.20-16log'
new_engine = MysqlEngine(instance=self.ins1)
new_engine.get_variables(variables=['binlog_format'])
_query.assert_called()
@patch.object(MysqlEngine, 'query')
def test_set_variable(self, _query):
new_engine = MysqlEngine(instance=self.ins1)
new_engine.set_variable('binlog_format', 'ROW')
_query.assert_called_once_with(sql="set global binlog_format=ROW;")
@patch('sql.engines.mysql.GoInceptionEngine')
def test_osc_go_inception(self, _inception_engine):
self.sys_config.set('inception', 'false')
_inception_engine.return_value.osc_control.return_value = ReviewSet()
command = 'get'
sqlsha1 = 'xxxxx'
new_engine = MysqlEngine(instance=self.ins1)
new_engine.osc_control(sqlsha1=sqlsha1, command=command)
@patch('sql.engines.mysql.InceptionEngine')
def test_osc_inception(self, _inception_engine):
self.sys_config.set('inception', 'true')
_inception_engine.return_value.osc_control.return_value = ReviewSet()
command = 'get'
sqlsha1 = 'xxxxx'
new_engine = MysqlEngine(instance=self.ins1)
new_engine.osc_control(sqlsha1=sqlsha1, command=command)
@patch.object(MysqlEngine, 'query')
def test_kill_connection(self, _query):
new_engine = MysqlEngine(instance=self.ins1)
new_engine.kill_connection(100)
_query.assert_called_once_with(sql="kill 100")
@patch.object(MysqlEngine, 'query')
def test_seconds_behind_master(self, _query):
new_engine = MysqlEngine(instance=self.ins1)
new_engine.seconds_behind_master
_query.assert_called_once_with(sql="show slave status", close_conn=False,
cursorclass=MySQLdb.cursors.DictCursor)
class TestRedis(TestCase):
@classmethod
def setUpClass(cls):
cls.ins = Instance(instance_name='some_ins', type='slave', db_type='redis', host='some_host',
port=1366, user='ins_user', password='some_str')
cls.ins.save()
@classmethod
def tearDownClass(cls):
cls.ins.delete()
SqlWorkflow.objects.all().delete()
SqlWorkflowContent.objects.all().delete()
@patch('redis.Redis')
def test_engine_base_info(self, _conn):
new_engine = RedisEngine(instance=self.ins)
self.assertEqual(new_engine.name, 'Redis')
self.assertEqual(new_engine.info, 'Redis engine')
@patch('redis.Redis')
def test_get_connection(self, _conn):
new_engine = RedisEngine(instance=self.ins)
new_engine.get_connection()
_conn.assert_called_once()
@patch('redis.Redis.execute_command', return_value=[1, 2, 3])
def test_query_return_list(self, _execute_command):
new_engine = RedisEngine(instance=self.ins)
query_result = new_engine.query(db_name=0, sql='keys *', limit_num=100)
self.assertIsInstance(query_result, ResultSet)
self.assertTupleEqual(query_result.rows, ([1], [2], [3]))
@patch('redis.Redis.execute_command', return_value='text')
def test_query_return_str(self, _execute_command):
new_engine = RedisEngine(instance=self.ins)
query_result = new_engine.query(db_name=0, sql='keys *', limit_num=100)
self.assertIsInstance(query_result, ResultSet)
self.assertTupleEqual(query_result.rows, (['text'],))
@patch('redis.Redis.execute_command', return_value='text')
def test_query_execute(self, _execute_command):
new_engine = RedisEngine(instance=self.ins)
query_result = new_engine.query(db_name=0, sql='keys *', limit_num=100)
self.assertIsInstance(query_result, ResultSet)
self.assertTupleEqual(query_result.rows, (['text'],))
@patch('redis.Redis.config_get', return_value={"databases": 4})
def test_get_all_databases(self, _config_get):
new_engine = RedisEngine(instance=self.ins)
dbs = new_engine.get_all_databases()
self.assertListEqual(dbs.rows, ['0', '1', '2', '3'])
def test_query_check_safe_cmd(self):
safe_cmd = "keys 1*"
new_engine = RedisEngine(instance=self.ins)
check_result = new_engine.query_check(db_name=0, sql=safe_cmd)
self.assertDictEqual(check_result,
{'msg': '禁止执行该命令!', 'bad_query': True, 'filtered_sql': safe_cmd, 'has_star': False})
def test_query_check_danger_cmd(self):
safe_cmd = "keys *"
new_engine = RedisEngine(instance=self.ins)
check_result = new_engine.query_check(db_name=0, sql=safe_cmd)
self.assertDictEqual(check_result,
{'msg': '禁止执行该命令!', 'bad_query': True, 'filtered_sql': safe_cmd, 'has_star': False})
def test_filter_sql(self):
safe_cmd = "keys 1*"
new_engine = RedisEngine(instance=self.ins)
check_result = new_engine.filter_sql(sql=safe_cmd, limit_num=100)
self.assertEqual(check_result, 'keys 1*')
def test_query_masking(self):
query_result = ResultSet()
new_engine = RedisEngine(instance=self.ins)
masking_result = new_engine.query_masking(db_name=0, sql='', resultset=query_result)
self.assertEqual(masking_result, query_result)
def test_execute_check(self):
sql = 'set 1 1'
row = ReviewResult(id=1,
errlevel=0,
stagestatus='Audit completed',
errormessage='None',
sql=sql,
affected_rows=0,
execute_time=0)
new_engine = RedisEngine(instance=self.ins)
check_result = new_engine.execute_check(db_name=0, sql=sql)
self.assertIsInstance(check_result, ReviewSet)
self.assertEqual(check_result.rows[0].__dict__, row.__dict__)
@patch('redis.Redis.execute_command', return_value='text')
def test_execute_workflow_success(self, _execute_command):
sql = 'set 1 1'
row = ReviewResult(id=1,
errlevel=0,
stagestatus='Execute Successfully',
errormessage='None',
sql=sql,
affected_rows=0,
execute_time=0)
wf = SqlWorkflow.objects.create(
workflow_name='some_name',
group_id=1,
group_name='g1',
engineer_display='',
audit_auth_groups='some_group',
create_time=datetime.now() - timedelta(days=1),
status='workflow_finish',
is_backup=True,
instance=self.ins,
db_name='some_db',
syntax_type=1
)
SqlWorkflowContent.objects.create(workflow=wf, sql_content=sql)
new_engine = RedisEngine(instance=self.ins)
execute_result = new_engine.execute_workflow(workflow=wf)
self.assertIsInstance(execute_result, ReviewSet)
self.assertEqual(execute_result.rows[0].__dict__.keys(), row.__dict__.keys())
class TestPgSQL(TestCase):
@classmethod
def setUpClass(cls):
cls.ins = Instance(instance_name='some_ins', type='slave', db_type='pgsql', host='some_host',
port=1366, user='ins_user', password='some_str')
cls.ins.save()
cls.sys_config = SysConfig()
@classmethod
def tearDownClass(cls):
cls.ins.delete()
cls.sys_config.purge()
@patch('psycopg2.connect')
def test_engine_base_info(self, _conn):
new_engine = PgSQLEngine(instance=self.ins)
self.assertEqual(new_engine.name, 'PgSQL')
self.assertEqual(new_engine.info, 'PgSQL engine')
@patch('psycopg2.connect')
def test_get_connection(self, _conn):
new_engine = PgSQLEngine(instance=self.ins)
new_engine.get_connection("some_dbname")
_conn.assert_called_once()
@patch('psycopg2.connect.cursor.execute')
@patch('psycopg2.connect.cursor')
@patch('psycopg2.connect')
def test_query(self, _conn, _cursor, _execute):
_conn.return_value.cursor.return_value.fetchmany.return_value = [(1,)]
new_engine = PgSQLEngine(instance=self.ins)
query_result = new_engine.query(db_name="some_dbname", sql='select 1', limit_num=100, schema_name="some_schema")
self.assertIsInstance(query_result, ResultSet)
self.assertListEqual(query_result.rows, [(1,)])
@patch('psycopg2.connect.cursor.execute')
@patch('psycopg2.connect.cursor')
@patch('psycopg2.connect')
def test_query_not_limit(self, _conn, _cursor, _execute):
_conn.return_value.cursor.return_value.fetchall.return_value = [(1,)]
new_engine = PgSQLEngine(instance=self.ins)
query_result = new_engine.query(db_name="some_dbname", sql='select 1', limit_num=0, schema_name="some_schema")
self.assertIsInstance(query_result, ResultSet)
self.assertListEqual(query_result.rows, [(1,)])
@patch('sql.engines.pgsql.PgSQLEngine.query',
return_value=ResultSet(rows=[('postgres',), ('archery',), ('template1',), ('template0',)]))
def test_get_all_databases(self, query):
new_engine = PgSQLEngine(instance=self.ins)
dbs = new_engine.get_all_databases()
self.assertListEqual(dbs.rows, ['archery'])
@patch('sql.engines.pgsql.PgSQLEngine.query',
return_value=ResultSet(rows=[('information_schema',), ('archery',), ('pg_catalog',)]))
def test_get_all_schemas(self, _query):
new_engine = PgSQLEngine(instance=self.ins)
schemas = new_engine.get_all_schemas(db_name='archery')
self.assertListEqual(schemas.rows, ['archery'])
@patch('sql.engines.pgsql.PgSQLEngine.query', return_value=ResultSet(rows=[('test',), ('test2',)]))
def test_get_all_tables(self, _query):
new_engine = PgSQLEngine(instance=self.ins)
tables = new_engine.get_all_tables(db_name='archery', schema_name='archery')
self.assertListEqual(tables.rows, ['test2'])
@patch('sql.engines.pgsql.PgSQLEngine.query',
return_value=ResultSet(rows=[('id',), ('name',)]))
def test_get_all_columns_by_tb(self, _query):
new_engine = PgSQLEngine(instance=self.ins)
columns = new_engine.get_all_columns_by_tb(db_name='archery', tb_name='test2', schema_name='archery')
self.assertListEqual(columns.rows, ['id', 'name'])
@patch('sql.engines.pgsql.PgSQLEngine.query',
return_value=ResultSet(rows=[('postgres',), ('archery',), ('template1',), ('template0',)]))
def test_describe_table(self, _query):
new_engine = PgSQLEngine(instance=self.ins)
describe = new_engine.describe_table(db_name='archery', schema_name='archery', tb_name='text')
self.assertIsInstance(describe, ResultSet)
def test_query_check_disable_sql(self):
sql = "update xxx set a=1 "
new_engine = PgSQLEngine(instance=self.ins)
check_result = new_engine.query_check(db_name='archery', sql=sql)
self.assertDictEqual(check_result,
{'msg': '不支持的查询语法类型!', 'bad_query': True, 'filtered_sql': sql.strip(), 'has_star': False})
def test_query_check_star_sql(self):
sql = "select * from xx "
new_engine = PgSQLEngine(instance=self.ins)
check_result = new_engine.query_check(db_name='archery', sql=sql)
self.assertDictEqual(check_result,
{'msg': 'SQL语句中含有 * ', 'bad_query': False, 'filtered_sql': sql.strip(), 'has_star': True})
def test_filter_sql_with_delimiter(self):
sql = "select * from xx;"
new_engine = PgSQLEngine(instance=self.ins)
check_result = new_engine.filter_sql(sql=sql, limit_num=100)
self.assertEqual(check_result, "select * from xx limit 100;")
def test_filter_sql_without_delimiter(self):
sql = "select * from xx"
new_engine = PgSQLEngine(instance=self.ins)
check_result = new_engine.filter_sql(sql=sql, limit_num=100)
self.assertEqual(check_result, "select * from xx limit 100;")
def test_filter_sql_with_limit(self):
sql = "select * from xx limit 10"
new_engine = PgSQLEngine(instance=self.ins)
check_result = new_engine.filter_sql(sql=sql, limit_num=1)
self.assertEqual(check_result, "select * from xx limit 10;")
def test_query_masking(self):
query_result = ResultSet()
new_engine = PgSQLEngine(instance=self.ins)
masking_result = new_engine.query_masking(db_name=0, sql='', resultset=query_result)
self.assertEqual(masking_result, query_result)
def test_execute_check_select_sql(self):
sql = 'select * from user;'
row = ReviewResult(id=1, errlevel=2,
stagestatus='驳回不支持语句',
errormessage='仅支持DML和DDL语句,查询语句请使用SQL查询功能!',
sql=sql)
new_engine = PgSQLEngine(instance=self.ins)
check_result = new_engine.execute_check(db_name='archery', sql=sql)
self.assertIsInstance(check_result, ReviewSet)
self.assertEqual(check_result.rows[0].__dict__, row.__dict__)
def test_execute_check_critical_sql(self):
self.sys_config.set('critical_ddl_regex', '^|update')
self.sys_config.get_all_config()
sql = 'update user set id=1'
row = ReviewResult(id=1, errlevel=2,
stagestatus='驳回高危SQL',
errormessage='禁止提交匹配' + '^|update' + '条件的语句!',
sql=sql)
new_engine = PgSQLEngine(instance=self.ins)
check_result = new_engine.execute_check(db_name='archery', sql=sql)
self.assertIsInstance(check_result, ReviewSet)
self.assertEqual(check_result.rows[0].__dict__, row.__dict__)
def test_execute_check_normal_sql(self):
self.sys_config.purge()
sql = 'alter table tb set id=1'
row = ReviewResult(id=1,
errlevel=0,
stagestatus='Audit completed',
errormessage='None',
sql=sql,
affected_rows=0,
execute_time=0, )
new_engine = PgSQLEngine(instance=self.ins)
check_result = new_engine.execute_check(db_name='archery', sql=sql)
self.assertIsInstance(check_result, ReviewSet)
self.assertEqual(check_result.rows[0].__dict__, row.__dict__)
@patch('psycopg2.connect.cursor.execute')
@patch('psycopg2.connect.cursor')
@patch('psycopg2.connect')
def test_execute_workflow_success(self, _conn, _cursor, _execute):
sql = 'update user set id=1'
row = ReviewResult(id=1,
errlevel=0,
stagestatus='Execute Successfully',
errormessage='None',
sql=sql,
affected_rows=0,
execute_time=0)
wf = SqlWorkflow.objects.create(
workflow_name='some_name',
group_id=1,
group_name='g1',
engineer_display='',
audit_auth_groups='some_group',
create_time=datetime.now() - timedelta(days=1),
status='workflow_finish',
is_backup=True,
instance=self.ins,
db_name='some_db',
syntax_type=1
)
SqlWorkflowContent.objects.create(workflow=wf, sql_content=sql)
new_engine = PgSQLEngine(instance=self.ins)
execute_result = new_engine.execute_workflow(workflow=wf)
self.assertIsInstance(execute_result, ReviewSet)
self.assertEqual(execute_result.rows[0].__dict__.keys(), row.__dict__.keys())
@patch('psycopg2.connect.cursor.execute')
@patch('psycopg2.connect.cursor')
@patch('psycopg2.connect', return_value=RuntimeError)
def test_execute_workflow_exception(self, _conn, _cursor, _execute):
sql = 'update user set id=1'
row = ReviewResult(id=1,
errlevel=2,
stagestatus='Execute Failed',
                           errormessage=f'异常信息:Oracle命令执行报错,语句:{sql}',
sql=sql,
affected_rows=0,
execute_time=0, )
wf = SqlWorkflow.objects.create(
workflow_name='some_name',
group_id=1,
group_name='g1',
engineer_display='',
audit_auth_groups='some_group',
create_time=datetime.now() - timedelta(days=1),
status='workflow_finish',
is_backup=True,
instance=self.ins,
db_name='some_db',
syntax_type=1
)
SqlWorkflowContent.objects.create(workflow=wf, sql_content=sql)
with self.assertRaises(AttributeError):
new_engine = PgSQLEngine(instance=self.ins)
execute_result = new_engine.execute_workflow(workflow=wf)
self.assertIsInstance(execute_result, ReviewSet)
self.assertEqual(execute_result.rows[0].__dict__.keys(), row.__dict__.keys())
class TestModel(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_result_set_rows_shadow(self):
        # Test the pitfall of a mutable empty-list default value:
        # if the default is a shared empty list and rows are accumulated with +=,
        # values from a previous instance would linger in the next one.
result_set1 = ResultSet()
for i in range(10):
result_set1.rows += [i]
brand_new_result_set = ResultSet()
self.assertEqual(brand_new_result_set.rows, [])
review_set1 = ReviewSet()
for i in range(10):
review_set1.rows += [i]
brand_new_review_set = ReviewSet()
self.assertEqual(brand_new_review_set.rows, [])
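# Illustrative sketch of the pitfall exercised above (hypothetical function, not
# part of this project): a mutable default such as ``rows=[]`` is created once
# and shared across calls, so in-place updates leak from one call to the next.
#
#     def make_result(rows=[]):   # shared default list
#         rows.append(1)
#         return rows
#
#     make_result()  # -> [1]
#     make_result()  # -> [1, 1]  (state from the first call leaks through)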
class TestInception(TestCase):
def setUp(self):
self.ins = Instance.objects.create(instance_name='some_ins', type='slave', db_type='mysql', host='some_host',
port=3306, user='ins_user', password='some_str')
self.ins_inc = Instance.objects.create(instance_name='some_ins_inc', type='slave', db_type='inception',
host='some_host', port=6669)
self.wf = SqlWorkflow.objects.create(
workflow_name='some_name',
group_id=1,
group_name='g1',
engineer_display='',
audit_auth_groups='some_group',
create_time=datetime.now() - timedelta(days=1),
status='workflow_finish',
is_backup=True,
instance=self.ins,
db_name='some_db',
syntax_type=1
)
SqlWorkflowContent.objects.create(workflow=self.wf)
def tearDown(self):
self.ins.delete()
self.ins_inc.delete()
SqlWorkflow.objects.all().delete()
SqlWorkflowContent.objects.all().delete()
@patch('MySQLdb.connect')
def test_get_connection(self, _connect):
new_engine = InceptionEngine()
new_engine.get_connection()
_connect.assert_called_once()
@patch('MySQLdb.connect')
def test_get_backup_connection(self, _connect):
new_engine = InceptionEngine()
new_engine.get_backup_connection()
_connect.assert_called_once()
@patch('sql.engines.inception.InceptionEngine.query')
def test_execute_check_normal_sql(self, _query):
sql = 'update user set id=100'
row = [1, 'CHECKED', 0, 'Audit completed', 'None', 'use archery', 0, "'0_0_0'", 'None', '0', '']
_query.return_value = ResultSet(full_sql=sql, rows=[row])
new_engine = InceptionEngine()
check_result = new_engine.execute_check(instance=self.ins, db_name=0, sql=sql)
self.assertIsInstance(check_result, ReviewSet)
@patch('sql.engines.inception.InceptionEngine.query')
def test_execute_exception(self, _query):
sql = 'update user set id=100'
row = [1, 'CHECKED', 1, 'Execute failed', 'None', 'use archery', 0, "'0_0_0'", 'None', '0', '']
column_list = ['ID', 'stage', 'errlevel', 'stagestatus', 'errormessage', 'SQL', 'Affected_rows', 'sequence',
'backup_dbname', 'execute_time', 'sqlsha1']
_query.return_value = ResultSet(full_sql=sql, rows=[row], column_list=column_list)
new_engine = InceptionEngine()
execute_result = new_engine.execute(workflow=self.wf)
self.assertIsInstance(execute_result, ReviewSet)
@patch('sql.engines.inception.InceptionEngine.query')
def test_execute_finish(self, _query):
sql = 'update user set id=100'
row = [1, 'CHECKED', 0, 'Execute Successfully', 'None', 'use archery', 0, "'0_0_0'", 'None', '0', '']
column_list = ['ID', 'stage', 'errlevel', 'stagestatus', 'errormessage', 'SQL', 'Affected_rows', 'sequence',
'backup_dbname', 'execute_time', 'sqlsha1']
_query.return_value = ResultSet(full_sql=sql, rows=[row], column_list=column_list)
new_engine = InceptionEngine()
execute_result = new_engine.execute(workflow=self.wf)
self.assertIsInstance(execute_result, ReviewSet)
@patch('MySQLdb.connect.cursor.execute')
@patch('MySQLdb.connect.cursor')
@patch('MySQLdb.connect')
def test_query(self, _conn, _cursor, _execute):
_conn.return_value.cursor.return_value.fetchall.return_value = [(1,)]
new_engine = InceptionEngine()
query_result = new_engine.query(db_name=0, sql='select 1', limit_num=100)
self.assertIsInstance(query_result, ResultSet)
@patch('MySQLdb.connect.cursor.execute')
@patch('MySQLdb.connect.cursor')
@patch('MySQLdb.connect')
def test_query_not_limit(self, _conn, _cursor, _execute):
_conn.return_value.cursor.return_value.fetchall.return_value = [(1,)]
new_engine = InceptionEngine(instance=self.ins)
query_result = new_engine.query(db_name=0, sql='select 1', limit_num=0)
self.assertIsInstance(query_result, ResultSet)
@patch('sql.engines.inception.InceptionEngine.query')
def test_query_print(self, _query):
sql = 'update user set id=100'
row = [1,
'select * from sql_instance limit 100',
0,
'{"command":"select","select_list":[{"type":"FIELD_ITEM","field":"*"}],"table_ref":[{"db":"archery","table":"sql_instance"}],"limit":{"limit":[{"type":"INT_ITEM","value":"100"}]}}',
'None']
column_list = ['ID', 'statement', 'errlevel', 'query_tree', 'errmsg']
_query.return_value = ResultSet(full_sql=sql, rows=[row], column_list=column_list)
new_engine = InceptionEngine()
print_result = new_engine.query_print(self.ins, db_name=None, sql=sql)
self.assertDictEqual(print_result, json.loads(_repair_json_str(row[3])))
@patch('MySQLdb.connect')
def test_get_rollback_list(self, _connect):
self.wf.sqlworkflowcontent.execute_result = """[{
"id": 1,
"stage": "RERUN",
"errlevel": 0,
"stagestatus": "Execute Successfully",
"errormessage": "None",
"sql": "use archer_test",
"affected_rows": 0,
"sequence": "'1554135032_13038_0'",
"backup_dbname": "None",
"execute_time": "0.000",
"sqlsha1": "",
"actual_affected_rows": 0
}, {
"id": 2,
"stage": "EXECUTED",
"errlevel": 0,
"stagestatus": "Execute Successfully Backup successfully",
"errormessage": "None",
"sql": "insert into tt1 (user_name)values('A'),('B'),('C')",
"affected_rows": 3,
"sequence": "'1554135032_13038_1'",
"backup_dbname": "mysql_3306_archer_test",
"execute_time": "0.000",
"sqlsha1": "",
"actual_affected_rows": 3
}]"""
self.wf.sqlworkflowcontent.save()
new_engine = InceptionEngine()
new_engine.get_rollback(self.wf)
@patch('sql.engines.inception.InceptionEngine.query')
def test_osc_get(self, _query):
new_engine = InceptionEngine()
command = 'get'
sqlsha1 = 'xxxxx'
sql = f"inception get osc_percent '{sqlsha1}';"
_query.return_value = ResultSet(full_sql=sql, rows=[], column_list=[])
new_engine.osc_control(sqlsha1=sqlsha1, command=command)
_query.assert_called_once_with(sql=sql)
@patch('sql.engines.inception.InceptionEngine.query')
def test_osc_kill(self, _query):
new_engine = InceptionEngine()
command = 'kill'
sqlsha1 = 'xxxxx'
sql = f"inception stop alter '{sqlsha1}';"
_query.return_value = ResultSet(full_sql=sql, rows=[], column_list=[])
new_engine.osc_control(sqlsha1=sqlsha1, command=command)
_query.assert_called_once_with(sql=sql)
@patch('sql.engines.inception.InceptionEngine.query')
def test_osc_not_support(self, _query):
new_engine = InceptionEngine()
command = 'stop'
sqlsha1 = 'xxxxx'
sql = f"inception stop alter '{sqlsha1}';"
_query.return_value = ResultSet(full_sql=sql, rows=[], column_list=[])
with self.assertRaisesMessage(ValueError, 'pt-osc不支持暂停和恢复,需要停止执行请使用终止按钮!'):
new_engine.osc_control(sqlsha1=sqlsha1, command=command)
@patch('sql.engines.inception.InceptionEngine.query')
def test_get_variables(self, _query):
new_engine = InceptionEngine(instance=self.ins_inc)
new_engine.get_variables()
sql = f"inception get variables;"
_query.assert_called_once_with(sql=sql)
@patch('sql.engines.inception.InceptionEngine.query')
def test_get_variables_filter(self, _query):
new_engine = InceptionEngine(instance=self.ins_inc)
new_engine.get_variables(variables=['inception_osc_on'])
sql = f"inception get variables 'inception_osc_on';"
_query.assert_called_once_with(sql=sql)
@patch('sql.engines.inception.InceptionEngine.query')
def test_set_variable(self, _query):
new_engine = InceptionEngine(instance=self.ins)
new_engine.set_variable('inception_osc_on', 'on')
_query.assert_called_once_with(sql="inception set inception_osc_on=on;")
class TestGoInception(TestCase):
def setUp(self):
self.ins = Instance.objects.create(instance_name='some_ins', type='slave', db_type='mysql',
host='some_host',
port=3306, user='ins_user', password='some_str')
self.ins_inc = Instance.objects.create(instance_name='some_ins_inc', type='slave', db_type='goinception',
host='some_host', port=4000)
self.wf = SqlWorkflow.objects.create(
workflow_name='some_name',
group_id=1,
group_name='g1',
engineer_display='',
audit_auth_groups='some_group',
create_time=datetime.now() - timedelta(days=1),
status='workflow_finish',
is_backup=True,
instance=self.ins,
db_name='some_db',
syntax_type=1
)
SqlWorkflowContent.objects.create(workflow=self.wf)
def tearDown(self):
self.ins.delete()
self.ins_inc.delete()
SqlWorkflow.objects.all().delete()
SqlWorkflowContent.objects.all().delete()
@patch('MySQLdb.connect')
def test_get_connection(self, _connect):
new_engine = GoInceptionEngine()
new_engine.get_connection()
_connect.assert_called_once()
@patch('sql.engines.goinception.GoInceptionEngine.query')
def test_execute_check_normal_sql(self, _query):
sql = 'update user set id=100'
row = [1, 'CHECKED', 0, 'Audit completed', 'None', 'use archery', 0, "'0_0_0'", 'None', '0', '', '']
_query.return_value = ResultSet(full_sql=sql, rows=[row])
new_engine = GoInceptionEngine()
check_result = new_engine.execute_check(instance=self.ins, db_name=0, sql=sql)
self.assertIsInstance(check_result, ReviewSet)
@patch('sql.engines.goinception.GoInceptionEngine.query')
def test_execute_exception(self, _query):
sql = 'update user set id=100'
row = [1, 'CHECKED', 1, 'Execute failed', 'None', 'use archery', 0, "'0_0_0'", 'None', '0', '', '']
column_list = ['order_id', 'stage', 'error_level', 'stage_status', 'error_message', 'sql',
'affected_rows', 'sequence', 'backup_dbname', 'execute_time', 'sqlsha1', 'backup_time']
_query.return_value = ResultSet(full_sql=sql, rows=[row], column_list=column_list)
new_engine = GoInceptionEngine()
execute_result = new_engine.execute(workflow=self.wf)
self.assertIsInstance(execute_result, ReviewSet)
@patch('sql.engines.goinception.GoInceptionEngine.query')
def test_execute_finish(self, _query):
sql = 'update user set id=100'
row = [1, 'CHECKED', 0, 'Execute Successfully', 'None', 'use archery', 0, "'0_0_0'", 'None', '0', '', '']
column_list = ['order_id', 'stage', 'error_level', 'stage_status', 'error_message', 'sql',
'affected_rows', 'sequence', 'backup_dbname', 'execute_time', 'sqlsha1', 'backup_time']
_query.return_value = ResultSet(full_sql=sql, rows=[row], column_list=column_list)
new_engine = GoInceptionEngine()
execute_result = new_engine.execute(workflow=self.wf)
self.assertIsInstance(execute_result, ReviewSet)
@patch('MySQLdb.connect.cursor.execute')
@patch('MySQLdb.connect.cursor')
@patch('MySQLdb.connect')
def test_query(self, _conn, _cursor, _execute):
_conn.return_value.cursor.return_value.fetchall.return_value = [(1,)]
new_engine = GoInceptionEngine()
query_result = new_engine.query(db_name=0, sql='select 1', limit_num=100)
self.assertIsInstance(query_result, ResultSet)
@patch('MySQLdb.connect.cursor.execute')
@patch('MySQLdb.connect.cursor')
@patch('MySQLdb.connect')
def test_query_not_limit(self, _conn, _cursor, _execute):
_conn.return_value.cursor.return_value.fetchall.return_value = [(1,)]
new_engine = GoInceptionEngine(instance=self.ins)
query_result = new_engine.query(db_name=0, sql='select 1', limit_num=0)
self.assertIsInstance(query_result, ResultSet)
@patch('sql.engines.goinception.GoInceptionEngine.query')
def test_osc_get(self, _query):
new_engine = GoInceptionEngine()
command = 'get'
sqlsha1 = 'xxxxx'
sql = f"inception get osc_percent '{sqlsha1}';"
_query.return_value = ResultSet(full_sql=sql, rows=[], column_list=[])
new_engine.osc_control(sqlsha1=sqlsha1, command=command)
_query.assert_called_once_with(sql=sql)
@patch('sql.engines.goinception.GoInceptionEngine.query')
def test_osc_pause(self, _query):
new_engine = GoInceptionEngine()
command = 'pause'
sqlsha1 = 'xxxxx'
sql = f"inception {command} osc '{sqlsha1}';"
_query.return_value = ResultSet(full_sql=sql, rows=[], column_list=[])
new_engine.osc_control(sqlsha1=sqlsha1, command=command)
_query.assert_called_once_with(sql=sql)
@patch('sql.engines.goinception.GoInceptionEngine.query')
def test_osc_resume(self, _query):
new_engine = GoInceptionEngine()
command = 'resume'
sqlsha1 = 'xxxxx'
sql = f"inception {command} osc '{sqlsha1}';"
_query.return_value = ResultSet(full_sql=sql, rows=[], column_list=[])
new_engine.osc_control(sqlsha1=sqlsha1, command=command)
_query.assert_called_once_with(sql=sql)
@patch('sql.engines.goinception.GoInceptionEngine.query')
def test_osc_kill(self, _query):
new_engine = GoInceptionEngine()
command = 'kill'
sqlsha1 = 'xxxxx'
sql = f"inception kill osc '{sqlsha1}';"
_query.return_value = ResultSet(full_sql=sql, rows=[], column_list=[])
new_engine.osc_control(sqlsha1=sqlsha1, command=command)
_query.assert_called_once_with(sql=sql)
@patch('sql.engines.goinception.GoInceptionEngine.query')
def test_get_variables(self, _query):
new_engine = GoInceptionEngine(instance=self.ins_inc)
new_engine.get_variables()
sql = f"inception get variables;"
_query.assert_called_once_with(sql=sql)
@patch('sql.engines.goinception.GoInceptionEngine.query')
def test_get_variables_filter(self, _query):
new_engine = GoInceptionEngine(instance=self.ins_inc)
new_engine.get_variables(variables=['inception_osc_on'])
sql = f"inception get variables like 'inception_osc_on';"
_query.assert_called_once_with(sql=sql)
@patch('sql.engines.goinception.GoInceptionEngine.query')
def test_set_variable(self, _query):
new_engine = GoInceptionEngine(instance=self.ins)
new_engine.set_variable('inception_osc_on', 'on')
_query.assert_called_once_with(sql="inception set inception_osc_on=on;")
class TestOracle(TestCase):
"""Oracle 测试"""
def setUp(self):
self.ins = Instance.objects.create(instance_name='some_ins', type='slave', db_type='oracle',
host='some_host', port=3306, user='ins_user', password='some_str',
sid='some_id')
self.wf = SqlWorkflow.objects.create(
workflow_name='some_name',
group_id=1,
group_name='g1',
engineer_display='',
audit_auth_groups='some_group',
create_time=datetime.now() - timedelta(days=1),
status='workflow_finish',
is_backup=True,
instance=self.ins,
db_name='some_db',
syntax_type=1
)
SqlWorkflowContent.objects.create(workflow=self.wf)
self.sys_config = SysConfig()
def tearDown(self):
self.ins.delete()
self.sys_config.purge()
SqlWorkflow.objects.all().delete()
SqlWorkflowContent.objects.all().delete()
@patch('cx_Oracle.makedsn')
@patch('cx_Oracle.connect')
def test_get_connection(self, _connect, _makedsn):
        # case 1: sid is set
new_engine = OracleEngine(self.ins)
new_engine.get_connection()
_connect.assert_called_once()
_makedsn.assert_called_once()
        # case 2: service_name is set
_connect.reset_mock()
_makedsn.reset_mock()
self.ins.service_name = 'some_service'
self.ins.sid = ''
self.ins.save()
new_engine = OracleEngine(self.ins)
new_engine.get_connection()
_connect.assert_called_once()
_makedsn.assert_called_once()
        # case 3: neither is set, a ValueError is expected
_connect.reset_mock()
_makedsn.reset_mock()
self.ins.service_name = ''
self.ins.sid = ''
self.ins.save()
new_engine = OracleEngine(self.ins)
with self.assertRaises(ValueError):
new_engine.get_connection()
@patch('cx_Oracle.connect')
def test_engine_base_info(self, _conn):
new_engine = OracleEngine(instance=self.ins)
self.assertEqual(new_engine.name, 'Oracle')
self.assertEqual(new_engine.info, 'Oracle engine')
_conn.return_value.version = '12.1.0.2.0'
self.assertTupleEqual(new_engine.server_version, ('12', '1', '0'))
@patch('cx_Oracle.connect.cursor.execute')
@patch('cx_Oracle.connect.cursor')
@patch('cx_Oracle.connect')
def test_query(self, _conn, _cursor, _execute):
_conn.return_value.cursor.return_value.fetchmany.return_value = [(1,)]
new_engine = OracleEngine(instance=self.ins)
query_result = new_engine.query(db_name='archery', sql='select 1', limit_num=100)
self.assertIsInstance(query_result, ResultSet)
self.assertListEqual(query_result.rows, [(1,)])
@patch('cx_Oracle.connect.cursor.execute')
@patch('cx_Oracle.connect.cursor')
@patch('cx_Oracle.connect')
def test_query_not_limit(self, _conn, _cursor, _execute):
_conn.return_value.cursor.return_value.fetchall.return_value = [(1,)]
new_engine = OracleEngine(instance=self.ins)
query_result = new_engine.query(db_name=0, sql='select 1', limit_num=0)
self.assertIsInstance(query_result, ResultSet)
self.assertListEqual(query_result.rows, [(1,)])
@patch('sql.engines.oracle.OracleEngine.query',
return_value=ResultSet(rows=[('AUD_SYS',), ('archery',), ('ANONYMOUS',)]))
def test_get_all_databases(self, _query):
new_engine = OracleEngine(instance=self.ins)
dbs = new_engine.get_all_databases()
self.assertListEqual(dbs.rows, ['archery'])
@patch('sql.engines.oracle.OracleEngine.query',
return_value=ResultSet(rows=[('AUD_SYS',), ('archery',), ('ANONYMOUS',)]))
def test__get_all_databases(self, _query):
new_engine = OracleEngine(instance=self.ins)
dbs = new_engine._get_all_databases()
self.assertListEqual(dbs.rows, ['AUD_SYS', 'archery', 'ANONYMOUS'])
@patch('sql.engines.oracle.OracleEngine.query',
return_value=ResultSet(rows=[('archery',)]))
def test__get_all_instances(self, _query):
new_engine = OracleEngine(instance=self.ins)
dbs = new_engine._get_all_instances()
self.assertListEqual(dbs.rows, ['archery'])
@patch('sql.engines.oracle.OracleEngine.query',
return_value=ResultSet(rows=[('ANONYMOUS',), ('archery',), ('SYSTEM',)]))
def test_get_all_schemas(self, _query):
new_engine = OracleEngine(instance=self.ins)
schemas = new_engine._get_all_schemas()
self.assertListEqual(schemas.rows, ['archery'])
@patch('sql.engines.oracle.OracleEngine.query', return_value=ResultSet(rows=[('test',), ('test2',)]))
def test_get_all_tables(self, _query):
new_engine = OracleEngine(instance=self.ins)
tables = new_engine.get_all_tables(db_name='archery')
self.assertListEqual(tables.rows, ['test2'])
@patch('sql.engines.oracle.OracleEngine.query',
return_value=ResultSet(rows=[('id',), ('name',)]))
def test_get_all_columns_by_tb(self, _query):
new_engine = OracleEngine(instance=self.ins)
columns = new_engine.get_all_columns_by_tb(db_name='archery', tb_name='test2')
self.assertListEqual(columns.rows, ['id', 'name'])
@patch('sql.engines.oracle.OracleEngine.query',
return_value=ResultSet(rows=[('archery',), ('template1',), ('template0',)]))
def test_describe_table(self, _query):
new_engine = OracleEngine(instance=self.ins)
describe = new_engine.describe_table(db_name='archery', tb_name='text')
self.assertIsInstance(describe, ResultSet)
def test_query_check_disable_sql(self):
sql = "update xxx set a=1;"
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.query_check(db_name='archery', sql=sql)
self.assertDictEqual(check_result,
{'msg': '不支持语法!', 'bad_query': True, 'filtered_sql': sql.strip(';'),
'has_star': False})
@patch('sql.engines.oracle.OracleEngine.explain_check', return_value={'msg': '', 'rows': 0})
def test_query_check_star_sql(self, _explain_check):
sql = "select * from xx;"
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.query_check(db_name='archery', sql=sql)
self.assertDictEqual(check_result,
{'msg': '禁止使用 * 关键词\n', 'bad_query': False, 'filtered_sql': sql.strip(';'),
'has_star': True})
def test_query_check_IndexError(self):
sql = ""
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.query_check(db_name='archery', sql=sql)
self.assertDictEqual(check_result,
{'msg': '没有有效的SQL语句', 'bad_query': True, 'filtered_sql': sql.strip(), 'has_star': False})
@patch('sql.engines.oracle.OracleEngine.explain_check', return_value={'msg': '', 'rows': 0})
def test_query_check_plus(self, _explain_check):
sql = "select 100+1 from tb;"
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.query_check(db_name='archery', sql=sql)
self.assertDictEqual(check_result,
{'msg': '禁止使用 + 关键词\n', 'bad_query': True, 'filtered_sql': sql.strip(';'),
'has_star': False})
def test_filter_sql_with_delimiter(self):
sql = "select * from xx;"
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.filter_sql(sql=sql, limit_num=100)
self.assertEqual(check_result, "select sql_audit.* from (select * from xx) sql_audit where rownum <= 100")
def test_filter_sql_with_delimiter_and_where(self):
sql = "select * from xx where id>1;"
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.filter_sql(sql=sql, limit_num=100)
self.assertEqual(check_result,
"select sql_audit.* from (select * from xx where id>1) sql_audit where rownum <= 100")
def test_filter_sql_without_delimiter(self):
sql = "select * from xx;"
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.filter_sql(sql=sql, limit_num=100)
self.assertEqual(check_result, "select sql_audit.* from (select * from xx) sql_audit where rownum <= 100")
def test_filter_sql_with_limit(self):
sql = "select * from xx limit 10;"
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.filter_sql(sql=sql, limit_num=1)
self.assertEqual(check_result,
"select sql_audit.* from (select * from xx limit 10) sql_audit where rownum <= 1")
def test_query_masking(self):
query_result = ResultSet()
new_engine = OracleEngine(instance=self.ins)
masking_result = new_engine.query_masking(schema_name='', sql='select 1', resultset=query_result)
self.assertEqual(masking_result, query_result)
def test_execute_check_select_sql(self):
sql = 'select * from user;'
row = ReviewResult(id=1, errlevel=2,
stagestatus='驳回不支持语句',
errormessage='仅支持DML和DDL语句,查询语句请使用SQL查询功能!',
sql=sqlparse.format(sql, strip_comments=True, reindent=True, keyword_case='lower'))
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.execute_check(db_name='archery', sql=sql)
self.assertIsInstance(check_result, ReviewSet)
self.assertEqual(check_result.rows[0].__dict__, row.__dict__)
def test_execute_check_critical_sql(self):
self.sys_config.set('critical_ddl_regex', '^|update')
self.sys_config.get_all_config()
sql = 'update user set id=1'
row = ReviewResult(id=1, errlevel=2,
stagestatus='驳回高危SQL',
errormessage='禁止提交匹配' + '^|update' + '条件的语句!',
sql=sqlparse.format(sql, strip_comments=True, reindent=True, keyword_case='lower'))
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.execute_check(db_name='archery', sql=sql)
self.assertIsInstance(check_result, ReviewSet)
self.assertEqual(check_result.rows[0].__dict__, row.__dict__)
@patch('sql.engines.oracle.OracleEngine.explain_check', return_value={'msg': '', 'rows': 0})
@patch('sql.engines.oracle.OracleEngine.get_sql_first_object_name', return_value='tb')
@patch('sql.engines.oracle.OracleEngine.object_name_check', return_value=True)
    def test_execute_check_normal_sql(self, _object_name_check, _get_sql_first_object_name, _explain_check):
        # mock arguments are injected bottom-up, matching the order of the stacked @patch decorators above
self.sys_config.purge()
sql = 'alter table tb set id=1'
row = ReviewResult(id=1,
errlevel=1,
stagestatus='当前平台,此语法不支持审核!',
errormessage='当前平台,此语法不支持审核!',
sql=sqlparse.format(sql, strip_comments=True, reindent=True, keyword_case='lower'),
affected_rows=0,
execute_time=0,
stmt_type='SQL',
object_owner='',
object_type='',
object_name='',
)
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.execute_check(db_name='archery', sql=sql)
self.assertIsInstance(check_result, ReviewSet)
self.assertEqual(check_result.rows[0].__dict__, row.__dict__)
@patch('cx_Oracle.connect.cursor.execute')
@patch('cx_Oracle.connect.cursor')
@patch('cx_Oracle.connect')
def test_execute_workflow_success(self, _conn, _cursor, _execute):
sql = 'update user set id=1'
review_row = ReviewResult(id=1,
errlevel=0,
stagestatus='Execute Successfully',
errormessage='None',
sql=sql,
affected_rows=0,
execute_time=0,
stmt_type='SQL',
object_owner='',
object_type='',
object_name='', )
execute_row = ReviewResult(id=1,
errlevel=0,
stagestatus='Execute Successfully',
errormessage='None',
sql=sql,
affected_rows=0,
execute_time=0)
wf = SqlWorkflow.objects.create(
workflow_name='some_name',
group_id=1,
group_name='g1',
engineer_display='',
audit_auth_groups='some_group',
create_time=datetime.now() - timedelta(days=1),
status='workflow_finish',
is_backup=True,
instance=self.ins,
db_name='some_db',
syntax_type=1
)
SqlWorkflowContent.objects.create(workflow=wf, sql_content=sql,
review_content=ReviewSet(rows=[review_row]).json())
new_engine = OracleEngine(instance=self.ins)
execute_result = new_engine.execute_workflow(workflow=wf)
self.assertIsInstance(execute_result, ReviewSet)
self.assertEqual(execute_result.rows[0].__dict__.keys(), execute_row.__dict__.keys())
@patch('cx_Oracle.connect.cursor.execute')
@patch('cx_Oracle.connect.cursor')
@patch('cx_Oracle.connect', return_value=RuntimeError)
def test_execute_workflow_exception(self, _conn, _cursor, _execute):
sql = 'update user set id=1'
row = ReviewResult(id=1,
errlevel=2,
stagestatus='Execute Failed',
                           errormessage=f'异常信息:{f"Oracle命令执行报错,语句:{sql}"}',
sql=sql,
affected_rows=0,
execute_time=0,
stmt_type='SQL',
object_owner='',
object_type='',
object_name='',
)
wf = SqlWorkflow.objects.create(
workflow_name='some_name',
group_id=1,
group_name='g1',
engineer_display='',
audit_auth_groups='some_group',
create_time=datetime.now() - timedelta(days=1),
status='workflow_finish',
is_backup=True,
instance=self.ins,
db_name='some_db',
syntax_type=1
)
SqlWorkflowContent.objects.create(workflow=wf, sql_content=sql, review_content=ReviewSet(rows=[row]).json())
with self.assertRaises(AttributeError):
new_engine = OracleEngine(instance=self.ins)
execute_result = new_engine.execute_workflow(workflow=wf)
self.assertIsInstance(execute_result, ReviewSet)
self.assertEqual(execute_result.rows[0].__dict__.keys(), row.__dict__.keys())
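# Mongo engine tests: connection, query checks and metadata listing against a mocked pymongo client.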
class MongoTest(TestCase):
def setUp(self) -> None:
self.ins = Instance.objects.create(instance_name='some_ins', type='slave', db_type='mongo',
host='some_host', port=3306, user='ins_user')
self.engine = MongoEngine(instance=self.ins)
def tearDown(self) -> None:
self.ins.delete()
@patch('sql.engines.mongo.pymongo')
def test_get_connection(self, mock_pymongo):
_ = self.engine.get_connection()
mock_pymongo.MongoClient.assert_called_once()
@patch('sql.engines.mongo.MongoEngine.get_connection')
def test_query(self, mock_get_connection):
        # TODO: the normal (non-count) query path is not covered yet
test_sql = """{"collection": "job","count": true}"""
self.assertIsInstance(self.engine.query('archery', test_sql), ResultSet)
def test_query_check(self):
test_sql = """{"collection": "job","count": true}"""
check_result = self.engine.query_check(sql=test_sql)
self.assertEqual(False, check_result.get('bad_query'))
@patch('sql.engines.mongo.MongoEngine.get_connection')
def test_get_all_databases(self, mock_get_connection):
db_list = self.engine.get_all_databases()
self.assertIsInstance(db_list, ResultSet)
# mock_get_connection.return_value.list_database_names.assert_called_once()
@patch('sql.engines.mongo.MongoEngine.get_connection')
def test_get_all_tables(self, mock_get_connection):
mock_db = Mock()
        # sample return value for listing collections
mock_db.list_collection_names.return_value = ['u', 'v', 'w']
mock_get_connection.return_value = {'some_db': mock_db}
table_list = self.engine.get_all_tables('some_db')
mock_db.list_collection_names.assert_called_once()
self.assertEqual(table_list.rows, ['u', 'v', 'w'])
|
import MySQLdb
import json
from datetime import timedelta, datetime
from unittest.mock import patch, Mock, ANY
import sqlparse
from django.contrib.auth import get_user_model
from django.test import TestCase
from common.config import SysConfig
from sql.engines import EngineBase
from sql.engines.goinception import GoInceptionEngine
from sql.engines.models import ResultSet, ReviewSet, ReviewResult
from sql.engines.mssql import MssqlEngine
from sql.engines.mysql import MysqlEngine
from sql.engines.redis import RedisEngine
from sql.engines.pgsql import PgSQLEngine
from sql.engines.oracle import OracleEngine
from sql.engines.mongo import MongoEngine
from sql.engines.inception import InceptionEngine, _repair_json_str
from sql.models import Instance, SqlWorkflow, SqlWorkflowContent
User = get_user_model()
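# Unit tests for the sql.engines backends; the external database drivers are mocked throughout.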
class TestReviewSet(TestCase):
def test_review_set(self):
new_review_set = ReviewSet()
new_review_set.rows = [{'id': '1679123'}]
self.assertIn('1679123', new_review_set.json())
class TestEngineBase(TestCase):
@classmethod
def setUpClass(cls):
cls.u1 = User(username='some_user', display='用户1')
cls.u1.save()
cls.ins1 = Instance(instance_name='some_ins', type='master', db_type='mssql', host='some_host',
port=1366, user='ins_user', password='some_str')
cls.ins1.save()
cls.wf1 = SqlWorkflow.objects.create(
workflow_name='some_name',
group_id=1,
group_name='g1',
engineer=cls.u1.username,
engineer_display=cls.u1.display,
audit_auth_groups='some_group',
create_time=datetime.now() - timedelta(days=1),
status='workflow_finish',
is_backup=True,
instance=cls.ins1,
db_name='some_db',
syntax_type=1
)
cls.wfc1 = SqlWorkflowContent.objects.create(
workflow=cls.wf1,
sql_content='some_sql',
execute_result=json.dumps([{
'id': 1,
'sql': 'some_content'
}]))
@classmethod
def tearDownClass(cls):
cls.wfc1.delete()
cls.wf1.delete()
cls.ins1.delete()
cls.u1.delete()
def test_init_with_ins(self):
engine = EngineBase(instance=self.ins1)
self.assertEqual(self.ins1.instance_name, engine.instance_name)
self.assertEqual(self.ins1.user, engine.user)
class TestMssql(TestCase):
@classmethod
def setUpClass(cls):
cls.ins1 = Instance(instance_name='some_ins', type='slave', db_type='mssql', host='some_host',
port=1366, user='ins_user', password='some_str')
cls.ins1.save()
cls.engine = MssqlEngine(instance=cls.ins1)
cls.wf = SqlWorkflow.objects.create(
workflow_name='some_name',
group_id=1,
group_name='g1',
engineer_display='',
audit_auth_groups='some_group',
create_time=datetime.now() - timedelta(days=1),
status='workflow_finish',
is_backup=True,
instance=cls.ins1,
db_name='some_db',
syntax_type=1
)
SqlWorkflowContent.objects.create(workflow=cls.wf, sql_content='insert into some_tb values (1)')
@classmethod
def tearDownClass(cls):
cls.ins1.delete()
cls.wf.delete()
SqlWorkflowContent.objects.all().delete()
@patch('sql.engines.mssql.pyodbc.connect')
def testGetConnection(self, connect):
new_engine = MssqlEngine(instance=self.ins1)
new_engine.get_connection()
connect.assert_called_once()
@patch('sql.engines.mssql.pyodbc.connect')
def testQuery(self, connect):
cur = Mock()
connect.return_value.cursor = cur
cur.return_value.execute = Mock()
cur.return_value.fetchmany.return_value = (('v1', 'v2'),)
cur.return_value.description = (('k1', 'some_other_des'), ('k2', 'some_other_des'))
new_engine = MssqlEngine(instance=self.ins1)
query_result = new_engine.query(sql='some_str', limit_num=100)
cur.return_value.execute.assert_called()
cur.return_value.fetchmany.assert_called_once_with(100)
connect.return_value.close.assert_called_once()
self.assertIsInstance(query_result, ResultSet)
@patch.object(MssqlEngine, 'query')
def testAllDb(self, mock_query):
db_result = ResultSet()
db_result.rows = [('db_1',), ('db_2',)]
mock_query.return_value = db_result
new_engine = MssqlEngine(instance=self.ins1)
dbs = new_engine.get_all_databases()
self.assertEqual(dbs.rows, ['db_1', 'db_2'])
@patch.object(MssqlEngine, 'query')
def testAllTables(self, mock_query):
table_result = ResultSet()
table_result.rows = [('tb_1', 'some_des'), ('tb_2', 'some_des')]
mock_query.return_value = table_result
new_engine = MssqlEngine(instance=self.ins1)
tables = new_engine.get_all_tables('some_db')
mock_query.assert_called_once_with(db_name='some_db', sql=ANY)
self.assertEqual(tables.rows, ['tb_1', 'tb_2'])
@patch.object(MssqlEngine, 'query')
def testAllColumns(self, mock_query):
db_result = ResultSet()
db_result.rows = [('col_1', 'type'), ('col_2', 'type2')]
mock_query.return_value = db_result
new_engine = MssqlEngine(instance=self.ins1)
dbs = new_engine.get_all_columns_by_tb('some_db', 'some_tb')
self.assertEqual(dbs.rows, ['col_1', 'col_2'])
@patch.object(MssqlEngine, 'query')
def testDescribe(self, mock_query):
new_engine = MssqlEngine(instance=self.ins1)
new_engine.describe_table('some_db', 'some_db')
mock_query.assert_called_once()
def testQueryCheck(self):
new_engine = MssqlEngine(instance=self.ins1)
        # spot-check just one banned function
banned_sql = 'select concat(phone,1) from user_table'
check_result = new_engine.query_check(db_name='some_db', sql=banned_sql)
self.assertTrue(check_result.get('bad_query'))
banned_sql = 'select phone from user_table where phone=concat(phone,1)'
check_result = new_engine.query_check(db_name='some_db', sql=banned_sql)
self.assertTrue(check_result.get('bad_query'))
sp_sql = "sp_helptext '[SomeName].[SomeAction]'"
check_result = new_engine.query_check(db_name='some_db', sql=sp_sql)
self.assertFalse(check_result.get('bad_query'))
self.assertEqual(check_result.get('filtered_sql'), sp_sql)
def test_filter_sql(self):
new_engine = MssqlEngine(instance=self.ins1)
        # spot-check just one case
banned_sql = 'select user from user_table'
check_result = new_engine.filter_sql(sql=banned_sql, limit_num=10)
self.assertEqual(check_result, "select top 10 user from user_table")
def test_execute_check(self):
new_engine = MssqlEngine(instance=self.ins1)
test_sql = 'use database\ngo\nsome sql1\nGO\nsome sql2\n\r\nGo\nsome sql3\n\r\ngO\n'
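        # the script is expected to be split on GO batch separators, matched case-insensitively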
check_result = new_engine.execute_check(db_name=None, sql=test_sql)
self.assertIsInstance(check_result, ReviewSet)
self.assertEqual(check_result.rows[1].__dict__['sql'], "use database\n")
self.assertEqual(check_result.rows[2].__dict__['sql'], "\nsome sql1\n")
self.assertEqual(check_result.rows[4].__dict__['sql'], "\nsome sql3\n\r\n")
@patch('sql.engines.mssql.MssqlEngine.execute')
def test_execute_workflow(self, mock_execute):
mock_execute.return_value.error = None
new_engine = MssqlEngine(instance=self.ins1)
new_engine.execute_workflow(self.wf)
        # execute is called once per backup table, plus once for the actual execution
mock_execute.assert_called()
self.assertEqual(1, mock_execute.call_count)
@patch('sql.engines.mssql.MssqlEngine.get_connection')
def test_execute(self, mock_connect):
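        # success path: no error recorded and commit is called; failure path below: error captured and rollback is called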
mock_cursor = Mock()
mock_connect.return_value.cursor = mock_cursor
new_engine = MssqlEngine(instance=self.ins1)
execute_result = new_engine.execute('some_db', 'some_sql')
        # verify the result: no exception
self.assertIsNone(execute_result.error)
self.assertEqual('some_sql', execute_result.full_sql)
self.assertEqual(2, len(execute_result.rows))
mock_cursor.return_value.execute.assert_called()
mock_cursor.return_value.commit.assert_called()
mock_cursor.reset_mock()
        # verify the exception path
mock_cursor.return_value.execute.side_effect = Exception('Boom! some exception!')
execute_result = new_engine.execute('some_db', 'some_sql')
self.assertIn('Boom! some exception!', execute_result.error)
self.assertEqual('some_sql', execute_result.full_sql)
self.assertEqual(2, len(execute_result.rows))
mock_cursor.return_value.commit.assert_not_called()
mock_cursor.return_value.rollback.assert_called()
class TestMysql(TestCase):
def setUp(self):
self.ins1 = Instance(instance_name='some_ins', type='slave', db_type='mysql', host='some_host',
port=1366, user='ins_user', password='some_str')
self.ins1.save()
self.sys_config = SysConfig()
self.wf = SqlWorkflow.objects.create(
workflow_name='some_name',
group_id=1,
group_name='g1',
engineer_display='',
audit_auth_groups='some_group',
create_time=datetime.now() - timedelta(days=1),
status='workflow_finish',
is_backup=True,
instance=self.ins1,
db_name='some_db',
syntax_type=1
)
SqlWorkflowContent.objects.create(workflow=self.wf)
def tearDown(self):
self.ins1.delete()
self.sys_config.purge()
SqlWorkflow.objects.all().delete()
SqlWorkflowContent.objects.all().delete()
@patch('MySQLdb.connect')
def test_engine_base_info(self, _conn):
new_engine = MysqlEngine(instance=self.ins1)
self.assertEqual(new_engine.name, 'MySQL')
self.assertEqual(new_engine.info, 'MySQL engine')
@patch('MySQLdb.connect')
def testGetConnection(self, connect):
new_engine = MysqlEngine(instance=self.ins1)
new_engine.get_connection()
connect.assert_called_once()
@patch('MySQLdb.connect')
def testQuery(self, connect):
cur = Mock()
connect.return_value.cursor = cur
cur.return_value.execute = Mock()
cur.return_value.fetchmany.return_value = (('v1', 'v2'),)
cur.return_value.description = (('k1', 'some_other_des'), ('k2', 'some_other_des'))
new_engine = MysqlEngine(instance=self.ins1)
query_result = new_engine.query(sql='some_str', limit_num=100)
cur.return_value.execute.assert_called()
cur.return_value.fetchmany.assert_called_once_with(size=100)
connect.return_value.close.assert_called_once()
self.assertIsInstance(query_result, ResultSet)
@patch.object(MysqlEngine, 'query')
def testAllDb(self, mock_query):
db_result = ResultSet()
db_result.rows = [('db_1',), ('db_2',)]
mock_query.return_value = db_result
new_engine = MysqlEngine(instance=self.ins1)
dbs = new_engine.get_all_databases()
self.assertEqual(dbs.rows, ['db_1', 'db_2'])
@patch.object(MysqlEngine, 'query')
def testAllTables(self, mock_query):
table_result = ResultSet()
table_result.rows = [('tb_1', 'some_des'), ('tb_2', 'some_des')]
mock_query.return_value = table_result
new_engine = MysqlEngine(instance=self.ins1)
tables = new_engine.get_all_tables('some_db')
mock_query.assert_called_once_with(db_name='some_db', sql=ANY)
self.assertEqual(tables.rows, ['tb_1', 'tb_2'])
@patch.object(MysqlEngine, 'query')
def testAllColumns(self, mock_query):
db_result = ResultSet()
db_result.rows = [('col_1', 'type'), ('col_2', 'type2')]
mock_query.return_value = db_result
new_engine = MysqlEngine(instance=self.ins1)
dbs = new_engine.get_all_columns_by_tb('some_db', 'some_tb')
self.assertEqual(dbs.rows, ['col_1', 'col_2'])
@patch.object(MysqlEngine, 'query')
def testDescribe(self, mock_query):
new_engine = MysqlEngine(instance=self.ins1)
new_engine.describe_table('some_db', 'some_db')
mock_query.assert_called_once()
def testQueryCheck(self):
new_engine = MysqlEngine(instance=self.ins1)
sql_without_limit = '-- 测试\n select user from usertable'
check_result = new_engine.query_check(db_name='some_db', sql=sql_without_limit)
self.assertEqual(check_result['filtered_sql'], 'select user from usertable')
def test_query_check_wrong_sql(self):
new_engine = MysqlEngine(instance=self.ins1)
wrong_sql = '-- 测试'
check_result = new_engine.query_check(db_name='some_db', sql=wrong_sql)
self.assertDictEqual(check_result,
{'msg': '不支持的查询语法类型!', 'bad_query': True, 'filtered_sql': '-- 测试', 'has_star': False})
def test_query_check_update_sql(self):
new_engine = MysqlEngine(instance=self.ins1)
update_sql = 'update user set id=0'
check_result = new_engine.query_check(db_name='some_db', sql=update_sql)
self.assertDictEqual(check_result,
{'msg': '不支持的查询语法类型!', 'bad_query': True, 'filtered_sql': 'update user set id=0',
'has_star': False})
def test_filter_sql_with_delimiter(self):
new_engine = MysqlEngine(instance=self.ins1)
sql_without_limit = 'select user from usertable;'
check_result = new_engine.filter_sql(sql=sql_without_limit, limit_num=100)
self.assertEqual(check_result, 'select user from usertable limit 100;')
def test_filter_sql_without_delimiter(self):
new_engine = MysqlEngine(instance=self.ins1)
sql_without_limit = 'select user from usertable'
check_result = new_engine.filter_sql(sql=sql_without_limit, limit_num=100)
self.assertEqual(check_result, 'select user from usertable limit 100;')
def test_filter_sql_with_limit(self):
new_engine = MysqlEngine(instance=self.ins1)
sql_without_limit = 'select user from usertable limit 10'
check_result = new_engine.filter_sql(sql=sql_without_limit, limit_num=1)
self.assertEqual(check_result, 'select user from usertable limit 1;')
def test_filter_sql_with_limit_min(self):
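        # an existing LIMIT smaller than limit_num should be preserved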
new_engine = MysqlEngine(instance=self.ins1)
sql_without_limit = 'select user from usertable limit 10'
check_result = new_engine.filter_sql(sql=sql_without_limit, limit_num=100)
self.assertEqual(check_result, 'select user from usertable limit 10;')
def test_filter_sql_with_limit_offset(self):
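        # LIMIT n OFFSET m should be collapsed to the enforced limit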
new_engine = MysqlEngine(instance=self.ins1)
sql_without_limit = 'select user from usertable limit 10 offset 100'
check_result = new_engine.filter_sql(sql=sql_without_limit, limit_num=1)
self.assertEqual(check_result, 'select user from usertable limit 1;')
def test_filter_sql_with_limit_nn(self):
new_engine = MysqlEngine(instance=self.ins1)
sql_without_limit = 'select user from usertable limit 10, 100'
check_result = new_engine.filter_sql(sql=sql_without_limit, limit_num=1)
self.assertEqual(check_result, 'select user from usertable limit 1;')
def test_filter_sql_upper(self):
new_engine = MysqlEngine(instance=self.ins1)
sql_without_limit = 'SELECT USER FROM usertable LIMIT 10, 100'
check_result = new_engine.filter_sql(sql=sql_without_limit, limit_num=1)
self.assertEqual(check_result, 'SELECT USER FROM usertable limit 1;')
def test_filter_sql_not_select(self):
new_engine = MysqlEngine(instance=self.ins1)
sql_without_limit = 'show create table usertable;'
check_result = new_engine.filter_sql(sql=sql_without_limit, limit_num=1)
self.assertEqual(check_result, 'show create table usertable;')
@patch('sql.engines.mysql.data_masking', return_value=ResultSet())
def test_query_masking(self, _data_masking):
query_result = ResultSet()
new_engine = MysqlEngine(instance=self.ins1)
masking_result = new_engine.query_masking(db_name='archery', sql='select 1', resultset=query_result)
self.assertIsInstance(masking_result, ResultSet)
@patch('sql.engines.mysql.data_masking', return_value=ResultSet())
def test_query_masking_not_select(self, _data_masking):
query_result = ResultSet()
new_engine = MysqlEngine(instance=self.ins1)
masking_result = new_engine.query_masking(db_name='archery', sql='explain select 1', resultset=query_result)
self.assertEqual(masking_result, query_result)
@patch('sql.engines.mysql.InceptionEngine')
def test_execute_check_select_sql(self, _inception_engine):
self.sys_config.set('inception', 'true')
sql = 'select * from user'
inc_row = ReviewResult(id=1,
errlevel=0,
stagestatus='Audit completed',
errormessage='None',
sql=sql,
affected_rows=0,
execute_time=0, )
row = ReviewResult(id=1, errlevel=2,
stagestatus='驳回不支持语句',
errormessage='仅支持DML和DDL语句,查询语句请使用SQL查询功能!',
sql=sql)
_inception_engine.return_value.execute_check.return_value = ReviewSet(full_sql=sql, rows=[inc_row])
new_engine = MysqlEngine(instance=self.ins1)
check_result = new_engine.execute_check(db_name='archery', sql=sql)
self.assertIsInstance(check_result, ReviewSet)
self.assertEqual(check_result.rows[0].__dict__, row.__dict__)
@patch('sql.engines.mysql.InceptionEngine')
def test_execute_check_critical_sql(self, _inception_engine):
self.sys_config.set('inception', 'true')
self.sys_config.set('critical_ddl_regex', '^|update')
self.sys_config.get_all_config()
sql = 'update user set id=1'
inc_row = ReviewResult(id=1,
errlevel=0,
stagestatus='Audit completed',
errormessage='None',
sql=sql,
affected_rows=0,
execute_time=0, )
row = ReviewResult(id=1, errlevel=2,
stagestatus='驳回高危SQL',
errormessage='禁止提交匹配' + '^|update' + '条件的语句!',
sql=sql)
_inception_engine.return_value.execute_check.return_value = ReviewSet(full_sql=sql, rows=[inc_row])
new_engine = MysqlEngine(instance=self.ins1)
check_result = new_engine.execute_check(db_name='archery', sql=sql)
self.assertIsInstance(check_result, ReviewSet)
self.assertEqual(check_result.rows[0].__dict__, row.__dict__)
@patch('sql.engines.mysql.InceptionEngine')
def test_execute_check_normal_sql(self, _inception_engine):
self.sys_config.set('inception', 'true')
sql = 'update user set id=1'
row = ReviewResult(id=1,
errlevel=0,
stagestatus='Audit completed',
errormessage='None',
sql=sql,
affected_rows=0,
execute_time=0, )
_inception_engine.return_value.execute_check.return_value = ReviewSet(full_sql=sql, rows=[row])
new_engine = MysqlEngine(instance=self.ins1)
check_result = new_engine.execute_check(db_name='archery', sql=sql)
self.assertIsInstance(check_result, ReviewSet)
self.assertEqual(check_result.rows[0].__dict__, row.__dict__)
@patch('sql.engines.mysql.InceptionEngine')
def test_execute_check_normal_sql_with_Exception(self, _inception_engine):
sql = 'update user set id=1'
_inception_engine.return_value.execute_check.side_effect = RuntimeError()
new_engine = MysqlEngine(instance=self.ins1)
with self.assertRaises(RuntimeError):
new_engine.execute_check(db_name=0, sql=sql)
@patch.object(MysqlEngine, 'query')
@patch('sql.engines.mysql.InceptionEngine')
def test_execute_workflow(self, _inception_engine, _query):
self.sys_config.set('inception', 'true')
sql = 'update user set id=1'
_inception_engine.return_value.execute.return_value = ReviewSet(full_sql=sql)
_query.return_value.rows = (('0',),)
new_engine = MysqlEngine(instance=self.ins1)
execute_result = new_engine.execute_workflow(self.wf)
self.assertIsInstance(execute_result, ReviewSet)
@patch('MySQLdb.connect.cursor.execute')
@patch('MySQLdb.connect.cursor')
@patch('MySQLdb.connect')
def test_execute(self, _connect, _cursor, _execute):
new_engine = MysqlEngine(instance=self.ins1)
execute_result = new_engine.execute(self.wf)
self.assertIsInstance(execute_result, ResultSet)
@patch('MySQLdb.connect')
def test_server_version(self, _connect):
_connect.return_value.get_server_info.return_value = '5.7.20-16log'
new_engine = MysqlEngine(instance=self.ins1)
server_version = new_engine.server_version
self.assertTupleEqual(server_version, (5, 7, 20))
@patch.object(MysqlEngine, 'query')
def test_get_variables_not_filter(self, _query):
new_engine = MysqlEngine(instance=self.ins1)
new_engine.get_variables()
_query.assert_called_once()
@patch('MySQLdb.connect')
@patch.object(MysqlEngine, 'query')
def test_get_variables_filter(self, _query, _connect):
_connect.return_value.get_server_info.return_value = '5.7.20-16log'
new_engine = MysqlEngine(instance=self.ins1)
new_engine.get_variables(variables=['binlog_format'])
_query.assert_called()
@patch.object(MysqlEngine, 'query')
def test_set_variable(self, _query):
new_engine = MysqlEngine(instance=self.ins1)
new_engine.set_variable('binlog_format', 'ROW')
_query.assert_called_once_with(sql="set global binlog_format=ROW;")
@patch('sql.engines.mysql.GoInceptionEngine')
def test_osc_go_inception(self, _inception_engine):
self.sys_config.set('inception', 'false')
_inception_engine.return_value.osc_control.return_value = ReviewSet()
command = 'get'
sqlsha1 = 'xxxxx'
new_engine = MysqlEngine(instance=self.ins1)
new_engine.osc_control(sqlsha1=sqlsha1, command=command)
@patch('sql.engines.mysql.InceptionEngine')
def test_osc_inception(self, _inception_engine):
self.sys_config.set('inception', 'true')
_inception_engine.return_value.osc_control.return_value = ReviewSet()
command = 'get'
sqlsha1 = 'xxxxx'
new_engine = MysqlEngine(instance=self.ins1)
new_engine.osc_control(sqlsha1=sqlsha1, command=command)
@patch.object(MysqlEngine, 'query')
def test_kill_connection(self, _query):
new_engine = MysqlEngine(instance=self.ins1)
new_engine.kill_connection(100)
_query.assert_called_once_with(sql="kill 100")
@patch.object(MysqlEngine, 'query')
def test_seconds_behind_master(self, _query):
new_engine = MysqlEngine(instance=self.ins1)
new_engine.seconds_behind_master
_query.assert_called_once_with(sql="show slave status", close_conn=False,
cursorclass=MySQLdb.cursors.DictCursor)
class TestRedis(TestCase):
@classmethod
def setUpClass(cls):
cls.ins = Instance(instance_name='some_ins', type='slave', db_type='redis', host='some_host',
port=1366, user='ins_user', password='some_str')
cls.ins.save()
@classmethod
def tearDownClass(cls):
cls.ins.delete()
SqlWorkflow.objects.all().delete()
SqlWorkflowContent.objects.all().delete()
@patch('redis.Redis')
def test_engine_base_info(self, _conn):
new_engine = RedisEngine(instance=self.ins)
self.assertEqual(new_engine.name, 'Redis')
self.assertEqual(new_engine.info, 'Redis engine')
@patch('redis.Redis')
def test_get_connection(self, _conn):
new_engine = RedisEngine(instance=self.ins)
new_engine.get_connection()
_conn.assert_called_once()
@patch('redis.Redis.execute_command', return_value=[1, 2, 3])
def test_query_return_list(self, _execute_command):
new_engine = RedisEngine(instance=self.ins)
query_result = new_engine.query(db_name=0, sql='keys *', limit_num=100)
self.assertIsInstance(query_result, ResultSet)
self.assertTupleEqual(query_result.rows, ([1], [2], [3]))
@patch('redis.Redis.execute_command', return_value='text')
def test_query_return_str(self, _execute_command):
new_engine = RedisEngine(instance=self.ins)
query_result = new_engine.query(db_name=0, sql='keys *', limit_num=100)
self.assertIsInstance(query_result, ResultSet)
self.assertTupleEqual(query_result.rows, (['text'],))
@patch('redis.Redis.execute_command', return_value='text')
def test_query_execute(self, _execute_command):
new_engine = RedisEngine(instance=self.ins)
query_result = new_engine.query(db_name=0, sql='keys *', limit_num=100)
self.assertIsInstance(query_result, ResultSet)
self.assertTupleEqual(query_result.rows, (['text'],))
@patch('redis.Redis.config_get', return_value={"databases": 4})
def test_get_all_databases(self, _config_get):
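        # config_get reports 4 databases, so db names '0' through '3' are expected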
new_engine = RedisEngine(instance=self.ins)
dbs = new_engine.get_all_databases()
self.assertListEqual(dbs.rows, ['0', '1', '2', '3'])
def test_query_check_safe_cmd(self):
safe_cmd = "keys 1*"
new_engine = RedisEngine(instance=self.ins)
check_result = new_engine.query_check(db_name=0, sql=safe_cmd)
self.assertDictEqual(check_result,
{'msg': '禁止执行该命令!', 'bad_query': True, 'filtered_sql': safe_cmd, 'has_star': False})
def test_query_check_danger_cmd(self):
safe_cmd = "keys *"
new_engine = RedisEngine(instance=self.ins)
check_result = new_engine.query_check(db_name=0, sql=safe_cmd)
self.assertDictEqual(check_result,
{'msg': '禁止执行该命令!', 'bad_query': True, 'filtered_sql': safe_cmd, 'has_star': False})
def test_filter_sql(self):
safe_cmd = "keys 1*"
new_engine = RedisEngine(instance=self.ins)
check_result = new_engine.filter_sql(sql=safe_cmd, limit_num=100)
self.assertEqual(check_result, 'keys 1*')
def test_query_masking(self):
query_result = ResultSet()
new_engine = RedisEngine(instance=self.ins)
masking_result = new_engine.query_masking(db_name=0, sql='', resultset=query_result)
self.assertEqual(masking_result, query_result)
def test_execute_check(self):
sql = 'set 1 1'
row = ReviewResult(id=1,
errlevel=0,
stagestatus='Audit completed',
errormessage='None',
sql=sql,
affected_rows=0,
execute_time=0)
new_engine = RedisEngine(instance=self.ins)
check_result = new_engine.execute_check(db_name=0, sql=sql)
self.assertIsInstance(check_result, ReviewSet)
self.assertEqual(check_result.rows[0].__dict__, row.__dict__)
@patch('redis.Redis.execute_command', return_value='text')
def test_execute_workflow_success(self, _execute_command):
sql = 'set 1 1'
row = ReviewResult(id=1,
errlevel=0,
stagestatus='Execute Successfully',
errormessage='None',
sql=sql,
affected_rows=0,
execute_time=0)
wf = SqlWorkflow.objects.create(
workflow_name='some_name',
group_id=1,
group_name='g1',
engineer_display='',
audit_auth_groups='some_group',
create_time=datetime.now() - timedelta(days=1),
status='workflow_finish',
is_backup=True,
instance=self.ins,
db_name='some_db',
syntax_type=1
)
SqlWorkflowContent.objects.create(workflow=wf, sql_content=sql)
new_engine = RedisEngine(instance=self.ins)
execute_result = new_engine.execute_workflow(workflow=wf)
self.assertIsInstance(execute_result, ReviewSet)
self.assertEqual(execute_result.rows[0].__dict__.keys(), row.__dict__.keys())
class TestPgSQL(TestCase):
@classmethod
def setUpClass(cls):
cls.ins = Instance(instance_name='some_ins', type='slave', db_type='pgsql', host='some_host',
port=1366, user='ins_user', password='some_str')
cls.ins.save()
cls.sys_config = SysConfig()
@classmethod
def tearDownClass(cls):
cls.ins.delete()
cls.sys_config.purge()
@patch('psycopg2.connect')
def test_engine_base_info(self, _conn):
new_engine = PgSQLEngine(instance=self.ins)
self.assertEqual(new_engine.name, 'PgSQL')
self.assertEqual(new_engine.info, 'PgSQL engine')
@patch('psycopg2.connect')
def test_get_connection(self, _conn):
new_engine = PgSQLEngine(instance=self.ins)
new_engine.get_connection("some_dbname")
_conn.assert_called_once()
@patch('psycopg2.connect.cursor.execute')
@patch('psycopg2.connect.cursor')
@patch('psycopg2.connect')
def test_query(self, _conn, _cursor, _execute):
_conn.return_value.cursor.return_value.fetchmany.return_value = [(1,)]
new_engine = PgSQLEngine(instance=self.ins)
query_result = new_engine.query(db_name="some_dbname", sql='select 1', limit_num=100, schema_name="some_schema")
self.assertIsInstance(query_result, ResultSet)
self.assertListEqual(query_result.rows, [(1,)])
@patch('psycopg2.connect.cursor.execute')
@patch('psycopg2.connect.cursor')
@patch('psycopg2.connect')
def test_query_not_limit(self, _conn, _cursor, _execute):
_conn.return_value.cursor.return_value.fetchall.return_value = [(1,)]
new_engine = PgSQLEngine(instance=self.ins)
query_result = new_engine.query(db_name="some_dbname", sql='select 1', limit_num=0, schema_name="some_schema")
self.assertIsInstance(query_result, ResultSet)
self.assertListEqual(query_result.rows, [(1,)])
@patch('sql.engines.pgsql.PgSQLEngine.query',
return_value=ResultSet(rows=[('postgres',), ('archery',), ('template1',), ('template0',)]))
def test_get_all_databases(self, query):
new_engine = PgSQLEngine(instance=self.ins)
dbs = new_engine.get_all_databases()
self.assertListEqual(dbs.rows, ['archery'])
@patch('sql.engines.pgsql.PgSQLEngine.query',
return_value=ResultSet(rows=[('information_schema',), ('archery',), ('pg_catalog',)]))
def test_get_all_schemas(self, _query):
new_engine = PgSQLEngine(instance=self.ins)
schemas = new_engine.get_all_schemas(db_name='archery')
self.assertListEqual(schemas.rows, ['archery'])
@patch('sql.engines.pgsql.PgSQLEngine.query', return_value=ResultSet(rows=[('test',), ('test2',)]))
def test_get_all_tables(self, _query):
new_engine = PgSQLEngine(instance=self.ins)
tables = new_engine.get_all_tables(db_name='archery', schema_name='archery')
self.assertListEqual(tables.rows, ['test2'])
@patch('sql.engines.pgsql.PgSQLEngine.query',
return_value=ResultSet(rows=[('id',), ('name',)]))
def test_get_all_columns_by_tb(self, _query):
new_engine = PgSQLEngine(instance=self.ins)
columns = new_engine.get_all_columns_by_tb(db_name='archery', tb_name='test2', schema_name='archery')
self.assertListEqual(columns.rows, ['id', 'name'])
@patch('sql.engines.pgsql.PgSQLEngine.query',
return_value=ResultSet(rows=[('postgres',), ('archery',), ('template1',), ('template0',)]))
def test_describe_table(self, _query):
new_engine = PgSQLEngine(instance=self.ins)
describe = new_engine.describe_table(db_name='archery', schema_name='archery', tb_name='text')
self.assertIsInstance(describe, ResultSet)
def test_query_check_disable_sql(self):
sql = "update xxx set a=1 "
new_engine = PgSQLEngine(instance=self.ins)
check_result = new_engine.query_check(db_name='archery', sql=sql)
self.assertDictEqual(check_result,
{'msg': '不支持的查询语法类型!', 'bad_query': True, 'filtered_sql': sql.strip(), 'has_star': False})
def test_query_check_star_sql(self):
sql = "select * from xx "
new_engine = PgSQLEngine(instance=self.ins)
check_result = new_engine.query_check(db_name='archery', sql=sql)
self.assertDictEqual(check_result,
{'msg': 'SQL语句中含有 * ', 'bad_query': False, 'filtered_sql': sql.strip(), 'has_star': True})
def test_filter_sql_with_delimiter(self):
sql = "select * from xx;"
new_engine = PgSQLEngine(instance=self.ins)
check_result = new_engine.filter_sql(sql=sql, limit_num=100)
self.assertEqual(check_result, "select * from xx limit 100;")
def test_filter_sql_without_delimiter(self):
sql = "select * from xx"
new_engine = PgSQLEngine(instance=self.ins)
check_result = new_engine.filter_sql(sql=sql, limit_num=100)
self.assertEqual(check_result, "select * from xx limit 100;")
def test_filter_sql_with_limit(self):
sql = "select * from xx limit 10"
new_engine = PgSQLEngine(instance=self.ins)
check_result = new_engine.filter_sql(sql=sql, limit_num=1)
self.assertEqual(check_result, "select * from xx limit 10;")
def test_query_masking(self):
query_result = ResultSet()
new_engine = PgSQLEngine(instance=self.ins)
masking_result = new_engine.query_masking(db_name=0, sql='', resultset=query_result)
self.assertEqual(masking_result, query_result)
def test_execute_check_select_sql(self):
sql = 'select * from user;'
row = ReviewResult(id=1, errlevel=2,
stagestatus='驳回不支持语句',
errormessage='仅支持DML和DDL语句,查询语句请使用SQL查询功能!',
sql=sql)
new_engine = PgSQLEngine(instance=self.ins)
check_result = new_engine.execute_check(db_name='archery', sql=sql)
self.assertIsInstance(check_result, ReviewSet)
self.assertEqual(check_result.rows[0].__dict__, row.__dict__)
def test_execute_check_critical_sql(self):
self.sys_config.set('critical_ddl_regex', '^|update')
self.sys_config.get_all_config()
sql = 'update user set id=1'
row = ReviewResult(id=1, errlevel=2,
stagestatus='驳回高危SQL',
errormessage='禁止提交匹配' + '^|update' + '条件的语句!',
sql=sql)
new_engine = PgSQLEngine(instance=self.ins)
check_result = new_engine.execute_check(db_name='archery', sql=sql)
self.assertIsInstance(check_result, ReviewSet)
self.assertEqual(check_result.rows[0].__dict__, row.__dict__)
def test_execute_check_normal_sql(self):
self.sys_config.purge()
sql = 'alter table tb set id=1'
row = ReviewResult(id=1,
errlevel=0,
stagestatus='Audit completed',
errormessage='None',
sql=sql,
affected_rows=0,
execute_time=0, )
new_engine = PgSQLEngine(instance=self.ins)
check_result = new_engine.execute_check(db_name='archery', sql=sql)
self.assertIsInstance(check_result, ReviewSet)
self.assertEqual(check_result.rows[0].__dict__, row.__dict__)
@patch('psycopg2.connect.cursor.execute')
@patch('psycopg2.connect.cursor')
@patch('psycopg2.connect')
def test_execute_workflow_success(self, _conn, _cursor, _execute):
sql = 'update user set id=1'
row = ReviewResult(id=1,
errlevel=0,
stagestatus='Execute Successfully',
errormessage='None',
sql=sql,
affected_rows=0,
execute_time=0)
wf = SqlWorkflow.objects.create(
workflow_name='some_name',
group_id=1,
group_name='g1',
engineer_display='',
audit_auth_groups='some_group',
create_time=datetime.now() - timedelta(days=1),
status='workflow_finish',
is_backup=True,
instance=self.ins,
db_name='some_db',
syntax_type=1
)
SqlWorkflowContent.objects.create(workflow=wf, sql_content=sql)
new_engine = PgSQLEngine(instance=self.ins)
execute_result = new_engine.execute_workflow(workflow=wf)
self.assertIsInstance(execute_result, ReviewSet)
self.assertEqual(execute_result.rows[0].__dict__.keys(), row.__dict__.keys())
@patch('psycopg2.connect.cursor.execute')
@patch('psycopg2.connect.cursor')
@patch('psycopg2.connect', return_value=RuntimeError)
def test_execute_workflow_exception(self, _conn, _cursor, _execute):
sql = 'update user set id=1'
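        # note: only the attribute keys of the result row are compared below, not the message text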
row = ReviewResult(id=1,
errlevel=2,
stagestatus='Execute Failed',
errormessage=f'异常信息:{f"Oracle命令执行报错,语句:{sql}"}',
sql=sql,
affected_rows=0,
execute_time=0, )
wf = SqlWorkflow.objects.create(
workflow_name='some_name',
group_id=1,
group_name='g1',
engineer_display='',
audit_auth_groups='some_group',
create_time=datetime.now() - timedelta(days=1),
status='workflow_finish',
is_backup=True,
instance=self.ins,
db_name='some_db',
syntax_type=1
)
SqlWorkflowContent.objects.create(workflow=wf, sql_content=sql)
with self.assertRaises(AttributeError):
new_engine = PgSQLEngine(instance=self.ins)
execute_result = new_engine.execute_workflow(workflow=wf)
self.assertIsInstance(execute_result, ReviewSet)
self.assertEqual(execute_result.rows[0].__dict__.keys(), row.__dict__.keys())
class TestModel(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_result_set_rows_shadow(self):
        # test the mutable-default pitfall:
        # if the default is a shared empty list and rows are updated by concatenation,
        # data from a previous instance would leak into new ones
result_set1 = ResultSet()
for i in range(10):
result_set1.rows += [i]
brand_new_result_set = ResultSet()
self.assertEqual(brand_new_result_set.rows, [])
review_set1 = ReviewSet()
for i in range(10):
review_set1.rows += [i]
brand_new_review_set = ReviewSet()
self.assertEqual(brand_new_review_set.rows, [])
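        # (the usual fix for such a pitfall is a per-instance default, e.g. a dataclass
        #  field(default_factory=list) instead of a shared class-level [] default)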
class TestInception(TestCase):
def setUp(self):
self.ins = Instance.objects.create(instance_name='some_ins', type='slave', db_type='mysql', host='some_host',
port=3306, user='ins_user', password='some_str')
self.ins_inc = Instance.objects.create(instance_name='some_ins_inc', type='slave', db_type='inception',
host='some_host', port=6669)
self.wf = SqlWorkflow.objects.create(
workflow_name='some_name',
group_id=1,
group_name='g1',
engineer_display='',
audit_auth_groups='some_group',
create_time=datetime.now() - timedelta(days=1),
status='workflow_finish',
is_backup=True,
instance=self.ins,
db_name='some_db',
syntax_type=1
)
SqlWorkflowContent.objects.create(workflow=self.wf)
def tearDown(self):
self.ins.delete()
self.ins_inc.delete()
SqlWorkflow.objects.all().delete()
SqlWorkflowContent.objects.all().delete()
@patch('MySQLdb.connect')
def test_get_connection(self, _connect):
new_engine = InceptionEngine()
new_engine.get_connection()
_connect.assert_called_once()
@patch('MySQLdb.connect')
def test_get_backup_connection(self, _connect):
new_engine = InceptionEngine()
new_engine.get_backup_connection()
_connect.assert_called_once()
@patch('sql.engines.inception.InceptionEngine.query')
def test_execute_check_normal_sql(self, _query):
sql = 'update user set id=100'
row = [1, 'CHECKED', 0, 'Audit completed', 'None', 'use archery', 0, "'0_0_0'", 'None', '0', '']
_query.return_value = ResultSet(full_sql=sql, rows=[row])
new_engine = InceptionEngine()
check_result = new_engine.execute_check(instance=self.ins, db_name=0, sql=sql)
self.assertIsInstance(check_result, ReviewSet)
@patch('sql.engines.inception.InceptionEngine.query')
def test_execute_exception(self, _query):
sql = 'update user set id=100'
row = [1, 'CHECKED', 1, 'Execute failed', 'None', 'use archery', 0, "'0_0_0'", 'None', '0', '']
column_list = ['ID', 'stage', 'errlevel', 'stagestatus', 'errormessage', 'SQL', 'Affected_rows', 'sequence',
'backup_dbname', 'execute_time', 'sqlsha1']
_query.return_value = ResultSet(full_sql=sql, rows=[row], column_list=column_list)
new_engine = InceptionEngine()
execute_result = new_engine.execute(workflow=self.wf)
self.assertIsInstance(execute_result, ReviewSet)
@patch('sql.engines.inception.InceptionEngine.query')
def test_execute_finish(self, _query):
sql = 'update user set id=100'
row = [1, 'CHECKED', 0, 'Execute Successfully', 'None', 'use archery', 0, "'0_0_0'", 'None', '0', '']
column_list = ['ID', 'stage', 'errlevel', 'stagestatus', 'errormessage', 'SQL', 'Affected_rows', 'sequence',
'backup_dbname', 'execute_time', 'sqlsha1']
_query.return_value = ResultSet(full_sql=sql, rows=[row], column_list=column_list)
new_engine = InceptionEngine()
execute_result = new_engine.execute(workflow=self.wf)
self.assertIsInstance(execute_result, ReviewSet)
@patch('MySQLdb.connect.cursor.execute')
@patch('MySQLdb.connect.cursor')
@patch('MySQLdb.connect')
def test_query(self, _conn, _cursor, _execute):
_conn.return_value.cursor.return_value.fetchall.return_value = [(1,)]
new_engine = InceptionEngine()
query_result = new_engine.query(db_name=0, sql='select 1', limit_num=100)
self.assertIsInstance(query_result, ResultSet)
@patch('MySQLdb.connect.cursor.execute')
@patch('MySQLdb.connect.cursor')
@patch('MySQLdb.connect')
def test_query_not_limit(self, _conn, _cursor, _execute):
_conn.return_value.cursor.return_value.fetchall.return_value = [(1,)]
new_engine = InceptionEngine(instance=self.ins)
query_result = new_engine.query(db_name=0, sql='select 1', limit_num=0)
self.assertIsInstance(query_result, ResultSet)
@patch('sql.engines.inception.InceptionEngine.query')
def test_query_print(self, _query):
sql = 'update user set id=100'
row = [1,
'select * from sql_instance limit 100',
0,
'{"command":"select","select_list":[{"type":"FIELD_ITEM","field":"*"}],"table_ref":[{"db":"archery","table":"sql_instance"}],"limit":{"limit":[{"type":"INT_ITEM","value":"100"}]}}',
'None']
column_list = ['ID', 'statement', 'errlevel', 'query_tree', 'errmsg']
_query.return_value = ResultSet(full_sql=sql, rows=[row], column_list=column_list)
new_engine = InceptionEngine()
print_result = new_engine.query_print(self.ins, db_name=None, sql=sql)
self.assertDictEqual(print_result, json.loads(_repair_json_str(row[3])))
@patch('MySQLdb.connect')
def test_get_rollback_list(self, _connect):
self.wf.sqlworkflowcontent.execute_result = """[{
"id": 1,
"stage": "RERUN",
"errlevel": 0,
"stagestatus": "Execute Successfully",
"errormessage": "None",
"sql": "use archer_test",
"affected_rows": 0,
"sequence": "'1554135032_13038_0'",
"backup_dbname": "None",
"execute_time": "0.000",
"sqlsha1": "",
"actual_affected_rows": 0
}, {
"id": 2,
"stage": "EXECUTED",
"errlevel": 0,
"stagestatus": "Execute Successfully Backup successfully",
"errormessage": "None",
"sql": "insert into tt1 (user_name)values('A'),('B'),('C')",
"affected_rows": 3,
"sequence": "'1554135032_13038_1'",
"backup_dbname": "mysql_3306_archer_test",
"execute_time": "0.000",
"sqlsha1": "",
"actual_affected_rows": 3
}]"""
self.wf.sqlworkflowcontent.save()
new_engine = InceptionEngine()
new_engine.get_rollback(self.wf)
@patch('sql.engines.inception.InceptionEngine.query')
def test_osc_get(self, _query):
new_engine = InceptionEngine()
command = 'get'
sqlsha1 = 'xxxxx'
sql = f"inception get osc_percent '{sqlsha1}';"
_query.return_value = ResultSet(full_sql=sql, rows=[], column_list=[])
new_engine.osc_control(sqlsha1=sqlsha1, command=command)
_query.assert_called_once_with(sql=sql)
@patch('sql.engines.inception.InceptionEngine.query')
def test_osc_kill(self, _query):
new_engine = InceptionEngine()
command = 'kill'
sqlsha1 = 'xxxxx'
sql = f"inception stop alter '{sqlsha1}';"
_query.return_value = ResultSet(full_sql=sql, rows=[], column_list=[])
new_engine.osc_control(sqlsha1=sqlsha1, command=command)
_query.assert_called_once_with(sql=sql)
@patch('sql.engines.inception.InceptionEngine.query')
def test_osc_not_support(self, _query):
new_engine = InceptionEngine()
command = 'stop'
sqlsha1 = 'xxxxx'
sql = f"inception stop alter '{sqlsha1}';"
_query.return_value = ResultSet(full_sql=sql, rows=[], column_list=[])
with self.assertRaisesMessage(ValueError, 'pt-osc不支持暂停和恢复,需要停止执行请使用终止按钮!'):
new_engine.osc_control(sqlsha1=sqlsha1, command=command)
@patch('sql.engines.inception.InceptionEngine.query')
def test_get_variables(self, _query):
new_engine = InceptionEngine(instance=self.ins_inc)
new_engine.get_variables()
sql = f"inception get variables;"
_query.assert_called_once_with(sql=sql)
@patch('sql.engines.inception.InceptionEngine.query')
def test_get_variables_filter(self, _query):
new_engine = InceptionEngine(instance=self.ins_inc)
new_engine.get_variables(variables=['inception_osc_on'])
sql = f"inception get variables 'inception_osc_on';"
_query.assert_called_once_with(sql=sql)
@patch('sql.engines.inception.InceptionEngine.query')
def test_set_variable(self, _query):
new_engine = InceptionEngine(instance=self.ins)
new_engine.set_variable('inception_osc_on', 'on')
_query.assert_called_once_with(sql="inception set inception_osc_on=on;")
class TestGoInception(TestCase):
def setUp(self):
self.ins = Instance.objects.create(instance_name='some_ins', type='slave', db_type='mysql',
host='some_host',
port=3306, user='ins_user', password='some_str')
self.ins_inc = Instance.objects.create(instance_name='some_ins_inc', type='slave', db_type='goinception',
host='some_host', port=4000)
self.wf = SqlWorkflow.objects.create(
workflow_name='some_name',
group_id=1,
group_name='g1',
engineer_display='',
audit_auth_groups='some_group',
create_time=datetime.now() - timedelta(days=1),
status='workflow_finish',
is_backup=True,
instance=self.ins,
db_name='some_db',
syntax_type=1
)
SqlWorkflowContent.objects.create(workflow=self.wf)
def tearDown(self):
self.ins.delete()
self.ins_inc.delete()
SqlWorkflow.objects.all().delete()
SqlWorkflowContent.objects.all().delete()
@patch('MySQLdb.connect')
def test_get_connection(self, _connect):
new_engine = GoInceptionEngine()
new_engine.get_connection()
_connect.assert_called_once()
@patch('sql.engines.goinception.GoInceptionEngine.query')
def test_execute_check_normal_sql(self, _query):
sql = 'update user set id=100'
row = [1, 'CHECKED', 0, 'Audit completed', 'None', 'use archery', 0, "'0_0_0'", 'None', '0', '', '']
_query.return_value = ResultSet(full_sql=sql, rows=[row])
new_engine = GoInceptionEngine()
check_result = new_engine.execute_check(instance=self.ins, db_name=0, sql=sql)
self.assertIsInstance(check_result, ReviewSet)
@patch('sql.engines.goinception.GoInceptionEngine.query')
def test_execute_exception(self, _query):
sql = 'update user set id=100'
row = [1, 'CHECKED', 1, 'Execute failed', 'None', 'use archery', 0, "'0_0_0'", 'None', '0', '', '']
column_list = ['order_id', 'stage', 'error_level', 'stage_status', 'error_message', 'sql',
'affected_rows', 'sequence', 'backup_dbname', 'execute_time', 'sqlsha1', 'backup_time']
_query.return_value = ResultSet(full_sql=sql, rows=[row], column_list=column_list)
new_engine = GoInceptionEngine()
execute_result = new_engine.execute(workflow=self.wf)
self.assertIsInstance(execute_result, ReviewSet)
@patch('sql.engines.goinception.GoInceptionEngine.query')
def test_execute_finish(self, _query):
sql = 'update user set id=100'
row = [1, 'CHECKED', 0, 'Execute Successfully', 'None', 'use archery', 0, "'0_0_0'", 'None', '0', '', '']
column_list = ['order_id', 'stage', 'error_level', 'stage_status', 'error_message', 'sql',
'affected_rows', 'sequence', 'backup_dbname', 'execute_time', 'sqlsha1', 'backup_time']
_query.return_value = ResultSet(full_sql=sql, rows=[row], column_list=column_list)
new_engine = GoInceptionEngine()
execute_result = new_engine.execute(workflow=self.wf)
self.assertIsInstance(execute_result, ReviewSet)
@patch('MySQLdb.connect.cursor.execute')
@patch('MySQLdb.connect.cursor')
@patch('MySQLdb.connect')
def test_query(self, _conn, _cursor, _execute):
_conn.return_value.cursor.return_value.fetchall.return_value = [(1,)]
new_engine = GoInceptionEngine()
query_result = new_engine.query(db_name=0, sql='select 1', limit_num=100)
self.assertIsInstance(query_result, ResultSet)
@patch('MySQLdb.connect.cursor.execute')
@patch('MySQLdb.connect.cursor')
@patch('MySQLdb.connect')
def test_query_not_limit(self, _conn, _cursor, _execute):
_conn.return_value.cursor.return_value.fetchall.return_value = [(1,)]
new_engine = GoInceptionEngine(instance=self.ins)
query_result = new_engine.query(db_name=0, sql='select 1', limit_num=0)
self.assertIsInstance(query_result, ResultSet)
@patch('sql.engines.goinception.GoInceptionEngine.query')
def test_osc_get(self, _query):
new_engine = GoInceptionEngine()
command = 'get'
sqlsha1 = 'xxxxx'
sql = f"inception get osc_percent '{sqlsha1}';"
_query.return_value = ResultSet(full_sql=sql, rows=[], column_list=[])
new_engine.osc_control(sqlsha1=sqlsha1, command=command)
_query.assert_called_once_with(sql=sql)
@patch('sql.engines.goinception.GoInceptionEngine.query')
def test_osc_pause(self, _query):
new_engine = GoInceptionEngine()
command = 'pause'
sqlsha1 = 'xxxxx'
sql = f"inception {command} osc '{sqlsha1}';"
_query.return_value = ResultSet(full_sql=sql, rows=[], column_list=[])
new_engine.osc_control(sqlsha1=sqlsha1, command=command)
_query.assert_called_once_with(sql=sql)
@patch('sql.engines.goinception.GoInceptionEngine.query')
def test_osc_resume(self, _query):
new_engine = GoInceptionEngine()
command = 'resume'
sqlsha1 = 'xxxxx'
sql = f"inception {command} osc '{sqlsha1}';"
_query.return_value = ResultSet(full_sql=sql, rows=[], column_list=[])
new_engine.osc_control(sqlsha1=sqlsha1, command=command)
_query.assert_called_once_with(sql=sql)
@patch('sql.engines.goinception.GoInceptionEngine.query')
def test_osc_kill(self, _query):
new_engine = GoInceptionEngine()
command = 'kill'
sqlsha1 = 'xxxxx'
sql = f"inception kill osc '{sqlsha1}';"
_query.return_value = ResultSet(full_sql=sql, rows=[], column_list=[])
new_engine.osc_control(sqlsha1=sqlsha1, command=command)
_query.assert_called_once_with(sql=sql)
@patch('sql.engines.goinception.GoInceptionEngine.query')
def test_get_variables(self, _query):
new_engine = GoInceptionEngine(instance=self.ins_inc)
new_engine.get_variables()
sql = f"inception get variables;"
_query.assert_called_once_with(sql=sql)
@patch('sql.engines.goinception.GoInceptionEngine.query')
def test_get_variables_filter(self, _query):
new_engine = GoInceptionEngine(instance=self.ins_inc)
new_engine.get_variables(variables=['inception_osc_on'])
sql = f"inception get variables like 'inception_osc_on';"
_query.assert_called_once_with(sql=sql)
@patch('sql.engines.goinception.GoInceptionEngine.query')
def test_set_variable(self, _query):
new_engine = GoInceptionEngine(instance=self.ins)
new_engine.set_variable('inception_osc_on', 'on')
_query.assert_called_once_with(sql="inception set inception_osc_on=on;")
class TestOracle(TestCase):
"""Oracle 测试"""
def setUp(self):
self.ins = Instance.objects.create(instance_name='some_ins', type='slave', db_type='oracle',
host='some_host', port=3306, user='ins_user', password='some_str',
sid='some_id')
self.wf = SqlWorkflow.objects.create(
workflow_name='some_name',
group_id=1,
group_name='g1',
engineer_display='',
audit_auth_groups='some_group',
create_time=datetime.now() - timedelta(days=1),
status='workflow_finish',
is_backup=True,
instance=self.ins,
db_name='some_db',
syntax_type=1
)
SqlWorkflowContent.objects.create(workflow=self.wf)
self.sys_config = SysConfig()
def tearDown(self):
self.ins.delete()
self.sys_config.purge()
SqlWorkflow.objects.all().delete()
SqlWorkflowContent.objects.all().delete()
@patch('cx_Oracle.makedsn')
@patch('cx_Oracle.connect')
def test_get_connection(self, _connect, _makedsn):
# Test with sid set
new_engine = OracleEngine(self.ins)
new_engine.get_connection()
_connect.assert_called_once()
_makedsn.assert_called_once()
# Test with service_name set
_connect.reset_mock()
_makedsn.reset_mock()
self.ins.service_name = 'some_service'
self.ins.sid = ''
self.ins.save()
new_engine = OracleEngine(self.ins)
new_engine.get_connection()
_connect.assert_called_once()
_makedsn.assert_called_once()
# With neither set, expect a ValueError
_connect.reset_mock()
_makedsn.reset_mock()
self.ins.service_name = ''
self.ins.sid = ''
self.ins.save()
new_engine = OracleEngine(self.ins)
with self.assertRaises(ValueError):
new_engine.get_connection()
@patch('cx_Oracle.connect')
def test_engine_base_info(self, _conn):
new_engine = OracleEngine(instance=self.ins)
self.assertEqual(new_engine.name, 'Oracle')
self.assertEqual(new_engine.info, 'Oracle engine')
_conn.return_value.version = '12.1.0.2.0'
self.assertTupleEqual(new_engine.server_version, ('12', '1', '0'))
@patch('cx_Oracle.connect.cursor.execute')
@patch('cx_Oracle.connect.cursor')
@patch('cx_Oracle.connect')
def test_query(self, _conn, _cursor, _execute):
_conn.return_value.cursor.return_value.fetchmany.return_value = [(1,)]
new_engine = OracleEngine(instance=self.ins)
query_result = new_engine.query(db_name='archery', sql='select 1', limit_num=100)
self.assertIsInstance(query_result, ResultSet)
self.assertListEqual(query_result.rows, [(1,)])
@patch('cx_Oracle.connect.cursor.execute')
@patch('cx_Oracle.connect.cursor')
@patch('cx_Oracle.connect')
def test_query_not_limit(self, _conn, _cursor, _execute):
_conn.return_value.cursor.return_value.fetchall.return_value = [(1,)]
new_engine = OracleEngine(instance=self.ins)
query_result = new_engine.query(db_name=0, sql='select 1', limit_num=0)
self.assertIsInstance(query_result, ResultSet)
self.assertListEqual(query_result.rows, [(1,)])
@patch('sql.engines.oracle.OracleEngine.query',
return_value=ResultSet(rows=[('AUD_SYS',), ('archery',), ('ANONYMOUS',)]))
def test_get_all_databases(self, _query):
new_engine = OracleEngine(instance=self.ins)
dbs = new_engine.get_all_databases()
self.assertListEqual(dbs.rows, ['archery'])
@patch('sql.engines.oracle.OracleEngine.query',
return_value=ResultSet(rows=[('AUD_SYS',), ('archery',), ('ANONYMOUS',)]))
def test__get_all_databases(self, _query):
new_engine = OracleEngine(instance=self.ins)
dbs = new_engine._get_all_databases()
self.assertListEqual(dbs.rows, ['AUD_SYS', 'archery', 'ANONYMOUS'])
@patch('sql.engines.oracle.OracleEngine.query',
return_value=ResultSet(rows=[('archery',)]))
def test__get_all_instances(self, _query):
new_engine = OracleEngine(instance=self.ins)
dbs = new_engine._get_all_instances()
self.assertListEqual(dbs.rows, ['archery'])
@patch('sql.engines.oracle.OracleEngine.query',
return_value=ResultSet(rows=[('ANONYMOUS',), ('archery',), ('SYSTEM',)]))
def test_get_all_schemas(self, _query):
new_engine = OracleEngine(instance=self.ins)
schemas = new_engine._get_all_schemas()
self.assertListEqual(schemas.rows, ['archery'])
@patch('sql.engines.oracle.OracleEngine.query', return_value=ResultSet(rows=[('test',), ('test2',)]))
def test_get_all_tables(self, _query):
new_engine = OracleEngine(instance=self.ins)
tables = new_engine.get_all_tables(db_name='archery')
self.assertListEqual(tables.rows, ['test2'])
@patch('sql.engines.oracle.OracleEngine.query',
return_value=ResultSet(rows=[('id',), ('name',)]))
def test_get_all_columns_by_tb(self, _query):
new_engine = OracleEngine(instance=self.ins)
columns = new_engine.get_all_columns_by_tb(db_name='archery', tb_name='test2')
self.assertListEqual(columns.rows, ['id', 'name'])
@patch('sql.engines.oracle.OracleEngine.query',
return_value=ResultSet(rows=[('archery',), ('template1',), ('template0',)]))
def test_describe_table(self, _query):
new_engine = OracleEngine(instance=self.ins)
describe = new_engine.describe_table(db_name='archery', tb_name='text')
self.assertIsInstance(describe, ResultSet)
def test_query_check_disable_sql(self):
sql = "update xxx set a=1;"
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.query_check(db_name='archery', sql=sql)
self.assertDictEqual(check_result,
{'msg': '不支持语法!', 'bad_query': True, 'filtered_sql': sql.strip(';'),
'has_star': False})
@patch('sql.engines.oracle.OracleEngine.explain_check', return_value={'msg': '', 'rows': 0})
def test_query_check_star_sql(self, _explain_check):
sql = "select * from xx;"
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.query_check(db_name='archery', sql=sql)
self.assertDictEqual(check_result,
{'msg': '禁止使用 * 关键词\n', 'bad_query': False, 'filtered_sql': sql.strip(';'),
'has_star': True})
def test_query_check_IndexError(self):
sql = ""
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.query_check(db_name='archery', sql=sql)
self.assertDictEqual(check_result,
{'msg': '没有有效的SQL语句', 'bad_query': True, 'filtered_sql': sql.strip(), 'has_star': False})
@patch('sql.engines.oracle.OracleEngine.explain_check', return_value={'msg': '', 'rows': 0})
def test_query_check_plus(self, _explain_check):
sql = "select 100+1 from tb;"
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.query_check(db_name='archery', sql=sql)
self.assertDictEqual(check_result,
{'msg': '禁止使用 + 关键词\n', 'bad_query': True, 'filtered_sql': sql.strip(';'),
'has_star': False})
def test_filter_sql_with_delimiter(self):
sql = "select * from xx;"
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.filter_sql(sql=sql, limit_num=100)
self.assertEqual(check_result, "select sql_audit.* from (select * from xx) sql_audit where rownum <= 100")
def test_filter_sql_with_delimiter_and_where(self):
sql = "select * from xx where id>1;"
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.filter_sql(sql=sql, limit_num=100)
self.assertEqual(check_result,
"select sql_audit.* from (select * from xx where id>1) sql_audit where rownum <= 100")
def test_filter_sql_without_delimiter(self):
sql = "select * from xx;"
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.filter_sql(sql=sql, limit_num=100)
self.assertEqual(check_result, "select sql_audit.* from (select * from xx) sql_audit where rownum <= 100")
def test_filter_sql_with_limit(self):
sql = "select * from xx limit 10;"
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.filter_sql(sql=sql, limit_num=1)
self.assertEqual(check_result,
"select sql_audit.* from (select * from xx limit 10) sql_audit where rownum <= 1")
def test_query_masking(self):
query_result = ResultSet()
new_engine = OracleEngine(instance=self.ins)
masking_result = new_engine.query_masking(schema_name='', sql='select 1', resultset=query_result)
self.assertEqual(masking_result, query_result)
def test_execute_check_select_sql(self):
sql = 'select * from user;'
row = ReviewResult(id=1, errlevel=2,
stagestatus='驳回不支持语句',
errormessage='仅支持DML和DDL语句,查询语句请使用SQL查询功能!',
sql=sqlparse.format(sql, strip_comments=True, reindent=True, keyword_case='lower'))
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.execute_check(db_name='archery', sql=sql)
self.assertIsInstance(check_result, ReviewSet)
self.assertEqual(check_result.rows[0].__dict__, row.__dict__)
def test_execute_check_critical_sql(self):
self.sys_config.set('critical_ddl_regex', '^|update')
self.sys_config.get_all_config()
sql = 'update user set id=1'
row = ReviewResult(id=1, errlevel=2,
stagestatus='驳回高危SQL',
errormessage='禁止提交匹配' + '^|update' + '条件的语句!',
sql=sqlparse.format(sql, strip_comments=True, reindent=True, keyword_case='lower'))
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.execute_check(db_name='archery', sql=sql)
self.assertIsInstance(check_result, ReviewSet)
self.assertEqual(check_result.rows[0].__dict__, row.__dict__)
@patch('sql.engines.oracle.OracleEngine.explain_check', return_value={'msg': '', 'rows': 0})
@patch('sql.engines.oracle.OracleEngine.get_sql_first_object_name', return_value='tb')
@patch('sql.engines.oracle.OracleEngine.object_name_check', return_value=True)
def test_execute_check_normal_sql(self, _explain_check, _get_sql_first_object_name, _object_name_check):
self.sys_config.purge()
sql = 'alter table tb set id=1'
row = ReviewResult(id=1,
errlevel=1,
stagestatus='当前平台,此语法不支持审核!',
errormessage='当前平台,此语法不支持审核!',
sql=sqlparse.format(sql, strip_comments=True, reindent=True, keyword_case='lower'),
affected_rows=0,
execute_time=0,
stmt_type='SQL',
object_owner='',
object_type='',
object_name='',
)
new_engine = OracleEngine(instance=self.ins)
check_result = new_engine.execute_check(db_name='archery', sql=sql)
self.assertIsInstance(check_result, ReviewSet)
self.assertEqual(check_result.rows[0].__dict__, row.__dict__)
@patch('cx_Oracle.connect.cursor.execute')
@patch('cx_Oracle.connect.cursor')
@patch('cx_Oracle.connect')
def test_execute_workflow_success(self, _conn, _cursor, _execute):
sql = 'update user set id=1'
review_row = ReviewResult(id=1,
errlevel=0,
stagestatus='Execute Successfully',
errormessage='None',
sql=sql,
affected_rows=0,
execute_time=0,
stmt_type='SQL',
object_owner='',
object_type='',
object_name='', )
execute_row = ReviewResult(id=1,
errlevel=0,
stagestatus='Execute Successfully',
errormessage='None',
sql=sql,
affected_rows=0,
execute_time=0)
wf = SqlWorkflow.objects.create(
workflow_name='some_name',
group_id=1,
group_name='g1',
engineer_display='',
audit_auth_groups='some_group',
create_time=datetime.now() - timedelta(days=1),
status='workflow_finish',
is_backup=True,
instance=self.ins,
db_name='some_db',
syntax_type=1
)
SqlWorkflowContent.objects.create(workflow=wf, sql_content=sql,
review_content=ReviewSet(rows=[review_row]).json())
new_engine = OracleEngine(instance=self.ins)
execute_result = new_engine.execute_workflow(workflow=wf)
self.assertIsInstance(execute_result, ReviewSet)
self.assertEqual(execute_result.rows[0].__dict__.keys(), execute_row.__dict__.keys())
@patch('cx_Oracle.connect.cursor.execute')
@patch('cx_Oracle.connect.cursor')
@patch('cx_Oracle.connect', return_value=RuntimeError)
def test_execute_workflow_exception(self, _conn, _cursor, _execute):
sql = 'update user set id=1'
row = ReviewResult(id=1,
errlevel=2,
stagestatus='Execute Failed',
errormessage=f'异常信息:{f"Oracle命令执行报错,语句:{sql}"}',
sql=sql,
affected_rows=0,
execute_time=0,
stmt_type='SQL',
object_owner='',
object_type='',
object_name='',
)
wf = SqlWorkflow.objects.create(
workflow_name='some_name',
group_id=1,
group_name='g1',
engineer_display='',
audit_auth_groups='some_group',
create_time=datetime.now() - timedelta(days=1),
status='workflow_finish',
is_backup=True,
instance=self.ins,
db_name='some_db',
syntax_type=1
)
SqlWorkflowContent.objects.create(workflow=wf, sql_content=sql, review_content=ReviewSet(rows=[row]).json())
with self.assertRaises(AttributeError):
new_engine = OracleEngine(instance=self.ins)
execute_result = new_engine.execute_workflow(workflow=wf)
self.assertIsInstance(execute_result, ReviewSet)
self.assertEqual(execute_result.rows[0].__dict__.keys(), row.__dict__.keys())
class MongoTest(TestCase):
def setUp(self) -> None:
self.ins = Instance.objects.create(instance_name='some_ins', type='slave', db_type='mongo',
host='some_host', port=3306, user='ins_user')
self.engine = MongoEngine(instance=self.ins)
def tearDown(self) -> None:
self.ins.delete()
@patch('sql.engines.mongo.pymongo')
def test_get_connection(self, mock_pymongo):
_ = self.engine.get_connection()
mock_pymongo.MongoClient.assert_called_once()
@patch('sql.engines.mongo.MongoEngine.get_connection')
def test_query(self, mock_get_connection):
# TODO normal query is not implemented yet
test_sql = """{"collection": "job","count": true}"""
self.assertIsInstance(self.engine.query('archery', test_sql), ResultSet)
def test_query_check(self):
test_sql = """{"collection": "job","count": true}"""
check_result = self.engine.query_check(sql=test_sql)
self.assertEqual(False, check_result.get('bad_query'))
@patch('sql.engines.mongo.MongoEngine.get_connection')
def test_get_all_databases(self, mock_get_connection):
db_list = self.engine.get_all_databases()
self.assertIsInstance(db_list, ResultSet)
# mock_get_connection.return_value.list_database_names.assert_called_once()
@patch('sql.engines.mongo.MongoEngine.get_connection')
def test_get_all_tables(self, mock_get_connection):
mock_db = Mock()
# Sample return value for listing collections
mock_db.list_collection_names.return_value = ['u', 'v', 'w']
mock_get_connection.return_value = {'some_db': mock_db}
table_list = self.engine.get_all_tables('some_db')
mock_db.list_collection_names.assert_called_once()
self.assertEqual(table_list.rows, ['u', 'v', 'w'])
|
from datetime import datetime
from typing import Optional, Union
from discord.embeds import Embed
from discord.ext.commands import Cog, Context, group, has_permissions
from discord.member import Member
from discord.role import Role
from colors import Colors
from log import log
from permission import update_user_permission, list_user_permissions, get_user_permissions, has_own_permission, \
get_role_permissions, update_role_permission, list_role_permissions
from translation import get_user_language
class Permissions(Cog):
def __init__(self, bot):
self.bot = bot
@group(aliases=["permission"])
@has_permissions(administrator=True)
async def permissions(self, ctx: Context, mention: Union[Member, Role], permission: Optional[str] = "",
enabled: Optional[int] = -1):
if isinstance(mention, Member):
await self.member(ctx, mention, permission, enabled)
elif isinstance(mention, Role):
await self.role(ctx, mention, permission, enabled)
async def member(self, ctx: Context, member: Member, permission: Optional[str] = "", enabled: Optional[int] = -1):
lang = get_user_language(ctx.author.id)
if not permission:
embed = Embed(color=Colors.permission, timestamp=datetime.now())
embed.add_field(name=lang.f_permissions_permissions_for(str(member)),
value="\n".join([f"`{i.title().replace("_", " ")}`" for i in
list_user_permissions(member)]) if list_user_permissions(
member) else lang.none)
embed.set_thumbnail(url=member.avatar_url)
await ctx.send(embed=embed)
return
if permission and enabled == -1:
perm = lang.yes if has_own_permission(permission, get_user_permissions(member)) else lang.no
embed = Embed(color=Colors.permission, timestamp=datetime.now())
embed.add_field(name=lang.f_permissions_permission_for(permission, str(member)),
value=lang.enabled + f": `{perm}`")
embed.set_thumbnail(url=member.avatar_url)
await ctx.send(embed=embed)
return
if permission and enabled != -1:
before = lang.yes if has_own_permission(permission, get_user_permissions(member)) else lang.no
update_user_permission(member, permission, enabled > 0)
embed = Embed(color=Colors.permission, timestamp=datetime.now())
embed.add_field(name=lang.f_permissions_permission_set_for(str(member)),
value="`" + permission.title().replace("_",
" ") + "` » `" + (
lang.yes if enabled > 0 else lang.no) + "`",
inline=False)
embed.add_field(name=lang.permissions_permission_before, value=f"`{before}`", inline=False)
embed.add_field(name=lang.permissions_permission_set_by, value=ctx.author.mention, inline=False)
embed.add_field(name=lang.permissions_permission_total,
value="\n".join([f"`{i.title().replace("_", " ")}`" for i in
list_user_permissions(member)]) if list_user_permissions(
member) else lang.none)
embed.set_thumbnail(url=member.avatar_url)
embed.set_footer(text=lang.member_id + ": " + str(member.id))
await ctx.send(embed=embed)
await log(ctx, embed=embed)
async def role(self, ctx: Context, role: Role, permission: Optional[str] = "", enabled: Optional[int] = -1):
lang = get_user_language(ctx.author.id)
if not permission:
embed = Embed(color=Colors.permission, timestamp=datetime.now())
embed.add_field(name=lang.f_permissions_permissions_for("@" + str(role)),
value="\n".join([f"`{i.title().replace("_", " ")}`" for i in
list_role_permissions(role)]) if list_role_permissions(
role) else lang.none)
await ctx.send(embed=embed)
return
if permission and enabled == -1:
perm = lang.yes if has_own_permission(permission, get_role_permissions(role)) else lang.no
embed = Embed(color=Colors.permission, timestamp=datetime.now())
embed.add_field(name=lang.f_permissions_permission_for(permission, "@" + str(role)),
value=lang.enabled + f": `{perm}`")
await ctx.send(embed=embed)
return
if permission and enabled != -1:
before = lang.yes if has_own_permission(permission, get_role_permissions(role)) else lang.no
update_role_permission(role, permission, enabled > 0)
embed = Embed(color=Colors.permission, timestamp=datetime.now())
embed.add_field(name=lang.f_permissions_permission_set_for(str(role)),
value="`" + permission.title().replace("_",
" ") + "` » `" + (
lang.yes if enabled > 0 else lang.no) + "`",
inline=False)
embed.add_field(name=lang.permissions_permission_before, value=f"`{before}`", inline=False)
embed.add_field(name=lang.permissions_permission_set_by, value=ctx.author.mention, inline=False)
embed.add_field(name=lang.permissions_permission_total,
value="\n".join([f"`{i.title().replace("_", " ")}`" for i in
list_role_permissions(role)]) if list_role_permissions(
role) else lang.none)
embed.set_footer(text=lang.role_id + ": " + str(role.id))
await ctx.send(embed=embed)
await log(ctx, embed=embed)
def setup(bot):
bot.add_cog(Permissions(bot))
|
from datetime import datetime
from typing import Optional, Union
from discord.embeds import Embed
from discord.ext.commands import Cog, Context, group, has_permissions
from discord.member import Member
from discord.role import Role
from colors import Colors
from log import log
from permission import update_user_permission, list_user_permissions, get_user_permissions, has_own_permission, \
get_role_permissions, update_role_permission, list_role_permissions
from translation import get_user_language
class Permissions(Cog):
def __init__(self, bot):
self.bot = bot
@group(aliases=["permission"])
@has_permissions(administrator=True)
async def permissions(self, ctx: Context, mention: Union[Member, Role], permission: Optional[str] = "",
enabled: Optional[int] = -1):
if isinstance(mention, Member):
await self.member(ctx, mention, permission, enabled)
elif isinstance(mention, Role):
await self.role(ctx, mention, permission, enabled)
async def member(self, ctx: Context, member: Member, permission: Optional[str] = "", enabled: Optional[int] = -1):
lang = get_user_language(ctx.author.id)
if not permission:
embed = Embed(color=Colors.permission, timestamp=datetime.now())
embed.add_field(name=lang.f_permissions_permissions_for(str(member)),
value="\n".join([f"`{i.title().replace('_', ' ')}`" for i in
list_user_permissions(member)]) if list_user_permissions(
member) else lang.none)
embed.set_thumbnail(url=member.avatar_url)
await ctx.send(embed=embed)
return
if permission and enabled == -1:
perm = lang.yes if has_own_permission(permission, get_user_permissions(member)) else lang.no
embed = Embed(color=Colors.permission, timestamp=datetime.now())
embed.add_field(name=lang.f_permissions_permission_for(permission, str(member)),
value=lang.enabled + f": `{perm}`")
embed.set_thumbnail(url=member.avatar_url)
await ctx.send(embed=embed)
return
if permission and enabled != -1:
before = lang.yes if has_own_permission(permission, get_user_permissions(member)) else lang.no
update_user_permission(member, permission, enabled > 0)
embed = Embed(color=Colors.permission, timestamp=datetime.now())
embed.add_field(name=lang.f_permissions_permission_set_for(str(member)),
value="`" + permission.title().replace("_",
" ") + "` » `" + (
lang.yes if enabled > 0 else lang.no) + "`",
inline=False)
embed.add_field(name=lang.permissions_permission_before, value=f"`{before}`", inline=False)
embed.add_field(name=lang.permissions_permission_set_by, value=ctx.author.mention, inline=False)
embed.add_field(name=lang.permissions_permission_total,
value="\n".join([f"`{i.title().replace('_', ' ')}`" for i in
list_user_permissions(member)]) if list_user_permissions(
member) else lang.none)
embed.set_thumbnail(url=member.avatar_url)
embed.set_footer(text=lang.member_id + ": " + str(member.id))
await ctx.send(embed=embed)
await log(ctx, embed=embed)
async def role(self, ctx: Context, role: Role, permission: Optional[str] = "", enabled: Optional[int] = -1):
lang = get_user_language(ctx.author.id)
if not permission:
embed = Embed(color=Colors.permission, timestamp=datetime.now())
embed.add_field(name=lang.f_permissions_permissions_for("@" + str(role)),
value="\n".join([f"`{i.title().replace('_', ' ')}`" for i in
list_role_permissions(role)]) if list_role_permissions(
role) else lang.none)
await ctx.send(embed=embed)
return
if permission and enabled == -1:
perm = lang.yes if has_own_permission(permission, get_role_permissions(role)) else lang.no
embed = Embed(color=Colors.permission, timestamp=datetime.now())
embed.add_field(name=lang.f_permissions_permission_for(permission, "@" + str(role)),
value=lang.enabled + f": `{perm}`")
await ctx.send(embed=embed)
return
if permission and enabled != -1:
before = lang.yes if has_own_permission(permission, get_role_permissions(role)) else lang.no
update_role_permission(role, permission, enabled > 0)
embed = Embed(color=Colors.permission, timestamp=datetime.now())
embed.add_field(name=lang.f_permissions_permission_set_for(str(role)),
value="`" + permission.title().replace("_",
" ") + "` » `" + (
lang.yes if enabled > 0 else lang.no) + "`",
inline=False)
embed.add_field(name=lang.permissions_permission_before, value=f"`{before}`", inline=False)
embed.add_field(name=lang.permissions_permission_set_by, value=ctx.author.mention, inline=False)
embed.add_field(name=lang.permissions_permission_total,
value="\n".join([f"`{i.title().replace('_', ' ')}`" for i in
list_role_permissions(role)]) if list_role_permissions(
role) else lang.none)
embed.set_footer(text=lang.role_id + ": " + str(role.id))
await ctx.send(embed=embed)
await log(ctx, embed=embed)
def setup(bot):
bot.add_cog(Permissions(bot))
|
# PEOPLE REGISTRATION in a dictionary - LESSON 19 EXERCISE 94
# Person data: name, sex and age
# All the dictionaries are stored in a list
# Report how many people were registered, the average age, the list of women, and the names of people older than the average
#
pessoa = dict()
grupo = list()
somaidades = media = 0
while True:
pessoa.clear() # clear the dict so the loop does not carry over old data
pessoa["nome"] = str(input('Nome: ')).strip()
pessoa["sexo"] = str(input('Sexo: [M/F] ')).strip().upper()
pessoa["idade"] = int(input('Idade: '))
grupo.append(pessoa.copy()) # append a copy of the dict to the list
cont = str(input('Continuar? [S/N] ')).strip().lower()
somaidades += pessoa["idade"]
if cont == 'n':
break
media = somaidades/len(grupo)
print('-'*50)
print(f'A) Pessoas cadastradas: {len(grupo)}')
print(f'B) Média de idade: {media:.2f} anos')
print(f'C) Mulheres cadastradas: ', end='')
for i in range(len(grupo)):
if grupo[i]["sexo"] == 'F':
print(f'{grupo[i]["nome"]} ', end='')
print()
print(f'D) Acima da média: ', end='')
for i in range(len(grupo)):
if grupo[i]["idade"] > media:
print(f'{grupo[i]["nome"]} {grupo[i]["idade"]} anos ', end='')
print()
print('-'*50)
|
# PEOPLE REGISTRATION in a dictionary - LESSON 19 EXERCISE 94
# Person data: name, sex and age
# All the dictionaries are stored in a list
# Report how many people were registered, the average age, the list of women, and the names of people older than the average
#
pessoa = dict()
grupo = list()
somaidades = media = 0
while True:
pessoa.clear() # clear the dict so the loop does not carry over old data
pessoa["nome"] = str(input('Nome: ')).strip()
pessoa["sexo"] = str(input('Sexo: [M/F] ')).strip().upper()
pessoa["idade"] = int(input('Idade: '))
grupo.append(pessoa.copy()) # append a copy of the dict to the list
cont = str(input('Continuar? [S/N] ')).strip().lower()
somaidades += pessoa["idade"]
if cont == 'n':
break
media = somaidades/len(grupo)
print('-'*50)
print(f'A) Pessoas cadastradas: {len(grupo)}')
print(f'B) Média de idade: {media:.2f} anos')
print(f'C) Mulheres cadastradas: ', end='')
for i in range(len(grupo)):
if grupo[i]["sexo"] == 'F':
print(f'{grupo[i]["nome"]} ', end='')
print()
print(f'D) Acima da média: ', end='')
for i in range(len(grupo)):
if grupo[i]["idade"] > media:
print(f'{grupo[i]["nome"]} {grupo[i]["idade"]} anos ', end='')
print()
print('-'*50)
|
from typing import Any, Dict, Tuple
from ee.clickhouse.models.property import get_property_string_expr
from ee.clickhouse.queries.event_query import ClickhouseEventQuery
from posthog.constants import AUTOCAPTURE_EVENT, PAGEVIEW_EVENT, SCREEN_EVENT
from posthog.models.filters.path_filter import PathFilter
class PathEventQuery(ClickhouseEventQuery):
FUNNEL_PERSONS_ALIAS = "funnel_persons"
_filter: PathFilter
def __init__(
self,
filter: PathFilter,
team_id: int,
round_interval=False,
should_join_distinct_ids=False,
should_join_persons=False,
**kwargs,
) -> None:
super().__init__(filter, team_id, round_interval, should_join_distinct_ids, should_join_persons, **kwargs)
def get_query(self) -> Tuple[str, Dict[str, Any]]:
# TODO: ColumnOptimizer with options like self._filter.include_pageviews, self._filter.include_screenviews,
funnel_paths_timestamp = ""
funnel_paths_join = ""
funnel_paths_filter = ""
if self._filter.funnel_paths:
funnel_paths_timestamp = f"{self.FUNNEL_PERSONS_ALIAS}.timestamp as min_timestamp"
funnel_paths_join = f"JOIN {self.FUNNEL_PERSONS_ALIAS} ON {self.FUNNEL_PERSONS_ALIAS}.person_id = {self.DISTINCT_ID_TABLE_ALIAS}.person_id"
funnel_paths_filter = f"AND {self.EVENT_TABLE_ALIAS}.timestamp >= min_timestamp"
_fields = [
f"{self.EVENT_TABLE_ALIAS}.timestamp AS timestamp",
(
f"if(event = '{SCREEN_EVENT}', {self._get_screen_name_parsing()}, "
f"if({self.EVENT_TABLE_ALIAS}.event = '{PAGEVIEW_EVENT}', {self._get_current_url_parsing()}, "
f"if({self.EVENT_TABLE_ALIAS}.event = '{AUTOCAPTURE_EVENT}', concat('autocapture:', {self.EVENT_TABLE_ALIAS}.elements_chain), "
f"{self.EVENT_TABLE_ALIAS}.event))) AS path_item"
),
f"{self.DISTINCT_ID_TABLE_ALIAS}.person_id as person_id" if self._should_join_distinct_ids else "",
funnel_paths_timestamp,
]
_fields = list(filter(None, _fields))
date_query, date_params = self._get_date_filter()
self.params.update(date_params)
prop_filters = self._filter.properties
prop_query, prop_params = self._get_props(prop_filters)
self.params.update(prop_params)
event_query, event_params = self._get_event_query()
self.params.update(event_params)
query = f"""
SELECT {','.join(_fields)} FROM events {self.EVENT_TABLE_ALIAS}
{self._get_disintct_id_query()}
{self._get_person_query()}
{funnel_paths_join}
WHERE team_id = %(team_id)s
{event_query}
{date_query}
{prop_query}
{funnel_paths_filter}
ORDER BY {self.DISTINCT_ID_TABLE_ALIAS}.person_id, {self.EVENT_TABLE_ALIAS}.timestamp
"""
return query, self.params
def _determine_should_join_distinct_ids(self) -> None:
self._should_join_distinct_ids = True
def _get_current_url_parsing(self):
path_type, _ = get_property_string_expr("events", "$current_url", "'$current_url'", "properties")
return f"if(length({path_type}) > 1, trim( TRAILING '/' FROM {path_type}), {path_type})"
def _get_screen_name_parsing(self):
path_type, _ = get_property_string_expr("events", "$screen_name", "'$screen_name'", "properties")
return path_type
def _get_event_query(self) -> Tuple[str, Dict[str, Any]]:
params: Dict[str, Any] = {}
conditions = []
or_conditions = []
if self._filter.include_pageviews:
or_conditions.append(f"event = '{PAGEVIEW_EVENT}'")
if self._filter.include_screenviews:
or_conditions.append(f"event = '{SCREEN_EVENT}'")
if self._filter.include_autocaptures:
or_conditions.append(f"event = '{AUTOCAPTURE_EVENT}'")
if self._filter.include_all_custom_events:
or_conditions.append(f"NOT event LIKE '$%%'")
if self._filter.custom_events:
or_conditions.append(f"event IN %(custom_events)s")
params["custom_events"] = self._filter.custom_events
if or_conditions:
conditions.append(f"({" OR ".join(or_conditions)})")
if self._filter.exclude_events:
conditions.append(f"NOT event IN %(exclude_events)s")
params["exclude_events"] = self._filter.exclude_events
if conditions:
return f" AND {" AND ".join(conditions)}", params
return "", {}
|
from typing import Any, Dict, Tuple
from ee.clickhouse.models.property import get_property_string_expr
from ee.clickhouse.queries.event_query import ClickhouseEventQuery
from posthog.constants import AUTOCAPTURE_EVENT, PAGEVIEW_EVENT, SCREEN_EVENT
from posthog.models.filters.path_filter import PathFilter
class PathEventQuery(ClickhouseEventQuery):
FUNNEL_PERSONS_ALIAS = "funnel_persons"
_filter: PathFilter
def __init__(
self,
filter: PathFilter,
team_id: int,
round_interval=False,
should_join_distinct_ids=False,
should_join_persons=False,
**kwargs,
) -> None:
super().__init__(filter, team_id, round_interval, should_join_distinct_ids, should_join_persons, **kwargs)
def get_query(self) -> Tuple[str, Dict[str, Any]]:
# TODO: ColumnOptimizer with options like self._filter.include_pageviews, self._filter.include_screenviews,
funnel_paths_timestamp = ""
funnel_paths_join = ""
funnel_paths_filter = ""
if self._filter.funnel_paths:
funnel_paths_timestamp = f"{self.FUNNEL_PERSONS_ALIAS}.timestamp as min_timestamp"
funnel_paths_join = f"JOIN {self.FUNNEL_PERSONS_ALIAS} ON {self.FUNNEL_PERSONS_ALIAS}.person_id = {self.DISTINCT_ID_TABLE_ALIAS}.person_id"
funnel_paths_filter = f"AND {self.EVENT_TABLE_ALIAS}.timestamp >= min_timestamp"
_fields = [
f"{self.EVENT_TABLE_ALIAS}.timestamp AS timestamp",
(
f"if(event = '{SCREEN_EVENT}', {self._get_screen_name_parsing()}, "
f"if({self.EVENT_TABLE_ALIAS}.event = '{PAGEVIEW_EVENT}', {self._get_current_url_parsing()}, "
f"if({self.EVENT_TABLE_ALIAS}.event = '{AUTOCAPTURE_EVENT}', concat('autocapture:', {self.EVENT_TABLE_ALIAS}.elements_chain), "
f"{self.EVENT_TABLE_ALIAS}.event))) AS path_item"
),
f"{self.DISTINCT_ID_TABLE_ALIAS}.person_id as person_id" if self._should_join_distinct_ids else "",
funnel_paths_timestamp,
]
_fields = list(filter(None, _fields))
date_query, date_params = self._get_date_filter()
self.params.update(date_params)
prop_filters = self._filter.properties
prop_query, prop_params = self._get_props(prop_filters)
self.params.update(prop_params)
event_query, event_params = self._get_event_query()
self.params.update(event_params)
query = f"""
SELECT {','.join(_fields)} FROM events {self.EVENT_TABLE_ALIAS}
{self._get_disintct_id_query()}
{self._get_person_query()}
{funnel_paths_join}
WHERE team_id = %(team_id)s
{event_query}
{date_query}
{prop_query}
{funnel_paths_filter}
ORDER BY {self.DISTINCT_ID_TABLE_ALIAS}.person_id, {self.EVENT_TABLE_ALIAS}.timestamp
"""
return query, self.params
def _determine_should_join_distinct_ids(self) -> None:
self._should_join_distinct_ids = True
def _get_current_url_parsing(self):
path_type, _ = get_property_string_expr("events", "$current_url", "'$current_url'", "properties")
return f"if(length({path_type}) > 1, trim( TRAILING '/' FROM {path_type}), {path_type})"
def _get_screen_name_parsing(self):
path_type, _ = get_property_string_expr("events", "$screen_name", "'$screen_name'", "properties")
return path_type
def _get_event_query(self) -> Tuple[str, Dict[str, Any]]:
params: Dict[str, Any] = {}
conditions = []
or_conditions = []
if self._filter.include_pageviews:
or_conditions.append(f"event = '{PAGEVIEW_EVENT}'")
if self._filter.include_screenviews:
or_conditions.append(f"event = '{SCREEN_EVENT}'")
if self._filter.include_autocaptures:
or_conditions.append(f"event = '{AUTOCAPTURE_EVENT}'")
if self._filter.include_all_custom_events:
or_conditions.append(f"NOT event LIKE '$%%'")
if self._filter.custom_events:
or_conditions.append(f"event IN %(custom_events)s")
params["custom_events"] = self._filter.custom_events
if or_conditions:
conditions.append(f"({' OR '.join(or_conditions)})")
if self._filter.exclude_events:
conditions.append(f"NOT event IN %(exclude_events)s")
params["exclude_events"] = self._filter.exclude_events
if conditions:
return f" AND {' AND '.join(conditions)}", params
return "", {}
|
import abc
from collections import defaultdict
from contextlib import contextmanager
from dataclasses import dataclass, field, replace
import math
import struct
import sys
import traceback
import typing
from typing import (
AbstractSet,
Callable,
Collection,
DefaultDict,
Dict,
Iterator,
List,
Mapping,
Optional,
Set,
Tuple,
Union,
)
from .c_types import CType, TypeMap
from .demangle_codewarrior import parse as demangle_codewarrior_parse, CxxSymbol
from .error import DecompFailure, static_assert_unreachable
from .flow_graph import (
ArchFlowGraph,
FlowGraph,
Function,
Node,
ReturnNode,
SwitchNode,
TerminalNode,
locs_clobbered_until_dominator,
)
from .ir_pattern import IrPattern, simplify_ir_patterns
from .options import CodingStyle, Formatter, Options, Target
from .parse_file import AsmData, AsmDataEntry
from .parse_instruction import (
ArchAsm,
Argument,
AsmAddressMode,
AsmGlobalSymbol,
AsmLiteral,
BinOp,
Instruction,
InstrProcessingFailure,
Macro,
Register,
StackLocation,
current_instr,
)
from .types import (
AccessPath,
FunctionParam,
FunctionSignature,
StructDeclaration,
Type,
TypePool,
)
InstrSet = Collection[str]
InstrMap = Mapping[str, Callable[["InstrArgs"], "Expression"]]
StmtInstrMap = Mapping[str, Callable[["InstrArgs"], "Statement"]]
CmpInstrMap = Mapping[str, Callable[["InstrArgs"], "Condition"]]
StoreInstrMap = Mapping[str, Callable[["InstrArgs"], Optional["StoreStmt"]]]
MaybeInstrMap = Mapping[str, Callable[["InstrArgs"], Optional["Expression"]]]
PairInstrMap = Mapping[str, Callable[["InstrArgs"], Tuple["Expression", "Expression"]]]
ImplicitInstrMap = Mapping[str, Tuple[Register, Callable[["InstrArgs"], "Expression"]]]
PpcCmpInstrMap = Mapping[str, Callable[["InstrArgs", str], "Expression"]]
class Arch(ArchFlowGraph):
instrs_ignore: InstrSet = set()
instrs_store: StoreInstrMap = {}
instrs_store_update: StoreInstrMap = {}
instrs_load_update: InstrMap = {}
instrs_branches: CmpInstrMap = {}
instrs_float_branches: InstrSet = set()
instrs_float_comp: CmpInstrMap = {}
instrs_ppc_compare: PpcCmpInstrMap = {}
instrs_jumps: InstrSet = set()
instrs_fn_call: InstrSet = set()
instrs_no_dest: StmtInstrMap = {}
instrs_hi_lo: PairInstrMap = {}
instrs_source_first: InstrMap = {}
instrs_destination_first: InstrMap = {}
instrs_implicit_destination: ImplicitInstrMap = {}
@abc.abstractmethod
def function_abi(
self,
fn_sig: FunctionSignature,
likely_regs: Dict[Register, bool],
*,
for_call: bool,
) -> "Abi":
"""
Compute stack positions/registers used by a function based on its type
information. Also computes a list of registers that may contain arguments,
if the function has varargs or an unknown/incomplete type.
"""
...
@abc.abstractmethod
def function_return(self, expr: "Expression") -> Dict[Register, "Expression"]:
"""
Compute register location(s) & values that will hold the return value
of the function call `expr`.
This must have a value for each register in `all_return_regs` in order to stay
consistent with `Instruction.outputs`. This is why we can't use the
function's return type, even though it may be more accurate.
"""
...
# These are defined here to avoid a circular import in flow_graph.py
ir_patterns: List[IrPattern] = []
def simplify_ir(self, flow_graph: FlowGraph) -> None:
simplify_ir_patterns(self, flow_graph, self.ir_patterns)
ASSOCIATIVE_OPS: Set[str] = {"+", "&&", "||", "&", "|", "^", "*"}
COMPOUND_ASSIGNMENT_OPS: Set[str] = {"+", "-", "*", "/", "%", "&", "|", "^", "<<", ">>"}
PSEUDO_FUNCTION_OPS: Set[str] = {"MULT_HI", "MULTU_HI", "DMULT_HI", "DMULTU_HI", "CLZ"}
def as_type(expr: "Expression", type: Type, silent: bool) -> "Expression":
type = type.weaken_void_ptr()
ptr_target_type = type.get_pointer_target()
if expr.type.unify(type):
if silent or isinstance(expr, Literal):
return expr
elif ptr_target_type is not None:
ptr_target_type_size = ptr_target_type.get_size_bytes()
field_path, field_type, _ = expr.type.get_deref_field(
0, target_size=ptr_target_type_size
)
if field_path is not None and field_type.unify(ptr_target_type):
expr = AddressOf(
StructAccess(
struct_var=expr,
offset=0,
target_size=ptr_target_type_size,
field_path=field_path,
stack_info=None,
type=field_type,
),
type=type,
)
if silent:
return expr
return Cast(expr=expr, reinterpret=True, silent=False, type=type)
def as_f32(expr: "Expression") -> "Expression":
return as_type(expr, Type.f32(), True)
def as_f64(expr: "Expression") -> "Expression":
return as_type(expr, Type.f64(), True)
def as_sintish(expr: "Expression", *, silent: bool = False) -> "Expression":
return as_type(expr, Type.sintish(), silent)
def as_uintish(expr: "Expression") -> "Expression":
return as_type(expr, Type.uintish(), False)
def as_u32(expr: "Expression") -> "Expression":
return as_type(expr, Type.u32(), False)
def as_s64(expr: "Expression", *, silent: bool = False) -> "Expression":
return as_type(expr, Type.s64(), silent)
def as_u64(expr: "Expression", *, silent: bool = False) -> "Expression":
return as_type(expr, Type.u64(), silent)
def as_intish(expr: "Expression") -> "Expression":
return as_type(expr, Type.intish(), True)
def as_int64(expr: "Expression") -> "Expression":
return as_type(expr, Type.int64(), True)
def as_intptr(expr: "Expression") -> "Expression":
return as_type(expr, Type.intptr(), True)
def as_ptr(expr: "Expression") -> "Expression":
return as_type(expr, Type.ptr(), True)
def as_function_ptr(expr: "Expression") -> "Expression":
return as_type(expr, Type.ptr(Type.function()), True)
@dataclass
class StackInfo:
function: Function
global_info: "GlobalInfo"
flow_graph: FlowGraph
allocated_stack_size: int = 0
is_leaf: bool = True
is_variadic: bool = False
uses_framepointer: bool = False
subroutine_arg_top: int = 0
callee_save_regs: Set[Register] = field(default_factory=set)
callee_save_reg_region: Tuple[int, int] = (0, 0)
unique_type_map: Dict[Tuple[str, object], "Type"] = field(default_factory=dict)
local_vars: List["LocalVar"] = field(default_factory=list)
temp_vars: List["EvalOnceStmt"] = field(default_factory=list)
phi_vars: List["PhiExpr"] = field(default_factory=list)
reg_vars: Dict[Register, "RegisterVar"] = field(default_factory=dict)
used_reg_vars: Set[Register] = field(default_factory=set)
arguments: List["PassedInArg"] = field(default_factory=list)
temp_name_counter: Dict[str, int] = field(default_factory=dict)
nonzero_accesses: Set["Expression"] = field(default_factory=set)
param_names: Dict[int, str] = field(default_factory=dict)
stack_pointer_type: Optional[Type] = None
replace_first_arg: Optional[Tuple[str, Type]] = None
weak_stack_var_types: Dict[int, Type] = field(default_factory=dict)
weak_stack_var_locations: Set[int] = field(default_factory=set)
def temp_var(self, prefix: str) -> str:
counter = self.temp_name_counter.get(prefix, 0) + 1
self.temp_name_counter[prefix] = counter
return prefix + (f"_{counter}" if counter > 1 else "")
def in_subroutine_arg_region(self, location: int) -> bool:
if self.global_info.arch.arch == Target.ArchEnum.PPC:
return False
if self.is_leaf:
return False
assert self.subroutine_arg_top is not None
return location < self.subroutine_arg_top
def in_callee_save_reg_region(self, location: int) -> bool:
lower_bound, upper_bound = self.callee_save_reg_region
if lower_bound <= location < upper_bound:
return True
# PPC saves LR in the header of the previous stack frame
if (
self.global_info.arch.arch == Target.ArchEnum.PPC
and location == self.allocated_stack_size + 4
):
return True
return False
def location_above_stack(self, location: int) -> bool:
return location >= self.allocated_stack_size
def add_known_param(self, offset: int, name: Optional[str], type: Type) -> None:
# A common pattern in C for OOP-style polymorphism involves casting a general "base" struct
# to a specific "class" struct, where the first member of the class struct is the base struct.
#
# For the first argument of the function, if it is a pointer to a base struct, and there
# exists a class struct named after the first part of the function name, assume that
# this pattern is being used. Internally, treat the argument as a pointer to the *class*
# struct, even though it is only a pointer to the *base* struct in the provided context.
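# Illustrative example (not from the original source): given
#     struct Actor { ActorBase base; /* ... */ };
#     void Actor_Init(ActorBase *thisx) { ... }
# the context only declares the argument as `ActorBase *`, but because a
# struct tagged `Actor` exists and embeds the base struct at offset 0, the
# argument is treated internally as `Actor *this`.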
if offset == 0 and type.is_pointer() and self.replace_first_arg is None:
namespace = self.function.name.partition("_")[0]
base_struct_type = type.get_pointer_target()
self_struct = self.global_info.typepool.get_struct_by_tag_name(
namespace, self.global_info.typemap
)
if (
self_struct is not None
and base_struct_type is not None
and base_struct_type.is_struct()
):
# Check if `self_struct_type` contains a `base_struct_type` at offset 0
self_struct_type = Type.struct(self_struct)
field_path, field_type, _ = self_struct_type.get_field(
offset=0, target_size=base_struct_type.get_size_bytes()
)
if (
field_path is not None
and field_type.unify(base_struct_type)
and not self_struct_type.unify(base_struct_type)
):
# Success, it looks like `self_struct_type` extends `base_struct_type`.
# By default, name the local var `self`, unless the argument name is `thisx` then use `this`
self.replace_first_arg = (name or "_self", type)
name = "this" if name == "thisx" else "self"
type = Type.ptr(Type.struct(self_struct))
if name:
self.param_names[offset] = name
_, arg = self.get_argument(offset)
self.add_argument(arg)
arg.type.unify(type)
def get_param_name(self, offset: int) -> Optional[str]:
return self.param_names.get(offset)
def add_local_var(self, var: "LocalVar") -> None:
if any(v.value == var.value for v in self.local_vars):
return
self.local_vars.append(var)
# Make sure the local vars stay sorted in order on the stack.
self.local_vars.sort(key=lambda v: v.value)
def add_argument(self, arg: "PassedInArg") -> None:
if any(a.value == arg.value for a in self.arguments):
return
self.arguments.append(arg)
self.arguments.sort(key=lambda a: a.value)
def get_argument(self, location: int) -> Tuple["Expression", "PassedInArg"]:
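# Arguments are tracked by their 4-byte-aligned stack slot; an access at
# slot offset +3 (or +2) is presumably a sub-word argument on a big-endian
# target, so it is returned as an 8-bit (or 16-bit) integer view while the
# recorded PassedInArg stays word-aligned.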
real_location = location & -4
arg = PassedInArg(
real_location,
copied=True,
stack_info=self,
type=self.unique_type_for("arg", real_location, Type.any_reg()),
)
if real_location == location - 3:
return as_type(arg, Type.int_of_size(8), True), arg
if real_location == location - 2:
return as_type(arg, Type.int_of_size(16), True), arg
return arg, arg
def record_struct_access(self, ptr: "Expression", location: int) -> None:
if location:
self.nonzero_accesses.add(unwrap_deep(ptr))
def has_nonzero_access(self, ptr: "Expression") -> bool:
return unwrap_deep(ptr) in self.nonzero_accesses
def unique_type_for(self, category: str, key: object, default: Type) -> "Type":
key = (category, key)
if key not in self.unique_type_map:
self.unique_type_map[key] = default
return self.unique_type_map[key]
def saved_reg_symbol(self, reg_name: str) -> "GlobalSymbol":
sym_name = "saved_reg_" + reg_name
type = self.unique_type_for("saved_reg", sym_name, Type.any_reg())
return GlobalSymbol(symbol_name=sym_name, type=type)
def should_save(self, expr: "Expression", offset: Optional[int]) -> bool:
expr = early_unwrap(expr)
if isinstance(expr, GlobalSymbol) and (
expr.symbol_name.startswith("saved_reg_") or expr.symbol_name == "sp"
):
return True
if (
isinstance(expr, PassedInArg)
and not expr.copied
and (offset is None or offset == self.allocated_stack_size + expr.value)
):
return True
return False
def get_stack_var(self, location: int, *, store: bool) -> "Expression":
# See `get_stack_info` for explanation
if self.in_callee_save_reg_region(location):
# Some annoying bookkeeping instruction. To avoid
# further special-casing, just return whatever - it won't matter.
return LocalVar(location, type=Type.any_reg(), path=None)
elif self.location_above_stack(location):
ret, arg = self.get_argument(location - self.allocated_stack_size)
if not store:
self.add_argument(arg)
return ret
elif self.in_subroutine_arg_region(location):
return SubroutineArg(location, type=Type.any_reg())
else:
# Local variable
assert self.stack_pointer_type is not None
field_path, field_type, _ = self.stack_pointer_type.get_deref_field(
location, target_size=None
)
# Some variables on the stack are compiler-managed, and aren't declared
# in the original source. These variables can have different types inside
# different blocks, so we track their types but assume that they may change
# on each store.
# TODO: Because the types are tracked in StackInfo instead of RegInfo, it is
# possible that a load could incorrectly use a weak type from a sibling node
# instead of a parent node. A more correct implementation would use similar
# logic to the PhiExpr system. In practice however, storing types in StackInfo
# works well enough because nodes are traversed approximately depth-first.
# TODO: Maybe only do this for certain configurable regions?
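# For example (illustrative), one spill slot may hold an `s32` loop counter
# in one block and an `f32` temporary in another; the conflicting store
# marks the location as "weak" so each block keeps its own view of the type.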
# Get the previous type stored in `location`
previous_stored_type = self.weak_stack_var_types.get(location)
if previous_stored_type is not None:
# Check if the `field_type` is compatible with the type of the last store
if not previous_stored_type.unify(field_type):
# The types weren't compatible: mark this `location` as "weak"
# This marker is only used to annotate the output
self.weak_stack_var_locations.add(location)
if store:
# If there's already been a store to `location`, then return a fresh type
field_type = Type.any_field()
else:
# Use the type of the last store instead of the one from `get_deref_field()`
field_type = previous_stored_type
# Track the type last stored at `location`
if store:
self.weak_stack_var_types[location] = field_type
return LocalVar(location, type=field_type, path=field_path)
def maybe_get_register_var(self, reg: Register) -> Optional["RegisterVar"]:
return self.reg_vars.get(reg)
def add_register_var(self, reg: Register, name: str) -> None:
type = Type.floatish() if reg.is_float() else Type.intptr()
self.reg_vars[reg] = RegisterVar(reg=reg, type=type, name=name)
def use_register_var(self, var: "RegisterVar") -> None:
self.used_reg_vars.add(var.reg)
def is_stack_reg(self, reg: Register) -> bool:
if reg == self.global_info.arch.stack_pointer_reg:
return True
if reg == self.global_info.arch.frame_pointer_reg:
return self.uses_framepointer
return False
def get_struct_type_map(self) -> Dict["Expression", Dict[int, Type]]:
"""Reorganize struct information in unique_type_map by var & offset"""
struct_type_map: Dict[Expression, Dict[int, Type]] = {}
for (category, key), type in self.unique_type_map.items():
if category != "struct":
continue
var, offset = typing.cast(Tuple[Expression, int], key)
if var not in struct_type_map:
struct_type_map[var] = {}
struct_type_map[var][offset] = type
return struct_type_map
def __str__(self) -> str:
return "\n".join(
[
f"Stack info for function {self.function.name}:",
f"Allocated stack size: {self.allocated_stack_size}",
f"Leaf? {self.is_leaf}",
f"Bounds of callee-saved vars region: {self.callee_save_reg_region}",
f"Callee save registers: {self.callee_save_regs}",
]
)
def get_stack_info(
function: Function,
global_info: "GlobalInfo",
flow_graph: FlowGraph,
) -> StackInfo:
arch = global_info.arch
info = StackInfo(function, global_info, flow_graph)
# The goal here is to pick out special instructions that provide information
# about this function's stack setup.
#
# IDO puts local variables *above* the saved registers on the stack, but
# GCC puts local variables *below* the saved registers.
# To support both, we explicitly determine both the upper & lower bounds of the
# saved registers. Then, we estimate the boundary of the subroutine arguments
# by finding the lowest stack offset that is loaded from or computed. (This
# assumes that the compiler will never reuse a section of stack for *both*
# a local variable *and* a subroutine argument.) Anything within the stack frame,
# but outside of these two regions, is considered a local variable.
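# Roughly (illustrative), an IDO-compiled frame then looks like:
#     sp + allocated_stack_size   top of this function's frame
#     ...                         local variables
#     ...                         callee-saved register region
#     sp + subroutine_arg_top     estimated boundary (computed below)
#     sp + 0                      outgoing subroutine arguments
# with GCC swapping the locals and the callee-saved region.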
callee_saved_offsets: List[int] = []
# Track simple literal values stored into registers: MIPS compilers need a temp
# reg to move the stack pointer more than 0x7FFF bytes.
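# e.g. (illustrative) a large frame may be set up as
#     li    $at, 0x8040
#     subu  $sp, $sp, $at
# so the literal loaded by `li`/`ori` is remembered and applied when the
# matching `subu $sp, $sp, <temp>` is seen.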
temp_reg_values: Dict[Register, int] = {}
for inst in flow_graph.entry_node().block.instructions:
arch_mnemonic = inst.arch_mnemonic(arch)
if inst.mnemonic in arch.instrs_fn_call:
break
elif arch_mnemonic == "mips:addiu" and inst.args[0] == arch.stack_pointer_reg:
# Moving the stack pointer on MIPS
assert isinstance(inst.args[2], AsmLiteral)
info.allocated_stack_size = abs(inst.args[2].signed_value())
elif (
arch_mnemonic == "mips:subu"
and inst.args[0] == arch.stack_pointer_reg
and inst.args[1] == arch.stack_pointer_reg
and inst.args[2] in temp_reg_values
):
# Moving the stack pointer more than 0x7FFF on MIPS
# TODO: This instruction needs to be ignored later in translation, in the
# same way that `addiu $sp, $sp, N` is ignored in handle_addi_real
assert isinstance(inst.args[2], Register)
info.allocated_stack_size = temp_reg_values[inst.args[2]]
elif arch_mnemonic == "ppc:stwu" and inst.args[0] == arch.stack_pointer_reg:
# Moving the stack pointer on PPC
assert isinstance(inst.args[1], AsmAddressMode)
assert isinstance(inst.args[1].lhs, AsmLiteral)
info.allocated_stack_size = abs(inst.args[1].lhs.signed_value())
elif (
arch_mnemonic == "mips:move"
and inst.args[0] == arch.frame_pointer_reg
and inst.args[1] == arch.stack_pointer_reg
):
# "move fp, sp" very likely means the code is compiled with frame
# pointers enabled; thus fp should be treated the same as sp.
info.uses_framepointer = True
elif (
arch_mnemonic
in [
"mips:sw",
"mips:swc1",
"mips:sdc1",
"ppc:stw",
"ppc:stmw",
"ppc:stfd",
"ppc:psq_st",
]
and isinstance(inst.args[0], Register)
and inst.args[0] in arch.saved_regs
and isinstance(inst.args[1], AsmAddressMode)
and inst.args[1].rhs == arch.stack_pointer_reg
and (
inst.args[0] not in info.callee_save_regs
or arch_mnemonic == "ppc:psq_st"
)
):
# Initial saving of callee-save register onto the stack.
if inst.args[0] in (arch.return_address_reg, Register("r0")):
# Saving the return address on the stack.
info.is_leaf = False
# The registers & their stack accesses must be matched up in ArchAsm.parse
for reg, mem in zip(inst.inputs, inst.outputs):
if isinstance(reg, Register) and isinstance(mem, StackLocation):
assert mem.symbolic_offset is None
stack_offset = mem.offset
if arch_mnemonic != "ppc:psq_st":
# psq_st instructions store the same register as stfd, just
# as packed singles instead. Prioritize the stfd.
info.callee_save_regs.add(reg)
callee_saved_offsets.append(stack_offset)
elif arch_mnemonic == "ppc:mflr" and inst.args[0] == Register("r0"):
info.is_leaf = False
elif arch_mnemonic == "mips:li" and inst.args[0] in arch.temp_regs:
assert isinstance(inst.args[0], Register)
assert isinstance(inst.args[1], AsmLiteral)
temp_reg_values[inst.args[0]] = inst.args[1].value
elif (
arch_mnemonic == "mips:ori"
and inst.args[0] == inst.args[1]
and inst.args[0] in temp_reg_values
):
assert isinstance(inst.args[0], Register)
assert isinstance(inst.args[2], AsmLiteral)
temp_reg_values[inst.args[0]] |= inst.args[2].value
if not info.is_leaf:
# Iterate over the whole function, not just the first basic block,
# to estimate the boundary for the subroutine argument region
info.subroutine_arg_top = info.allocated_stack_size
for node in flow_graph.nodes:
for inst in node.block.instructions:
arch_mnemonic = inst.arch_mnemonic(arch)
if (
arch_mnemonic in ["mips:lw", "mips:lwc1", "mips:ldc1", "ppc:lwz"]
and isinstance(inst.args[1], AsmAddressMode)
and inst.args[1].rhs == arch.stack_pointer_reg
and inst.args[1].lhs_as_literal() >= 16
):
info.subroutine_arg_top = min(
info.subroutine_arg_top, inst.args[1].lhs_as_literal()
)
elif (
arch_mnemonic == "mips:addiu"
and inst.args[0] != arch.stack_pointer_reg
and inst.args[1] == arch.stack_pointer_reg
and isinstance(inst.args[2], AsmLiteral)
and inst.args[2].value < info.allocated_stack_size
):
info.subroutine_arg_top = min(
info.subroutine_arg_top, inst.args[2].value
)
# Compute the bounds of the callee-saved register region, including padding
if callee_saved_offsets:
callee_saved_offsets.sort()
bottom = callee_saved_offsets[0]
# Both IDO & GCC save registers in two subregions:
# (a) One for double-sized registers
# (b) One for word-sized registers, padded to a multiple of 8 bytes
# IDO has (a) lower than (b); GCC has (b) lower than (a)
# Check that there are no gaps in this region, other than a single
# 4-byte word between subregions.
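        # For example, offsets [0x18, 0x1C, 0x24, 0x28] are accepted (one 4-byte
        # pad at 0x20 between the subregions), while [0x18, 0x24] would raise.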
top = bottom
internal_padding_added = False
for offset in callee_saved_offsets:
if offset != top:
if not internal_padding_added and offset == top + 4:
internal_padding_added = True
else:
raise DecompFailure(
f"Gap in callee-saved word stack region. "
f"Saved: {callee_saved_offsets}, "
f"gap at: {offset} != {top}."
)
top = offset + 4
info.callee_save_reg_region = (bottom, top)
# Subroutine arguments must be at the very bottom of the stack, so they
# must come after the callee-saved region
info.subroutine_arg_top = min(info.subroutine_arg_top, bottom)
# Use a struct to represent the stack layout. If the struct is provided in the context,
# its fields will be used for variable types & names.
stack_struct_name = f"_mips2c_stack_{function.name}"
stack_struct = global_info.typepool.get_struct_by_tag_name(
stack_struct_name, global_info.typemap
)
if stack_struct is not None:
if stack_struct.size != info.allocated_stack_size:
raise DecompFailure(
f"Function {function.name} has a provided stack type {stack_struct_name} "
f"with size {stack_struct.size}, but the detected stack size was "
f"{info.allocated_stack_size}."
)
else:
stack_struct = StructDeclaration.unknown(
global_info.typepool,
size=info.allocated_stack_size,
tag_name=stack_struct_name,
)
# Mark the struct as a stack struct so we never try to use a reference to the struct itself
stack_struct.is_stack = True
stack_struct.new_field_prefix = "sp"
# This acts as the type of the $sp register
info.stack_pointer_type = Type.ptr(Type.struct(stack_struct))
return info
def format_hex(val: int) -> str:
return format(val, "x").upper()
def escape_byte(b: int) -> bytes:
table = {
b"\0": b"\\0",
b"\b": b"\\b",
b"\f": b"\\f",
b"\n": b"\\n",
b"\r": b"\\r",
b"\t": b"\\t",
b"\v": b"\\v",
b"\\": b"\\\\",
b'"': b'\\"',
}
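    # e.g. escape_byte(0x0A) -> b"\\n", escape_byte(0x7F) -> b"\\x7f";
    # ordinary printable bytes are returned unchanged.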
bs = bytes([b])
if bs in table:
return table[bs]
if b < 0x20 or b in (0xFF, 0x7F):
return f"\\x{b:02x}".encode("ascii")
return bs
@dataclass(eq=False)
class Var:
stack_info: StackInfo = field(repr=False)
prefix: str
num_usages: int = 0
name: Optional[str] = None
def format(self, fmt: Formatter) -> str:
if self.name is None:
self.name = self.stack_info.temp_var(self.prefix)
return self.name
def __str__(self) -> str:
return "<temp>"
class Expression(abc.ABC):
type: Type
@abc.abstractmethod
def dependencies(self) -> List["Expression"]:
...
def use(self) -> None:
"""Mark an expression as "will occur in the output". Various subclasses
override this to provide special behavior; for instance, EvalOnceExpr
checks if it occurs more than once in the output and if so emits a temp.
It is important to get the number of use() calls correct:
* if use() is called but the expression is not emitted, it may cause
function calls to be silently dropped.
* if use() is not called but the expression is emitted, it may cause phi
variables to be printed as unnamed-phi($reg), without any assignment
to that phi.
* if use() is called once but the expression is emitted twice, it may
cause function calls to be duplicated."""
for expr in self.dependencies():
expr.use()
@abc.abstractmethod
def format(self, fmt: Formatter) -> str:
...
def __str__(self) -> str:
"""Stringify an expression for debug purposes. The output can change
depending on when this is called, e.g. because of EvalOnceExpr state.
To avoid using it by accident, output is quoted."""
fmt = Formatter(debug=True)
return '"' + self.format(fmt) + '"'
class Condition(Expression):
@abc.abstractmethod
def negated(self) -> "Condition":
...
class Statement(abc.ABC):
@abc.abstractmethod
def should_write(self) -> bool:
...
@abc.abstractmethod
def format(self, fmt: Formatter) -> str:
...
def __str__(self) -> str:
"""Stringify a statement for debug purposes. The output can change
depending on when this is called, e.g. because of EvalOnceExpr state.
To avoid using it by accident, output is quoted."""
fmt = Formatter(debug=True)
return '"' + self.format(fmt) + '"'
@dataclass(frozen=True, eq=False)
class ErrorExpr(Condition):
desc: Optional[str] = None
type: Type = field(default_factory=Type.any_reg)
def dependencies(self) -> List[Expression]:
return []
def negated(self) -> "Condition":
return self
def format(self, fmt: Formatter) -> str:
if self.desc is not None:
return f"MIPS2C_ERROR({self.desc})"
return "MIPS2C_ERROR()"
@dataclass(frozen=True)
class CommentExpr(Expression):
expr: Expression
type: Type = field(compare=False)
prefix: Optional[str] = None
suffix: Optional[str] = None
def dependencies(self) -> List[Expression]:
return [self.expr]
def format(self, fmt: Formatter) -> str:
expr_str = self.expr.format(fmt)
if fmt.coding_style.comment_style == CodingStyle.CommentStyle.NONE:
return expr_str
prefix_str = f"/* {self.prefix} */ " if self.prefix is not None else ""
suffix_str = f" /* {self.suffix} */" if self.suffix is not None else ""
return f"{prefix_str}{expr_str}{suffix_str}"
@staticmethod
def wrap(
expr: Expression, prefix: Optional[str] = None, suffix: Optional[str] = None
) -> Expression:
if prefix is None and suffix is None:
return expr
return CommentExpr(expr=expr, type=expr.type, prefix=prefix, suffix=suffix)
@dataclass(frozen=True, eq=False)
class SecondF64Half(Expression):
type: Type = field(default_factory=Type.any_reg)
def dependencies(self) -> List[Expression]:
return []
def format(self, fmt: Formatter) -> str:
return "(second half of f64)"
@dataclass(frozen=True, eq=False)
class CarryBit(Expression):
type: Type = field(default_factory=Type.intish)
def dependencies(self) -> List[Expression]:
return []
def format(self, fmt: Formatter) -> str:
return "MIPS2C_CARRY"
@staticmethod
def add_to(expr: Expression) -> "BinaryOp":
return fold_divmod(BinaryOp.intptr(expr, "+", CarryBit()))
@staticmethod
def sub_from(expr: Expression) -> "BinaryOp":
return BinaryOp.intptr(expr, "-", UnaryOp("!", CarryBit(), type=Type.intish()))
@dataclass(frozen=True, eq=False)
class BinaryOp(Condition):
left: Expression
op: str
right: Expression
type: Type
@staticmethod
def int(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(
left=as_intish(left), op=op, right=as_intish(right), type=Type.intish()
)
@staticmethod
def int64(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(
left=as_int64(left), op=op, right=as_int64(right), type=Type.int64()
)
@staticmethod
def intptr(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(
left=as_intptr(left), op=op, right=as_intptr(right), type=Type.intptr()
)
@staticmethod
def icmp(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(
left=as_intptr(left), op=op, right=as_intptr(right), type=Type.bool()
)
@staticmethod
def scmp(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(
left=as_sintish(left, silent=True),
op=op,
right=as_sintish(right, silent=True),
type=Type.bool(),
)
@staticmethod
def sintptr_cmp(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(
left=as_type(left, Type.sintptr(), False),
op=op,
right=as_type(right, Type.sintptr(), False),
type=Type.bool(),
)
@staticmethod
def ucmp(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(
left=as_uintish(left), op=op, right=as_uintish(right), type=Type.bool()
)
@staticmethod
def uintptr_cmp(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(
left=as_type(left, Type.uintptr(), False),
op=op,
right=as_type(right, Type.uintptr(), False),
type=Type.bool(),
)
@staticmethod
def fcmp(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(
left=as_f32(left),
op=op,
right=as_f32(right),
type=Type.bool(),
)
@staticmethod
def dcmp(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(
left=as_f64(left),
op=op,
right=as_f64(right),
type=Type.bool(),
)
@staticmethod
def sint(
left: Expression, op: str, right: Expression, *, silent: bool = False
) -> "BinaryOp":
return BinaryOp(
left=as_sintish(left, silent=silent),
op=op,
right=as_sintish(right, silent=silent),
type=Type.s32(),
)
@staticmethod
def uint(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(
left=as_uintish(left), op=op, right=as_uintish(right), type=Type.u32()
)
@staticmethod
def s64(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(left=as_s64(left), op=op, right=as_s64(right), type=Type.s64())
@staticmethod
def u64(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(left=as_u64(left), op=op, right=as_u64(right), type=Type.u64())
@staticmethod
def f32(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(
left=as_f32(left),
op=op,
right=as_f32(right),
type=Type.f32(),
)
@staticmethod
def f64(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(
left=as_f64(left),
op=op,
right=as_f64(right),
type=Type.f64(),
)
def is_comparison(self) -> bool:
return self.op in ["==", "!=", ">", "<", ">=", "<="]
def is_floating(self) -> bool:
return self.left.type.is_float() and self.right.type.is_float()
def negated(self) -> "Condition":
if (
self.op in ["&&", "||"]
and isinstance(self.left, Condition)
and isinstance(self.right, Condition)
):
# DeMorgan's Laws
return BinaryOp(
left=self.left.negated(),
op={"&&": "||", "||": "&&"}[self.op],
right=self.right.negated(),
type=Type.bool(),
)
if not self.is_comparison() or (
self.is_floating() and self.op in ["<", ">", "<=", ">="]
):
# Floating-point comparisons cannot be negated in any nice way,
# due to nans.
return UnaryOp("!", self, type=Type.bool())
return BinaryOp(
left=self.left,
op={"==": "!=", "!=": "==", ">": "<=", "<": ">=", ">=": "<", "<=": ">"}[
self.op
],
right=self.right,
type=Type.bool(),
)
def dependencies(self) -> List[Expression]:
return [self.left, self.right]
def format(self, fmt: Formatter) -> str:
left_expr = late_unwrap(self.left)
right_expr = late_unwrap(self.right)
if (
self.is_comparison()
and isinstance(left_expr, Literal)
and not isinstance(right_expr, Literal)
):
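            # Prefer the literal on the right-hand side, e.g. `1 < x` becomes `x > 1`.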
return BinaryOp(
left=right_expr,
op=self.op.translate(str.maketrans("<>", "><")),
right=left_expr,
type=self.type,
).format(fmt)
if (
not self.is_floating()
and isinstance(right_expr, Literal)
and right_expr.value < 0
):
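            # Negative right-hand literals read better rewritten, e.g.
            # `x + -1` is printed as `x - 1`, and `x & -2` as `x & ~1`.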
if self.op == "+":
neg = Literal(value=-right_expr.value, type=right_expr.type)
sub = BinaryOp(op="-", left=left_expr, right=neg, type=self.type)
return sub.format(fmt)
if self.op in ("&", "|"):
neg = Literal(value=~right_expr.value, type=right_expr.type)
right = UnaryOp("~", neg, type=Type.any_reg())
expr = BinaryOp(op=self.op, left=left_expr, right=right, type=self.type)
return expr.format(fmt)
# For commutative, left-associative operations, strip unnecessary parentheses.
lhs = left_expr.format(fmt)
if (
isinstance(left_expr, BinaryOp)
and left_expr.op == self.op
and self.op in ASSOCIATIVE_OPS
):
lhs = lhs[1:-1]
# For certain operators, use base-10 (decimal) for the RHS
if self.op in ("/", "%") and isinstance(right_expr, Literal):
rhs = right_expr.format(fmt, force_dec=True)
else:
rhs = right_expr.format(fmt)
# These aren't real operators (or functions); format them as a fn call
if self.op in PSEUDO_FUNCTION_OPS:
return f"{self.op}({lhs}, {rhs})"
return f"({lhs} {self.op} {rhs})"
@dataclass(frozen=True, eq=False)
class TernaryOp(Expression):
cond: Condition
left: Expression
right: Expression
type: Type
def dependencies(self) -> List[Expression]:
return [self.cond, self.left, self.right]
def format(self, fmt: Formatter) -> str:
cond_str = simplify_condition(self.cond).format(fmt)
left_str = self.left.format(fmt)
right_str = self.right.format(fmt)
return f"({cond_str} ? {left_str} : {right_str})"
@dataclass(frozen=True, eq=False)
class UnaryOp(Condition):
op: str
expr: Expression
type: Type
def dependencies(self) -> List[Expression]:
return [self.expr]
@staticmethod
def sint(op: str, expr: Expression) -> "UnaryOp":
expr = as_sintish(expr, silent=True)
return UnaryOp(
op=op,
expr=expr,
type=expr.type,
)
def negated(self) -> "Condition":
if self.op == "!" and isinstance(self.expr, (UnaryOp, BinaryOp)):
return self.expr
return UnaryOp("!", self, type=Type.bool())
def format(self, fmt: Formatter) -> str:
# These aren't real operators (or functions); format them as a fn call
if self.op in PSEUDO_FUNCTION_OPS:
return f"{self.op}({self.expr.format(fmt)})"
return f"{self.op}{self.expr.format(fmt)}"
@dataclass(frozen=True, eq=False)
class ExprCondition(Condition):
expr: Expression
type: Type
is_negated: bool = False
def dependencies(self) -> List[Expression]:
return [self.expr]
def negated(self) -> "Condition":
return ExprCondition(self.expr, self.type, not self.is_negated)
def format(self, fmt: Formatter) -> str:
neg = "!" if self.is_negated else ""
return f"{neg}{self.expr.format(fmt)}"
@dataclass(frozen=True, eq=False)
class CommaConditionExpr(Condition):
statements: List["Statement"]
condition: "Condition"
type: Type = Type.bool()
def dependencies(self) -> List[Expression]:
assert False, "CommaConditionExpr should not be used within translate.py"
return []
def negated(self) -> "Condition":
return CommaConditionExpr(self.statements, self.condition.negated())
def format(self, fmt: Formatter) -> str:
comma_joined = ", ".join(
stmt.format(fmt).rstrip(";") for stmt in self.statements
)
return f"({comma_joined}, {self.condition.format(fmt)})"
@dataclass(frozen=True, eq=False)
class Cast(Expression):
expr: Expression
type: Type
reinterpret: bool = False
silent: bool = True
def dependencies(self) -> List[Expression]:
return [self.expr]
def use(self) -> None:
# Try to unify, to make stringification output better.
self.expr.type.unify(self.type)
super().use()
def needed_for_store(self) -> bool:
if not self.reinterpret:
# int <-> float casts should be emitted even for stores.
return True
if not self.expr.type.unify(self.type):
# Emit casts when types fail to unify.
return True
return False
def is_trivial(self) -> bool:
return (
self.reinterpret
and self.expr.type.is_float() == self.type.is_float()
and is_trivial_expression(self.expr)
)
def format(self, fmt: Formatter) -> str:
if self.reinterpret and self.expr.type.is_float() != self.type.is_float():
# This shouldn't happen, but mark it in the output if it does.
if fmt.valid_syntax:
return (
f"MIPS2C_BITWISE({self.type.format(fmt)}, {self.expr.format(fmt)})"
)
return f"(bitwise {self.type.format(fmt)}) {self.expr.format(fmt)}"
if self.reinterpret and (
self.silent
or (is_type_obvious(self.expr) and self.expr.type.unify(self.type))
):
return self.expr.format(fmt)
if fmt.skip_casts:
return self.expr.format(fmt)
# Function casts require special logic because function calls have
# higher precedence than casts
fn_sig = self.type.get_function_pointer_signature()
if fn_sig:
prototype_sig = self.expr.type.get_function_pointer_signature()
if not prototype_sig or not prototype_sig.unify_with_args(fn_sig):
# A function pointer cast is required if the inner expr is not
# a function pointer, or has incompatible argument types
return f"(({self.type.format(fmt)}) {self.expr.format(fmt)})"
if not prototype_sig.return_type.unify(fn_sig.return_type):
# Only cast the return value of the call
return f"({fn_sig.return_type.format(fmt)}) {self.expr.format(fmt)}"
# No cast needed
return self.expr.format(fmt)
return f"({self.type.format(fmt)}) {self.expr.format(fmt)}"
@dataclass(frozen=True, eq=False)
class FuncCall(Expression):
function: Expression
args: List[Expression]
type: Type
def dependencies(self) -> List[Expression]:
return self.args + [self.function]
def format(self, fmt: Formatter) -> str:
# TODO: The function type may have a different number of params than it had
# when the FuncCall was created. Should we warn that there may be the wrong
# number of arguments at this callsite?
args = ", ".join(format_expr(arg, fmt) for arg in self.args)
return f"{self.function.format(fmt)}({args})"
@dataclass(frozen=True, eq=True)
class LocalVar(Expression):
value: int
type: Type = field(compare=False)
path: Optional[AccessPath] = field(compare=False)
def dependencies(self) -> List[Expression]:
return []
def format(self, fmt: Formatter) -> str:
fallback_name = f"unksp{format_hex(self.value)}"
if self.path is None:
return fallback_name
name = StructAccess.access_path_to_field_name(self.path, fmt)
if name.startswith("->"):
return name[2:]
return fallback_name
def toplevel_decl(self, fmt: Formatter) -> Optional[str]:
"""Return a declaration for this LocalVar, if required."""
# If len(self.path) > 2, then this local is an inner field of another
# local, so it doesn't need to be declared.
if (
self.path is None
or len(self.path) != 2
or not isinstance(self.path[1], str)
):
return None
return self.type.to_decl(self.path[1], fmt)
@dataclass(frozen=True, eq=False)
class RegisterVar(Expression):
reg: Register
name: str
type: Type
def dependencies(self) -> List[Expression]:
return []
def format(self, fmt: Formatter) -> str:
return self.name
@dataclass(frozen=True, eq=True)
class PassedInArg(Expression):
value: int
copied: bool = field(compare=False)
stack_info: StackInfo = field(compare=False, repr=False)
type: Type = field(compare=False)
def dependencies(self) -> List[Expression]:
return []
def format(self, fmt: Formatter) -> str:
assert self.value % 4 == 0
name = self.stack_info.get_param_name(self.value)
return name or f"arg{format_hex(self.value // 4)}"
@dataclass(frozen=True, eq=True)
class SubroutineArg(Expression):
value: int
type: Type = field(compare=False)
def dependencies(self) -> List[Expression]:
return []
def format(self, fmt: Formatter) -> str:
return f"subroutine_arg{format_hex(self.value // 4)}"
@dataclass(eq=True, unsafe_hash=True)
class StructAccess(Expression):
# Represents struct_var->offset.
# This has eq=True since it represents a live expression and not an access
# at a certain point in time -- this sometimes helps get rid of phi nodes.
# prevent_later_uses makes sure it's not used after writes/function calls
# that may invalidate it.
struct_var: Expression
offset: int
target_size: Optional[int]
field_path: Optional[AccessPath] = field(compare=False)
stack_info: Optional[StackInfo] = field(compare=False, repr=False)
type: Type = field(compare=False)
checked_late_field_path: bool = field(default=False, compare=False)
def __post_init__(self) -> None:
# stack_info is used to resolve field_path late
assert (
self.stack_info is not None or self.field_path is not None
), "Must provide at least one of (stack_info, field_path)"
self.assert_valid_field_path(self.field_path)
@staticmethod
def assert_valid_field_path(path: Optional[AccessPath]) -> None:
assert path is None or (
path and isinstance(path[0], int)
), "The first element of the field path, if present, must be an int"
@classmethod
def access_path_to_field_name(cls, path: AccessPath, fmt: Formatter) -> str:
"""
Convert an access path into a dereferencing field name, like the following examples:
- `[0, "foo", 3, "bar"]` into `"->foo[3].bar"`
- `[0, 3, "bar"]` into `"[0][3].bar"`
            - `[0, 1, 2]` into `"[0][1][2]"`
- `[0]` into `"[0]"`
The path must have at least one element, and the first element must be an int.
"""
cls.assert_valid_field_path(path)
output = ""
# Replace an initial "[0]." with "->"
if len(path) >= 2 and path[0] == 0 and isinstance(path[1], str):
output += f"->{path[1]}"
path = path[2:]
for p in path:
if isinstance(p, str):
output += f".{p}"
elif isinstance(p, int):
output += f"[{fmt.format_int(p)}]"
else:
static_assert_unreachable(p)
return output
def dependencies(self) -> List[Expression]:
return [self.struct_var]
def make_reference(self) -> Optional["StructAccess"]:
field_path = self.late_field_path()
if field_path and len(field_path) >= 2 and field_path[-1] == 0:
return replace(self, field_path=field_path[:-1])
return None
def late_field_path(self) -> Optional[AccessPath]:
# If we didn't have a type at the time when the struct access was
# constructed, but now we do, compute field name.
if self.field_path is None and not self.checked_late_field_path:
var = late_unwrap(self.struct_var)
# Format var to recursively resolve any late_field_path it has to
# potentially improve var.type before we look up our field name
var.format(Formatter())
field_path, field_type, _ = var.type.get_deref_field(
self.offset, target_size=self.target_size
)
if field_path is not None:
self.assert_valid_field_path(field_path)
self.field_path = field_path
self.type.unify(field_type)
self.checked_late_field_path = True
return self.field_path
def late_has_known_type(self) -> bool:
if self.late_field_path() is not None:
return True
assert (
self.stack_info is not None
), "StructAccess must have stack_info if field_path isn't set"
if self.offset == 0:
var = late_unwrap(self.struct_var)
if (
not self.stack_info.has_nonzero_access(var)
and isinstance(var, AddressOf)
and isinstance(var.expr, GlobalSymbol)
and var.expr.type_provided
):
return True
return False
def format(self, fmt: Formatter) -> str:
var = late_unwrap(self.struct_var)
has_nonzero_access = False
if self.stack_info is not None:
has_nonzero_access = self.stack_info.has_nonzero_access(var)
field_path = self.late_field_path()
if field_path is not None and field_path != [0]:
has_nonzero_access = True
elif fmt.valid_syntax and (self.offset != 0 or has_nonzero_access):
offset_str = fmt.format_int(self.offset)
return f"MIPS2C_FIELD({var.format(fmt)}, {Type.ptr(self.type).format(fmt)}, {offset_str})"
else:
prefix = "unk" + ("_" if fmt.coding_style.unknown_underscore else "")
field_path = [0, prefix + format_hex(self.offset)]
field_name = self.access_path_to_field_name(field_path, fmt)
# Rewrite `(&x)->y` to `x.y` by stripping `AddressOf` & setting deref=False
deref = True
if (
isinstance(var, AddressOf)
and not var.expr.type.is_array()
and field_name.startswith("->")
):
var = var.expr
field_name = field_name.replace("->", ".", 1)
deref = False
# Rewrite `x->unk0` to `*x` and `x.unk0` to `x`, unless has_nonzero_access
if self.offset == 0 and not has_nonzero_access:
return f"{"*" if deref else ""}{var.format(fmt)}"
return f"{parenthesize_for_struct_access(var, fmt)}{field_name}"
@dataclass(frozen=True, eq=True)
class ArrayAccess(Expression):
# Represents ptr[index]. eq=True for symmetry with StructAccess.
ptr: Expression
index: Expression
type: Type = field(compare=False)
def dependencies(self) -> List[Expression]:
return [self.ptr, self.index]
def format(self, fmt: Formatter) -> str:
base = parenthesize_for_struct_access(self.ptr, fmt)
index = format_expr(self.index, fmt)
return f"{base}[{index}]"
@dataclass(eq=False)
class GlobalSymbol(Expression):
symbol_name: str
type: Type
asm_data_entry: Optional[AsmDataEntry] = None
symbol_in_context: bool = False
type_provided: bool = False
initializer_in_typemap: bool = False
demangled_str: Optional[str] = None
def dependencies(self) -> List[Expression]:
return []
def is_string_constant(self) -> bool:
ent = self.asm_data_entry
if not ent or not ent.is_string:
return False
return len(ent.data) == 1 and isinstance(ent.data[0], bytes)
def format_string_constant(self, fmt: Formatter) -> str:
assert self.is_string_constant(), "checked by caller"
assert self.asm_data_entry and isinstance(self.asm_data_entry.data[0], bytes)
has_trailing_null = False
data = self.asm_data_entry.data[0]
while data and data[-1] == 0:
data = data[:-1]
has_trailing_null = True
data = b"".join(map(escape_byte, data))
strdata = data.decode("utf-8", "backslashreplace")
ret = f'"{strdata}"'
if not has_trailing_null:
ret += " /* not null-terminated */"
return ret
def format(self, fmt: Formatter) -> str:
return self.symbol_name
def potential_array_dim(self, element_size: int) -> Tuple[int, int]:
"""
Using the size of the symbol's `asm_data_entry` and a potential array element
size, return the corresponding array dimension and number of "extra" bytes left
at the end of the symbol's data.
If the extra bytes are nonzero, then it's likely that `element_size` is incorrect.
"""
# If we don't have the .data/.rodata entry for this symbol, we can't guess
# its array dimension. Jump tables are ignored and not treated as arrays.
if self.asm_data_entry is None or self.asm_data_entry.is_jtbl:
return 0, element_size
min_data_size, max_data_size = self.asm_data_entry.size_range_bytes()
if element_size > max_data_size:
# The type is too big for the data (not an array)
return 0, max_data_size
# Check if it's possible that this symbol is not an array, and is just 1 element
if min_data_size <= element_size <= max_data_size and not self.type.is_array():
return 1, 0
array_dim, extra_bytes = divmod(min_data_size, element_size)
if extra_bytes != 0:
# If it's not possible to make an exact multiple of element_size by incorporating
# bytes from the padding, then indicate that in the return value.
padding_bytes = element_size - extra_bytes
if min_data_size + padding_bytes > max_data_size:
return array_dim, extra_bytes
# Include potential padding in the array. Although this is unlikely to match the original C,
# it's much easier to manually remove all or some of these elements than to add them back in.
return max_data_size // element_size, 0
@dataclass(frozen=True, eq=True)
class Literal(Expression):
value: int
type: Type = field(compare=False, default_factory=Type.any)
elide_cast: bool = field(compare=False, default=False)
def dependencies(self) -> List[Expression]:
return []
def format(self, fmt: Formatter, force_dec: bool = False) -> str:
enum_name = self.type.get_enum_name(self.value)
if enum_name is not None:
return enum_name
if self.type.is_likely_float():
if self.type.get_size_bits() == 64:
return format_f64_imm(self.value)
else:
return format_f32_imm(self.value) + "f"
if self.type.is_pointer() and self.value == 0:
return "NULL"
prefix = ""
suffix = ""
if not fmt.skip_casts and not self.elide_cast:
if self.type.is_pointer():
prefix = f"({self.type.format(fmt)})"
if self.type.is_unsigned():
suffix = "U"
if force_dec:
value = str(self.value)
else:
size_bits = self.type.get_size_bits()
v = self.value
# The top 2 bits are tested rather than just the sign bit
# to help prevent N64 VRAM pointers (0x80000000+) turning negative
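            # e.g. for a signed 32-bit value: 0xFFFFFFFF is printed as -1,
            # while 0x80000001 stays positive since it is below 0xC0000000.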
if (
self.type.is_signed()
and size_bits
and v & (1 << (size_bits - 1))
and v > (3 << (size_bits - 2))
and v < 2 ** size_bits
):
v -= 1 << size_bits
value = fmt.format_int(v, size_bits=size_bits)
return prefix + value + suffix
def likely_partial_offset(self) -> bool:
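        # Heuristic: values that are a multiple of 0x8000 (or one less) and
        # reasonably small are likely one half of an offset split across two
        # instructions; see the large-struct-offset handling in deref().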
return self.value % 2 ** 15 in (0, 2 ** 15 - 1) and self.value < 0x1000000
@dataclass(frozen=True, eq=True)
class AddressOf(Expression):
expr: Expression
type: Type = field(compare=False, default_factory=Type.ptr)
def dependencies(self) -> List[Expression]:
return [self.expr]
def format(self, fmt: Formatter) -> str:
if isinstance(self.expr, GlobalSymbol):
if self.expr.is_string_constant():
return self.expr.format_string_constant(fmt)
if self.expr.type.is_array():
return f"{self.expr.format(fmt)}"
if self.expr.type.is_function():
# Functions are automatically converted to function pointers
# without an explicit `&` by the compiler
return f"{self.expr.format(fmt)}"
if isinstance(self.expr, StructAccess):
# Simplify `&x[0]` into `x`
ref = self.expr.make_reference()
if ref:
return f"{ref.format(fmt)}"
return f"&{self.expr.format(fmt)}"
@dataclass(frozen=True)
class Lwl(Expression):
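    # Partial result of a MIPS `lwl` (load word left) instruction; it is
    # normally combined with a matching `lwr` into a Load3Bytes or
    # UnalignedLoad expression elsewhere in the translation.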
load_expr: Expression
key: Tuple[int, object]
type: Type = field(compare=False, default_factory=Type.any_reg)
def dependencies(self) -> List[Expression]:
return [self.load_expr]
def format(self, fmt: Formatter) -> str:
return f"MIPS2C_LWL({self.load_expr.format(fmt)})"
@dataclass(frozen=True)
class Load3Bytes(Expression):
load_expr: Expression
type: Type = field(compare=False, default_factory=Type.any_reg)
def dependencies(self) -> List[Expression]:
return [self.load_expr]
def format(self, fmt: Formatter) -> str:
if fmt.valid_syntax:
return f"MIPS2C_FIRST3BYTES({self.load_expr.format(fmt)})"
return f"(first 3 bytes) {self.load_expr.format(fmt)}"
@dataclass(frozen=True)
class UnalignedLoad(Expression):
load_expr: Expression
type: Type = field(compare=False, default_factory=Type.any_reg)
def dependencies(self) -> List[Expression]:
return [self.load_expr]
def format(self, fmt: Formatter) -> str:
if fmt.valid_syntax:
return f"MIPS2C_UNALIGNED32({self.load_expr.format(fmt)})"
return f"(unaligned s32) {self.load_expr.format(fmt)}"
@dataclass(frozen=False, eq=False)
class EvalOnceExpr(Expression):
wrapped_expr: Expression
var: Var
type: Type
# True for function calls/errors
emit_exactly_once: bool
# Mutable state:
    # True if this EvalOnceExpr should be totally transparent and not emit a variable.
# It may dynamically change from true to false due to forced emissions.
# Initially, it is based on is_trivial_expression.
trivial: bool
# True if this EvalOnceExpr must emit a variable (see RegMeta.force)
forced_emit: bool = False
# The number of expressions that depend on this EvalOnceExpr; we emit a variable
# if this is > 1.
num_usages: int = 0
def dependencies(self) -> List[Expression]:
# (this is a bit iffy since state can change over time, but improves uses_expr)
if self.need_decl():
return []
return [self.wrapped_expr]
def use(self) -> None:
self.num_usages += 1
if self.trivial or (self.num_usages == 1 and not self.emit_exactly_once):
self.wrapped_expr.use()
def force(self) -> None:
# Transition to non-trivial, and mark as used multiple times to force a var.
# TODO: If it was originally trivial, we may previously have marked its
# wrappee used multiple times, even though we now know that it should
# have been marked just once... We could fix that by moving marking of
# trivial EvalOnceExpr's to the very end. At least the consequences of
# getting this wrong are pretty mild -- it just causes extraneous var
# emission in rare cases.
self.trivial = False
self.forced_emit = True
self.use()
self.use()
def need_decl(self) -> bool:
return self.num_usages > 1 and not self.trivial
def format(self, fmt: Formatter) -> str:
if not self.need_decl():
return self.wrapped_expr.format(fmt)
else:
return self.var.format(fmt)
@dataclass(frozen=False, eq=False)
class PhiExpr(Expression):
reg: Register
node: Node
type: Type
used_phis: List["PhiExpr"]
name: Optional[str] = None
num_usages: int = 0
replacement_expr: Optional[Expression] = None
used_by: Optional["PhiExpr"] = None
def dependencies(self) -> List[Expression]:
return []
def get_var_name(self) -> str:
return self.name or f"unnamed-phi({self.reg.register_name})"
def use(self, from_phi: Optional["PhiExpr"] = None) -> None:
if self.num_usages == 0:
self.used_phis.append(self)
self.used_by = from_phi
self.num_usages += 1
if self.used_by != from_phi:
self.used_by = None
if self.replacement_expr is not None:
self.replacement_expr.use()
def propagates_to(self) -> "PhiExpr":
"""Compute the phi that stores to this phi should propagate to. This is
        usually the phi itself, but if the phi is used only once, for the purpose of
computing another phi, we forward the store there directly. This is
admittedly a bit sketchy, in case the phi is in scope here and used
later on... but we have that problem with regular phi assignments as
well."""
if self.used_by is None or self.replacement_expr is not None:
return self
return self.used_by.propagates_to()
def format(self, fmt: Formatter) -> str:
if self.replacement_expr:
return self.replacement_expr.format(fmt)
return self.get_var_name()
@dataclass
class SwitchControl:
control_expr: Expression
jump_table: Optional[GlobalSymbol] = None
offset: int = 0
is_irregular: bool = False
def matches_guard_condition(self, cond: Condition) -> bool:
"""
Return True if `cond` is one of:
- `((control_expr + (-offset)) >= len(jump_table))`, if `offset != 0`
- `(control_expr >= len(jump_table))`, if `offset == 0`
These are the appropriate bounds checks before using `jump_table`.
"""
cmp_expr = simplify_condition(cond)
if not isinstance(cmp_expr, BinaryOp) or cmp_expr.op not in (">=", ">"):
return False
cmp_exclusive = cmp_expr.op == ">"
# The LHS may have been wrapped in a u32 cast
left_expr = late_unwrap(cmp_expr.left)
if isinstance(left_expr, Cast):
left_expr = late_unwrap(left_expr.expr)
if self.offset != 0:
if (
not isinstance(left_expr, BinaryOp)
or late_unwrap(left_expr.left) != late_unwrap(self.control_expr)
or left_expr.op != "+"
or late_unwrap(left_expr.right) != Literal(-self.offset)
):
return False
elif left_expr != late_unwrap(self.control_expr):
return False
right_expr = late_unwrap(cmp_expr.right)
if (
self.jump_table is None
or self.jump_table.asm_data_entry is None
or not self.jump_table.asm_data_entry.is_jtbl
or not isinstance(right_expr, Literal)
):
return False
# Count the number of labels (exclude padding bytes)
jump_table_len = sum(
isinstance(e, str) for e in self.jump_table.asm_data_entry.data
)
return right_expr.value + int(cmp_exclusive) == jump_table_len
@staticmethod
def irregular_from_expr(control_expr: Expression) -> "SwitchControl":
"""
Return a SwitchControl representing a "irregular" switch statement.
The switch does not have a single jump table; instead it is a series of
if statements & other switches.
"""
return SwitchControl(
control_expr=control_expr,
jump_table=None,
offset=0,
is_irregular=True,
)
@staticmethod
def from_expr(expr: Expression) -> "SwitchControl":
"""
Try to convert `expr` into a SwitchControl from one of the following forms:
- `*(&jump_table + (control_expr * 4))`
- `*(&jump_table + ((control_expr + (-offset)) * 4))`
If `offset` is not present, it defaults to 0.
If `expr` does not match, return a thin wrapper around the input expression,
with `jump_table` set to `None`.
"""
# The "error" expression we use if we aren't able to parse `expr`
error_expr = SwitchControl(expr)
# Match `*(&jump_table + (control_expr * 4))`
struct_expr = early_unwrap(expr)
if not isinstance(struct_expr, StructAccess) or struct_expr.offset != 0:
return error_expr
add_expr = early_unwrap(struct_expr.struct_var)
if not isinstance(add_expr, BinaryOp) or add_expr.op != "+":
return error_expr
        # Check for either `*(&jump_table + (control_expr * 4))` or `*((control_expr * 4) + &jump_table)`
left_expr, right_expr = early_unwrap(add_expr.left), early_unwrap(
add_expr.right
)
if isinstance(left_expr, AddressOf) and isinstance(
left_expr.expr, GlobalSymbol
):
jtbl_addr_expr, mul_expr = left_expr, right_expr
elif isinstance(right_expr, AddressOf) and isinstance(
right_expr.expr, GlobalSymbol
):
mul_expr, jtbl_addr_expr = left_expr, right_expr
else:
return error_expr
jump_table = jtbl_addr_expr.expr
assert isinstance(jump_table, GlobalSymbol)
if (
not isinstance(mul_expr, BinaryOp)
or mul_expr.op != "*"
or early_unwrap(mul_expr.right) != Literal(4)
):
return error_expr
control_expr = mul_expr.left
# Optionally match `control_expr + (-offset)`
offset = 0
uw_control_expr = early_unwrap(control_expr)
if isinstance(uw_control_expr, BinaryOp) and uw_control_expr.op == "+":
offset_lit = early_unwrap(uw_control_expr.right)
if isinstance(offset_lit, Literal):
control_expr = uw_control_expr.left
offset = -offset_lit.value
# Check that it is really a jump table
if jump_table.asm_data_entry is None or not jump_table.asm_data_entry.is_jtbl:
return error_expr
return SwitchControl(control_expr, jump_table, offset)
@dataclass
class EvalOnceStmt(Statement):
expr: EvalOnceExpr
def need_decl(self) -> bool:
return self.expr.need_decl()
def should_write(self) -> bool:
if self.expr.emit_exactly_once:
return self.expr.num_usages != 1
else:
return self.need_decl()
def format(self, fmt: Formatter) -> str:
val_str = format_expr(elide_casts_for_store(self.expr.wrapped_expr), fmt)
if self.expr.emit_exactly_once and self.expr.num_usages == 0:
return f"{val_str};"
return f"{self.expr.var.format(fmt)} = {val_str};"
@dataclass
class SetPhiStmt(Statement):
phi: PhiExpr
expr: Expression
def should_write(self) -> bool:
expr = self.expr
if isinstance(expr, PhiExpr) and expr.propagates_to() != expr:
# When we have phi1 = phi2, and phi2 is only used in this place,
# the SetPhiStmt for phi2 will store directly to phi1 and we can
# skip this store.
assert expr.propagates_to() == self.phi.propagates_to()
return False
if late_unwrap(expr) == self.phi.propagates_to():
# Elide "phi = phi".
return False
return True
def format(self, fmt: Formatter) -> str:
return format_assignment(self.phi.propagates_to(), self.expr, fmt)
@dataclass
class ExprStmt(Statement):
expr: Expression
def should_write(self) -> bool:
return True
def format(self, fmt: Formatter) -> str:
return f"{format_expr(self.expr, fmt)};"
@dataclass
class StoreStmt(Statement):
source: Expression
dest: Expression
def should_write(self) -> bool:
return True
def format(self, fmt: Formatter) -> str:
dest = self.dest
source = self.source
if (
isinstance(dest, StructAccess) and dest.late_has_known_type()
) or isinstance(dest, (ArrayAccess, LocalVar, RegisterVar, SubroutineArg)):
# Known destination; fine to elide some casts.
source = elide_casts_for_store(source)
return format_assignment(dest, source, fmt)
@dataclass
class CommentStmt(Statement):
contents: str
def should_write(self) -> bool:
return True
def format(self, fmt: Formatter) -> str:
return f"// {self.contents}"
def error_stmt(msg: str) -> ExprStmt:
return ExprStmt(ErrorExpr(msg))
@dataclass(frozen=True)
class AddressMode:
offset: int
rhs: Register
def __str__(self) -> str:
if self.offset:
return f"{self.offset}({self.rhs})"
else:
return f"({self.rhs})"
@dataclass(frozen=True)
class RawSymbolRef:
offset: int
sym: AsmGlobalSymbol
def __str__(self) -> str:
if self.offset:
return f"{self.sym.symbol_name} + {self.offset}"
else:
return self.sym.symbol_name
@dataclass
class RegMeta:
# True if this regdata is unchanged from the start of the block
inherited: bool = False
# True if this regdata is read by some later node
is_read: bool = False
# True if the value derives solely from function call return values
function_return: bool = False
# True if the value derives solely from regdata's with is_read = True,
# function_return = True, or is a passed in argument
uninteresting: bool = False
# True if the regdata must be replaced by variable if it is ever read
force: bool = False
# True if the regdata was assigned by an Instruction marked as in_pattern;
# it was part of a matched IR pattern but couldn't be elided at the time
in_pattern: bool = False
@dataclass
class RegData:
value: Expression
meta: RegMeta
@dataclass
class RegInfo:
stack_info: StackInfo = field(repr=False)
contents: Dict[Register, RegData] = field(default_factory=dict)
read_inherited: Set[Register] = field(default_factory=set)
_active_instr: Optional[Instruction] = None
def __getitem__(self, key: Register) -> Expression:
if self._active_instr is not None and key not in self._active_instr.inputs:
lineno = self._active_instr.meta.lineno
return ErrorExpr(f"Read from unset register {key} on line {lineno}")
if key == Register("zero"):
return Literal(0)
data = self.contents.get(key)
if data is None:
return ErrorExpr(f"Read from unset register {key}")
ret = data.value
data.meta.is_read = True
if data.meta.inherited:
self.read_inherited.add(key)
if isinstance(ret, PassedInArg) and not ret.copied:
# Create a new argument object to better distinguish arguments we
# are called with from arguments passed to subroutines. Also, unify
# the argument's type with what we can guess from the register used.
val, arg = self.stack_info.get_argument(ret.value)
self.stack_info.add_argument(arg)
val.type.unify(ret.type)
return val
if data.meta.force:
assert isinstance(ret, EvalOnceExpr)
ret.force()
return ret
def __contains__(self, key: Register) -> bool:
return key in self.contents
def __setitem__(self, key: Register, value: Expression) -> None:
self.set_with_meta(key, value, RegMeta())
def set_with_meta(self, key: Register, value: Expression, meta: RegMeta) -> None:
if self._active_instr is not None and key not in self._active_instr.outputs:
raise DecompFailure(f"Undeclared write to {key} in {self._active_instr}")
self.unchecked_set_with_meta(key, value, meta)
def unchecked_set_with_meta(
self, key: Register, value: Expression, meta: RegMeta
) -> None:
assert key != Register("zero")
self.contents[key] = RegData(value, meta)
def __delitem__(self, key: Register) -> None:
assert key != Register("zero")
del self.contents[key]
def get_raw(self, key: Register) -> Optional[Expression]:
data = self.contents.get(key)
return data.value if data is not None else None
def get_meta(self, key: Register) -> Optional[RegMeta]:
data = self.contents.get(key)
return data.meta if data is not None else None
@contextmanager
def current_instr(self, instr: Instruction) -> Iterator[None]:
self._active_instr = instr
try:
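            # Note: this calls the module-level `current_instr` context manager
            # (not this method), so that errors raised inside can be attributed
            # to this instruction.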
with current_instr(instr):
yield
finally:
self._active_instr = None
def __str__(self) -> str:
return ", ".join(
f"{k}: {v.value}"
for k, v in sorted(self.contents.items(), key=lambda x: x[0].register_name)
if not self.stack_info.should_save(v.value, None)
)
@dataclass
class BlockInfo:
"""
Contains translated assembly code (to_write), the block's branch condition,
    and the block's final register states.
"""
to_write: List[Statement]
return_value: Optional[Expression]
switch_control: Optional[SwitchControl]
branch_condition: Optional[Condition]
final_register_states: RegInfo
has_function_call: bool
def __str__(self) -> str:
newline = "\n\t"
return "\n".join(
[
f"Statements: {newline.join(str(w) for w in self.statements_to_write())}",
f"Branch condition: {self.branch_condition}",
f"Final register states: {self.final_register_states}",
]
)
def statements_to_write(self) -> List[Statement]:
return [st for st in self.to_write if st.should_write()]
def get_block_info(node: Node) -> BlockInfo:
ret = node.block.block_info
assert isinstance(ret, BlockInfo)
return ret
@dataclass
class InstrArgs:
raw_args: List[Argument]
regs: RegInfo = field(repr=False)
stack_info: StackInfo = field(repr=False)
def raw_arg(self, index: int) -> Argument:
assert index >= 0
if index >= len(self.raw_args):
raise DecompFailure(
f"Too few arguments for instruction, expected at least {index + 1}"
)
return self.raw_args[index]
def reg_ref(self, index: int) -> Register:
ret = self.raw_arg(index)
if not isinstance(ret, Register):
raise DecompFailure(
f"Expected instruction argument to be a register, but found {ret}"
)
return ret
def imm_value(self, index: int) -> int:
arg = self.full_imm(index)
assert isinstance(arg, Literal)
return arg.value
def reg(self, index: int) -> Expression:
return self.regs[self.reg_ref(index)]
def dreg(self, index: int) -> Expression:
"""Extract a double from a register. This may involve reading both the
mentioned register and the next."""
reg = self.reg_ref(index)
if not reg.is_float():
raise DecompFailure(
f"Expected instruction argument {reg} to be a float register"
)
ret = self.regs[reg]
# PPC: FPR's hold doubles (64 bits), so we don't need to do anything special
if self.stack_info.global_info.arch.arch == Target.ArchEnum.PPC:
return ret
# MIPS: Look at the paired FPR to get the full 64-bit value
if not isinstance(ret, Literal) or ret.type.get_size_bits() == 64:
return ret
reg_num = int(reg.register_name[1:])
if reg_num % 2 != 0:
raise DecompFailure(
"Tried to use a double-precision instruction with odd-numbered float "
f"register {reg}"
)
other = self.regs[Register(f"f{reg_num+1}")]
if not isinstance(other, Literal) or other.type.get_size_bits() == 64:
raise DecompFailure(
f"Unable to determine a value for double-precision register {reg} "
"whose second half is non-static. This is a mips_to_c restriction "
"which may be lifted in the future."
)
value = ret.value | (other.value << 32)
return Literal(value, type=Type.f64())
def cmp_reg(self, key: str) -> Condition:
cond = self.regs[Register(key)]
if not isinstance(cond, Condition):
cond = BinaryOp.icmp(cond, "!=", Literal(0))
return cond
def full_imm(self, index: int) -> Expression:
arg = strip_macros(self.raw_arg(index))
ret = literal_expr(arg, self.stack_info)
return ret
def imm(self, index: int) -> Expression:
ret = self.full_imm(index)
if isinstance(ret, Literal):
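            # Sign-extend the low 16 bits, e.g. 0xFFFF -> -1 and 0x7FFF -> 0x7FFF.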
return Literal(((ret.value + 0x8000) & 0xFFFF) - 0x8000)
return ret
def unsigned_imm(self, index: int) -> Expression:
ret = self.full_imm(index)
if isinstance(ret, Literal):
return Literal(ret.value & 0xFFFF)
return ret
def hi_imm(self, index: int) -> Argument:
arg = self.raw_arg(index)
if not isinstance(arg, Macro) or arg.macro_name not in ("hi", "ha", "h"):
raise DecompFailure(
f"Got lui/lis instruction with macro other than %hi/@ha/@h: {arg}"
)
return arg.argument
def shifted_imm(self, index: int) -> Expression:
# TODO: Should this be part of hi_imm? Do we need to handle @ha?
raw_imm = self.unsigned_imm(index)
assert isinstance(raw_imm, Literal)
return Literal(raw_imm.value << 16)
def memory_ref(self, index: int) -> Union[AddressMode, RawSymbolRef]:
ret = strip_macros(self.raw_arg(index))
# In MIPS, we want to allow "lw $v0, symbol + 4", which is outputted by
# some disassemblers (like IDA) even though it isn't valid assembly.
# For PPC, we want to allow "lwz $r1, symbol@sda21($r13)" where $r13 is
# assumed to point to the start of a small data area (SDA).
if isinstance(ret, AsmGlobalSymbol):
return RawSymbolRef(offset=0, sym=ret)
if (
isinstance(ret, BinOp)
and ret.op in "+-"
and isinstance(ret.lhs, AsmGlobalSymbol)
and isinstance(ret.rhs, AsmLiteral)
):
sign = 1 if ret.op == "+" else -1
return RawSymbolRef(offset=(ret.rhs.value * sign), sym=ret.lhs)
if not isinstance(ret, AsmAddressMode):
raise DecompFailure(
"Expected instruction argument to be of the form offset($register), "
f"but found {ret}"
)
if not isinstance(ret.lhs, AsmLiteral):
raise DecompFailure(
f"Unable to parse offset for instruction argument {ret}. "
"Expected a constant or a %lo macro."
)
return AddressMode(offset=ret.lhs.signed_value(), rhs=ret.rhs)
def count(self) -> int:
return len(self.raw_args)
def deref(
arg: Union[AddressMode, RawSymbolRef, Expression],
regs: RegInfo,
stack_info: StackInfo,
*,
size: int,
store: bool = False,
) -> Expression:
if isinstance(arg, Expression):
offset = 0
var = arg
elif isinstance(arg, AddressMode):
offset = arg.offset
if stack_info.is_stack_reg(arg.rhs):
return stack_info.get_stack_var(offset, store=store)
var = regs[arg.rhs]
else:
offset = arg.offset
var = stack_info.global_info.address_of_gsym(arg.sym.symbol_name)
# Struct member is being dereferenced.
# Cope slightly better with raw pointers.
if isinstance(var, Literal) and var.value % (2 ** 16) == 0:
var = Literal(var.value + offset, type=var.type)
offset = 0
# Handle large struct offsets.
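    # e.g. if the pointer is `base + 0x10000`, fold the 0x10000 addend into
    # `offset` and dereference relative to `base` instead.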
uw_var = early_unwrap(var)
if isinstance(uw_var, BinaryOp) and uw_var.op == "+":
for base, addend in [(uw_var.left, uw_var.right), (uw_var.right, uw_var.left)]:
if isinstance(addend, Literal) and addend.likely_partial_offset():
offset += addend.value
var = base
uw_var = early_unwrap(var)
break
var.type.unify(Type.ptr())
stack_info.record_struct_access(var, offset)
field_name: Optional[str] = None
type: Type = stack_info.unique_type_for("struct", (uw_var, offset), Type.any())
# Struct access with type information.
array_expr = array_access_from_add(
var, offset, stack_info, target_size=size, ptr=False
)
if array_expr is not None:
return array_expr
field_path, field_type, _ = var.type.get_deref_field(offset, target_size=size)
if field_path is not None:
field_type.unify(type)
type = field_type
else:
field_path = None
return StructAccess(
struct_var=var,
offset=offset,
target_size=size,
field_path=field_path,
stack_info=stack_info,
type=type,
)
def is_trivial_expression(expr: Expression) -> bool:
# Determine whether an expression should be evaluated only once or not.
if isinstance(
expr,
(
EvalOnceExpr,
Literal,
GlobalSymbol,
LocalVar,
PassedInArg,
PhiExpr,
RegisterVar,
SubroutineArg,
),
):
return True
if isinstance(expr, AddressOf):
return all(is_trivial_expression(e) for e in expr.dependencies())
if isinstance(expr, Cast):
return expr.is_trivial()
return False
def is_type_obvious(expr: Expression) -> bool:
"""
Determine whether an expression's type is "obvious", e.g. because the
expression refers to a variable which has a declaration. With perfect type
    information, this function would not be needed.
This function may produce wrong results while code is being generated,
since at that point we don't know the final status of EvalOnceExpr's.
"""
if isinstance(
expr,
(
Cast,
Literal,
AddressOf,
LocalVar,
PhiExpr,
PassedInArg,
RegisterVar,
FuncCall,
),
):
return True
if isinstance(expr, EvalOnceExpr):
if expr.need_decl():
return True
return is_type_obvious(expr.wrapped_expr)
return False
def simplify_condition(expr: Expression) -> Expression:
"""
Simplify a boolean expression.
This function may produce wrong results while code is being generated,
since at that point we don't know the final status of EvalOnceExpr's.
"""
if isinstance(expr, EvalOnceExpr) and not expr.need_decl():
return simplify_condition(expr.wrapped_expr)
if isinstance(expr, UnaryOp):
inner = simplify_condition(expr.expr)
if expr.op == "!" and isinstance(inner, Condition):
return inner.negated()
return UnaryOp(expr=inner, op=expr.op, type=expr.type)
if isinstance(expr, BinaryOp):
left = simplify_condition(expr.left)
right = simplify_condition(expr.right)
if isinstance(left, BinaryOp) and left.is_comparison() and right == Literal(0):
if expr.op == "==":
return simplify_condition(left.negated())
if expr.op == "!=":
return left
if (
expr.is_comparison()
and isinstance(left, Literal)
and not isinstance(right, Literal)
):
return BinaryOp(
left=right,
op=expr.op.translate(str.maketrans("<>", "><")),
right=left,
type=expr.type,
)
return BinaryOp(left=left, op=expr.op, right=right, type=expr.type)
return expr
def balanced_parentheses(string: str) -> bool:
"""
Check if parentheses in a string are balanced, ignoring any non-parenthesis
characters. E.g. true for "(x())yz", false for ")(" or "(".
"""
bal = 0
for c in string:
if c == "(":
bal += 1
elif c == ")":
if bal == 0:
return False
bal -= 1
return bal == 0
def format_expr(expr: Expression, fmt: Formatter) -> str:
"""Stringify an expression, stripping unnecessary parentheses around it."""
ret = expr.format(fmt)
if ret.startswith("(") and balanced_parentheses(ret[1:-1]):
return ret[1:-1]
return ret
def format_assignment(dest: Expression, source: Expression, fmt: Formatter) -> str:
"""Stringify `dest = source;`."""
dest = late_unwrap(dest)
source = late_unwrap(source)
if isinstance(source, BinaryOp) and source.op in COMPOUND_ASSIGNMENT_OPS:
rhs = None
if late_unwrap(source.left) == dest:
rhs = source.right
elif late_unwrap(source.right) == dest and source.op in ASSOCIATIVE_OPS:
rhs = source.left
if rhs is not None:
return f"{dest.format(fmt)} {source.op}= {format_expr(rhs, fmt)};"
return f"{dest.format(fmt)} = {format_expr(source, fmt)};"
def parenthesize_for_struct_access(expr: Expression, fmt: Formatter) -> str:
# Nested dereferences may need to be parenthesized. All other
# expressions will already have adequate parentheses added to them.
s = expr.format(fmt)
if (
s.startswith("*")
or s.startswith("&")
or (isinstance(expr, Cast) and expr.needed_for_store())
):
return f"({s})"
return s
def elide_casts_for_store(expr: Expression) -> Expression:
uw_expr = late_unwrap(expr)
if isinstance(uw_expr, Cast) and not uw_expr.needed_for_store():
return elide_casts_for_store(uw_expr.expr)
if isinstance(uw_expr, Literal) and uw_expr.type.is_int():
# Avoid suffixes for unsigned ints
return replace(uw_expr, elide_cast=True)
return uw_expr
def uses_expr(expr: Expression, expr_filter: Callable[[Expression], bool]) -> bool:
if expr_filter(expr):
return True
for e in expr.dependencies():
if uses_expr(e, expr_filter):
return True
return False
def late_unwrap(expr: Expression) -> Expression:
"""
Unwrap EvalOnceExpr's, stopping at variable boundaries.
This function may produce wrong results while code is being generated,
since at that point we don't know the final status of EvalOnceExpr's.
"""
if isinstance(expr, EvalOnceExpr) and not expr.need_decl():
return late_unwrap(expr.wrapped_expr)
if isinstance(expr, PhiExpr) and expr.replacement_expr is not None:
return late_unwrap(expr.replacement_expr)
return expr
def early_unwrap(expr: Expression) -> Expression:
"""
Unwrap EvalOnceExpr's, even past variable boundaries.
This is fine to use even while code is being generated, but disrespects decisions
to use a temp for a value, so use with care.
"""
if (
isinstance(expr, EvalOnceExpr)
and not expr.forced_emit
and not expr.emit_exactly_once
):
return early_unwrap(expr.wrapped_expr)
return expr
def early_unwrap_ints(expr: Expression) -> Expression:
"""
Unwrap EvalOnceExpr's, even past variable boundaries or through int Cast's
This is a bit sketchier than early_unwrap(), but can be used for pattern matching.
"""
uw_expr = early_unwrap(expr)
if isinstance(uw_expr, Cast) and uw_expr.reinterpret and uw_expr.type.is_int():
return early_unwrap_ints(uw_expr.expr)
return uw_expr
def unwrap_deep(expr: Expression) -> Expression:
"""
Unwrap EvalOnceExpr's, even past variable boundaries.
This is generally a sketchy thing to do, try to avoid it. In particular:
- the returned expression is not usable for emission, because it may contain
accesses at an earlier point in time or an expression that should not be repeated.
- just because unwrap_deep(a) == unwrap_deep(b) doesn't mean a and b are
      interchangeable, because they may be computed in different places.
"""
if isinstance(expr, EvalOnceExpr):
return unwrap_deep(expr.wrapped_expr)
return expr
def literal_expr(arg: Argument, stack_info: StackInfo) -> Expression:
if isinstance(arg, AsmGlobalSymbol):
return stack_info.global_info.address_of_gsym(arg.symbol_name)
if isinstance(arg, AsmLiteral):
return Literal(arg.value)
if isinstance(arg, BinOp):
lhs = literal_expr(arg.lhs, stack_info)
rhs = literal_expr(arg.rhs, stack_info)
return BinaryOp.int(left=lhs, op=arg.op, right=rhs)
raise DecompFailure(f"Instruction argument {arg} must be a literal")
def imm_add_32(expr: Expression) -> Expression:
if isinstance(expr, Literal):
return as_intish(Literal(expr.value + 32))
else:
return BinaryOp.int(expr, "+", Literal(32))
def fn_op(fn_name: str, args: List[Expression], type: Type) -> FuncCall:
fn_sig = FunctionSignature(
return_type=type,
params=[FunctionParam(type=arg.type) for arg in args],
params_known=True,
is_variadic=False,
)
return FuncCall(
function=GlobalSymbol(symbol_name=fn_name, type=Type.function(fn_sig)),
args=args,
type=type,
)
def void_fn_op(fn_name: str, args: List[Expression]) -> ExprStmt:
fn_call = fn_op(fn_name, args, Type.any_reg())
fn_call.use()
return ExprStmt(fn_call)
def load_upper(args: InstrArgs) -> Expression:
arg = args.raw_arg(1)
if not isinstance(arg, Macro):
assert not isinstance(
arg, Literal
), "normalize_instruction should convert lui/lis <literal> to li"
raise DecompFailure(
f"lui/lis argument must be a literal or %hi/@ha macro, found {arg}"
)
hi_arg = args.hi_imm(1)
if (
isinstance(hi_arg, BinOp)
and hi_arg.op in "+-"
and isinstance(hi_arg.lhs, AsmGlobalSymbol)
and isinstance(hi_arg.rhs, AsmLiteral)
):
sym = hi_arg.lhs
offset = hi_arg.rhs.value * (-1 if hi_arg.op == "-" else 1)
elif isinstance(hi_arg, AsmGlobalSymbol):
sym = hi_arg
offset = 0
else:
raise DecompFailure(f"Invalid %hi/@ha argument {hi_arg}")
stack_info = args.stack_info
source = stack_info.global_info.address_of_gsym(sym.symbol_name)
imm = Literal(offset)
return handle_addi_real(args.reg_ref(0), None, source, imm, stack_info)
def handle_convert(expr: Expression, dest_type: Type, source_type: Type) -> Cast:
# int <-> float casts should be explicit
silent = dest_type.data().kind != source_type.data().kind
expr.type.unify(source_type)
return Cast(expr=expr, type=dest_type, silent=silent, reinterpret=False)
def handle_la(args: InstrArgs) -> Expression:
target = args.memory_ref(1)
stack_info = args.stack_info
if isinstance(target, AddressMode):
return handle_addi(
InstrArgs(
raw_args=[args.reg_ref(0), target.rhs, AsmLiteral(target.offset)],
regs=args.regs,
stack_info=args.stack_info,
)
)
var = stack_info.global_info.address_of_gsym(target.sym.symbol_name)
return add_imm(var, Literal(target.offset), stack_info)
def handle_or(left: Expression, right: Expression) -> Expression:
if left == right:
# `or $rD, $rS, $rS` can be used to move $rS into $rD
return left
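    # A `lui`/`li` + `ori` pair builds a 32-bit constant out of two non-overlapping
    # halves; merge the halves into a single Literal in that case.
    # Illustrative example (not from the original source):
    #   Literal(0x80400000) | Literal(0x1234)  ->  Literal(0x80401234)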
if isinstance(left, Literal) and isinstance(right, Literal):
        if ((left.value & 0xFFFF) == 0 and (right.value & 0xFFFF0000) == 0) or (
(right.value & 0xFFFF) == 0 and (left.value & 0xFFFF0000) == 0
):
return Literal(value=(left.value | right.value))
# Regular bitwise OR.
return BinaryOp.int(left=left, op="|", right=right)
def handle_sltu(args: InstrArgs) -> Expression:
right = args.reg(2)
if args.reg_ref(1) == Register("zero"):
# (0U < x) is equivalent to (x != 0)
uw_right = early_unwrap(right)
if isinstance(uw_right, BinaryOp) and uw_right.op == "^":
# ((a ^ b) != 0) is equivalent to (a != b)
return BinaryOp.icmp(uw_right.left, "!=", uw_right.right)
return BinaryOp.icmp(right, "!=", Literal(0))
else:
left = args.reg(1)
return BinaryOp.ucmp(left, "<", right)
def handle_sltiu(args: InstrArgs) -> Expression:
left = args.reg(1)
right = args.imm(2)
if isinstance(right, Literal):
value = right.value & 0xFFFFFFFF
if value == 1:
# (x < 1U) is equivalent to (x == 0)
uw_left = early_unwrap(left)
if isinstance(uw_left, BinaryOp) and uw_left.op == "^":
# ((a ^ b) == 0) is equivalent to (a == b)
return BinaryOp.icmp(uw_left.left, "==", uw_left.right)
return BinaryOp.icmp(left, "==", Literal(0))
else:
right = Literal(value)
return BinaryOp.ucmp(left, "<", right)
def handle_addi(args: InstrArgs) -> Expression:
stack_info = args.stack_info
source_reg = args.reg_ref(1)
source = args.reg(1)
imm = args.imm(2)
# `(x + 0xEDCC)` is emitted as `((x + 0x10000) - 0x1234)`,
# i.e. as an `addis` followed by an `addi`
uw_source = early_unwrap(source)
if (
isinstance(uw_source, BinaryOp)
and uw_source.op == "+"
and isinstance(uw_source.right, Literal)
and uw_source.right.value % 0x10000 == 0
and isinstance(imm, Literal)
):
return add_imm(
uw_source.left, Literal(imm.value + uw_source.right.value), stack_info
)
return handle_addi_real(args.reg_ref(0), source_reg, source, imm, stack_info)
def handle_addis(args: InstrArgs) -> Expression:
stack_info = args.stack_info
source_reg = args.reg_ref(1)
source = args.reg(1)
imm = args.shifted_imm(2)
return handle_addi_real(args.reg_ref(0), source_reg, source, imm, stack_info)
def handle_addi_real(
output_reg: Register,
source_reg: Optional[Register],
source: Expression,
imm: Expression,
stack_info: StackInfo,
) -> Expression:
if source_reg is not None and stack_info.is_stack_reg(source_reg):
# Adding to sp, i.e. passing an address.
assert isinstance(imm, Literal)
if stack_info.is_stack_reg(output_reg):
# Changing sp. Just ignore that.
return source
# Keep track of all local variables that we take addresses of.
var = stack_info.get_stack_var(imm.value, store=False)
if isinstance(var, LocalVar):
stack_info.add_local_var(var)
return AddressOf(var, type=var.type.reference())
else:
return add_imm(source, imm, stack_info)
def add_imm(source: Expression, imm: Expression, stack_info: StackInfo) -> Expression:
if imm == Literal(0):
# addiu $reg1, $reg2, 0 is a move
# (this happens when replacing %lo(...) by 0)
return source
elif source.type.is_pointer_or_array():
# Pointer addition (this may miss some pointers that get detected later;
# unfortunately that's hard to do anything about with mips_to_c's single-pass
# architecture).
if isinstance(imm, Literal) and not imm.likely_partial_offset():
array_access = array_access_from_add(
source, imm.value, stack_info, target_size=None, ptr=True
)
if array_access is not None:
return array_access
field_path, field_type, _ = source.type.get_deref_field(
imm.value, target_size=None
)
if field_path is not None:
return AddressOf(
StructAccess(
struct_var=source,
offset=imm.value,
target_size=None,
field_path=field_path,
stack_info=stack_info,
type=field_type,
),
type=field_type.reference(),
)
if isinstance(imm, Literal):
target = source.type.get_pointer_target()
if target:
target_size = target.get_size_bytes()
if target_size and imm.value % target_size == 0:
# Pointer addition.
return BinaryOp(
left=source, op="+", right=as_intish(imm), type=source.type
)
return BinaryOp(left=source, op="+", right=as_intish(imm), type=Type.ptr())
elif isinstance(source, Literal) and isinstance(imm, Literal):
return Literal(source.value + imm.value)
else:
# Regular binary addition.
return BinaryOp.intptr(left=source, op="+", right=imm)
def handle_load(args: InstrArgs, type: Type) -> Expression:
# For now, make the cast silent so that output doesn't become cluttered.
# Though really, it would be great to expose the load types somehow...
size = type.get_size_bytes()
assert size is not None
expr = deref(args.memory_ref(1), args.regs, args.stack_info, size=size)
# Detect rodata constants
if isinstance(expr, StructAccess) and expr.offset == 0:
target = early_unwrap(expr.struct_var)
if (
isinstance(target, AddressOf)
and isinstance(target.expr, GlobalSymbol)
and type.is_likely_float()
):
sym_name = target.expr.symbol_name
ent = args.stack_info.global_info.asm_data_value(sym_name)
if (
ent
and ent.data
and isinstance(ent.data[0], bytes)
and len(ent.data[0]) >= size
and ent.is_readonly
and type.unify(target.expr.type)
):
data = ent.data[0][:size]
val: int
if size == 4:
(val,) = struct.unpack(">I", data)
else:
(val,) = struct.unpack(">Q", data)
return Literal(value=val, type=type)
return as_type(expr, type, silent=True)
def deref_unaligned(
arg: Union[AddressMode, RawSymbolRef],
regs: RegInfo,
stack_info: StackInfo,
*,
store: bool = False,
) -> Expression:
    # We don't know the correct size to pass to deref. Passing None would signal
    # that we are taking an address, which would cause us to prefer entire
    # substructs as referenced fields, and that would be confusing. Instead, we lie
    # and pass 1. Hopefully nothing bad will happen...
return deref(arg, regs, stack_info, size=1, store=store)
def handle_lwl(args: InstrArgs) -> Expression:
# Unaligned load for the left part of a register (lwl can technically merge with
# a pre-existing lwr, but doesn't in practice, so we treat this as a standard
# destination-first operation)
ref = args.memory_ref(1)
expr = deref_unaligned(ref, args.regs, args.stack_info)
key: Tuple[int, object]
if isinstance(ref, AddressMode):
key = (ref.offset, args.regs[ref.rhs])
else:
key = (ref.offset, ref.sym)
return Lwl(expr, key)
def handle_lwr(args: InstrArgs) -> Expression:
# Unaligned load for the right part of a register. This lwr may merge with an
# existing lwl, if it loads from the same target but with an offset that's +3.
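    # Illustrative example (not from the original source): on big-endian MIPS,
    #   lwl $t0, 0($a0); lwr $t0, 3($a0)
    # together load a single (possibly unaligned) 32-bit word from $a0.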
uw_old_value = early_unwrap(args.reg(0))
ref = args.memory_ref(1)
lwl_key: Tuple[int, object]
if isinstance(ref, AddressMode):
lwl_key = (ref.offset - 3, args.regs[ref.rhs])
else:
lwl_key = (ref.offset - 3, ref.sym)
if isinstance(uw_old_value, Lwl) and uw_old_value.key[0] == lwl_key[0]:
return UnalignedLoad(uw_old_value.load_expr)
if ref.offset % 4 == 2:
left_mem_ref = replace(ref, offset=ref.offset - 2)
load_expr = deref_unaligned(left_mem_ref, args.regs, args.stack_info)
return Load3Bytes(load_expr)
return ErrorExpr("Unable to handle lwr; missing a corresponding lwl")
def make_store(args: InstrArgs, type: Type) -> Optional[StoreStmt]:
size = type.get_size_bytes()
assert size is not None
stack_info = args.stack_info
source_reg = args.reg_ref(0)
source_raw = args.regs.get_raw(source_reg)
if type.is_likely_float() and size == 8:
source_val = args.dreg(0)
else:
source_val = args.reg(0)
target = args.memory_ref(1)
is_stack = isinstance(target, AddressMode) and stack_info.is_stack_reg(target.rhs)
if (
is_stack
and source_raw is not None
and stack_info.should_save(source_raw, target.offset)
):
        # Elide register preservation.
return None
dest = deref(target, args.regs, stack_info, size=size, store=True)
dest.type.unify(type)
return StoreStmt(source=as_type(source_val, type, silent=is_stack), dest=dest)
def make_storex(args: InstrArgs, type: Type) -> Optional[StoreStmt]:
# "indexed stores" like `stwx rS, rA, rB` write `rS` into `(rA + rB)`
size = type.get_size_bytes()
assert size is not None
source = args.reg(0)
ptr = BinaryOp.intptr(left=args.reg(1), op="+", right=args.reg(2))
# TODO: Can we assume storex's are never used to save registers to the stack?
dest = deref(ptr, args.regs, args.stack_info, size=size, store=True)
dest.type.unify(type)
return StoreStmt(source=as_type(source, type, silent=False), dest=dest)
def handle_swl(args: InstrArgs) -> Optional[StoreStmt]:
# swl in practice only occurs together with swr, so we can treat it as a regular
# store, with the expression wrapped in UnalignedLoad if needed.
source = args.reg(0)
target = args.memory_ref(1)
if not isinstance(early_unwrap(source), UnalignedLoad):
source = UnalignedLoad(source)
dest = deref_unaligned(target, args.regs, args.stack_info, store=True)
return StoreStmt(source=source, dest=dest)
def handle_swr(args: InstrArgs) -> Optional[StoreStmt]:
expr = early_unwrap(args.reg(0))
target = args.memory_ref(1)
if not isinstance(expr, Load3Bytes):
# Elide swr's that don't come from 3-byte-loading lwr's; they probably
# come with a corresponding swl which has already been emitted.
return None
real_target = replace(target, offset=target.offset - 2)
dest = deref_unaligned(real_target, args.regs, args.stack_info, store=True)
return StoreStmt(source=expr, dest=dest)
def handle_sra(args: InstrArgs) -> Expression:
lhs = args.reg(1)
shift = args.imm(2)
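    # A left shift followed by an arithmetic right shift by 16 (or 24) sign-extends
    # the low 16 (or 8) bits, i.e. it acts as a cast to s16 (or s8). The branches
    # below also handle combined shift/multiply forms of that pattern.
    # Illustrative example (not from the original source): (x << 16) >> 16  ->  (s16) x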
if isinstance(shift, Literal) and shift.value in [16, 24]:
expr = early_unwrap(lhs)
pow2 = 1 << shift.value
if isinstance(expr, BinaryOp) and isinstance(expr.right, Literal):
tp = Type.s16() if shift.value == 16 else Type.s8()
rhs = expr.right.value
if expr.op == "<<" and rhs == shift.value:
return as_type(expr.left, tp, silent=False)
elif expr.op == "<<" and rhs > shift.value:
new_shift = fold_mul_chains(
BinaryOp.int(expr.left, "<<", Literal(rhs - shift.value))
)
return as_type(new_shift, tp, silent=False)
elif expr.op == "*" and rhs % pow2 == 0 and rhs != pow2:
mul = BinaryOp.int(expr.left, "*", Literal(value=rhs // pow2))
return as_type(mul, tp, silent=False)
return fold_divmod(
BinaryOp(as_sintish(lhs), ">>", as_intish(shift), type=Type.s32())
)
def handle_conditional_move(args: InstrArgs, nonzero: bool) -> Expression:
op = "!=" if nonzero else "=="
type = Type.any_reg()
return TernaryOp(
BinaryOp.scmp(args.reg(2), op, Literal(0)),
as_type(args.reg(1), type, silent=True),
as_type(args.reg(0), type, silent=True),
type,
)
def format_f32_imm(num: int) -> str:
packed = struct.pack(">I", num & (2 ** 32 - 1))
value = struct.unpack(">f", packed)[0]
if not value or value == 4294967296.0:
# Zero, negative zero, nan, or INT_MAX.
return str(value)
# Write values smaller than 1e-7 / greater than 1e7 using scientific notation,
# and values in between using fixed point.
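    # Illustrative examples (not from the original source):
    #   format_f32_imm(0x3F800000) -> "1.0"
    #   format_f32_imm(0x3F000000) -> "0.5"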
if abs(math.log10(abs(value))) > 6.9:
fmt_char = "e"
elif abs(value) < 1:
fmt_char = "f"
else:
fmt_char = "g"
def fmt(prec: int) -> str:
"""Format 'value' with 'prec' significant digits/decimals, in either scientific
or regular notation depending on 'fmt_char'."""
ret = ("{:." + str(prec) + fmt_char + "}").format(value)
if fmt_char == "e":
return ret.replace("e+", "e").replace("e0", "e").replace("e-0", "e-")
if "e" in ret:
# The "g" format character can sometimes introduce scientific notation if
# formatting with too few decimals. If this happens, return an incorrect
# value to prevent the result from being used.
#
# Since the value we are formatting is within (1e-7, 1e7) in absolute
# value, it will at least be possible to format with 7 decimals, which is
# less than float precision. Thus, this annoying Python limitation won't
# lead to us outputting numbers with more precision than we really have.
return "0"
return ret
# 20 decimals is more than enough for a float. Start there, then try to shrink it.
prec = 20
while prec > 0:
prec -= 1
value2 = float(fmt(prec))
if struct.pack(">f", value2) != packed:
prec += 1
break
if prec == 20:
# Uh oh, even the original value didn't format correctly. Fall back to str(),
# which ought to work.
return str(value)
ret = fmt(prec)
if "." not in ret:
ret += ".0"
return ret
def format_f64_imm(num: int) -> str:
(value,) = struct.unpack(">d", struct.pack(">Q", num & (2 ** 64 - 1)))
return str(value)
def fold_divmod(original_expr: BinaryOp) -> BinaryOp:
"""
Return a new BinaryOp instance if this one can be simplified to a single / or % op.
This involves simplifying expressions using MULT_HI, MULTU_HI, +, -, <<, >>, and /.
In GCC 2.7.2, the code that generates these instructions is in expmed.c.
See also https://ridiculousfish.com/blog/posts/labor-of-division-episode-i.html
for a modern writeup of a similar algorithm.
This optimization is also used by MWCC and modern compilers (but not IDO).
"""
mult_high_ops = ("MULT_HI", "MULTU_HI")
possible_match_ops = mult_high_ops + ("-", "+", ">>")
# Only operate on integer expressions of certain operations
if original_expr.is_floating() or original_expr.op not in possible_match_ops:
return original_expr
# Use `early_unwrap_ints` instead of `early_unwrap` to ignore Casts to integer types
# Although this discards some extra type information, this function largely ignores
# sign/size information to stay simpler. The result will be made with BinaryOp.int()
# regardless of input types.
expr = original_expr
left_expr = early_unwrap_ints(expr.left)
right_expr = early_unwrap_ints(expr.right)
divisor_shift = 0
# Detect signed power-of-two division: (x >> N) + MIPS2C_CARRY --> x / (1 << N)
if (
isinstance(left_expr, BinaryOp)
and left_expr.op == ">>"
and isinstance(left_expr.right, Literal)
and expr.op == "+"
and isinstance(right_expr, CarryBit)
):
new_denom = 1 << left_expr.right.value
return BinaryOp.sint(
left=left_expr.left,
op="/",
right=Literal(new_denom),
silent=True,
)
# Fold `/` with `>>`: ((x / N) >> M) --> x / (N << M)
# NB: If x is signed, this is only correct if there is a sign-correcting subtraction term
if (
isinstance(left_expr, BinaryOp)
and left_expr.op == "/"
and isinstance(left_expr.right, Literal)
and expr.op == ">>"
and isinstance(right_expr, Literal)
):
new_denom = left_expr.right.value << right_expr.value
if new_denom < (1 << 32):
return BinaryOp.int(
left=left_expr.left,
op="/",
right=Literal(new_denom),
)
# Detect `%`: (x - ((x / y) * y)) --> x % y
if expr.op == "-" and isinstance(right_expr, BinaryOp) and right_expr.op == "*":
div_expr = early_unwrap_ints(right_expr.left)
mod_base = early_unwrap_ints(right_expr.right)
if (
isinstance(div_expr, BinaryOp)
and early_unwrap_ints(div_expr.left) == left_expr
):
# Accept either `(x / y) * y` or `(x >> N) * M` (where `1 << N == M`)
divisor = early_unwrap_ints(div_expr.right)
if (div_expr.op == "/" and divisor == mod_base) or (
div_expr.op == ">>"
and isinstance(divisor, Literal)
and isinstance(mod_base, Literal)
and (1 << divisor.value) == mod_base.value
):
return BinaryOp.int(left=left_expr, op="%", right=right_expr.right)
# Detect dividing by a negative: ((x >> 31) - (x / N)) --> x / -N
if (
expr.op == "-"
and isinstance(left_expr, BinaryOp)
and left_expr.op == ">>"
and early_unwrap_ints(left_expr.right) == Literal(31)
and isinstance(right_expr, BinaryOp)
and right_expr.op == "/"
and isinstance(right_expr.right, Literal)
):
# Swap left_expr & right_expr, but replace the N in right_expr with -N
left_expr, right_expr = (
replace(right_expr, right=Literal(-right_expr.right.value)),
left_expr,
)
# Remove outer error term: ((x / N) + ((x / N) >> 31)) --> x / N
# As N gets close to (1 << 30), this is no longer a negligible error term
if (
expr.op == "+"
and isinstance(left_expr, BinaryOp)
and left_expr.op == "/"
and isinstance(left_expr.right, Literal)
and left_expr.right.value <= (1 << 29)
and isinstance(right_expr, BinaryOp)
and early_unwrap_ints(right_expr.left) == left_expr
and right_expr.op == ">>"
and early_unwrap_ints(right_expr.right) == Literal(31)
):
return left_expr
# Remove outer error term: ((x / N) - (x >> 31)) --> x / N
if (
expr.op == "-"
and isinstance(left_expr, BinaryOp)
and left_expr.op == "/"
and isinstance(left_expr.right, Literal)
and isinstance(right_expr, BinaryOp)
and right_expr.op == ">>"
and early_unwrap_ints(right_expr.right) == Literal(31)
):
div_expr = left_expr
shift_var_expr = early_unwrap_ints(right_expr.left)
div_var_expr = early_unwrap_ints(div_expr.left)
# Check if the LHS of the shift is the same var that we're dividing by
if div_var_expr == shift_var_expr:
if isinstance(div_expr.right, Literal) and div_expr.right.value >= (
1 << 30
):
return BinaryOp.int(
left=div_expr.left,
op=div_expr.op,
right=div_expr.right,
)
return div_expr
# If the var is under 32 bits, the error term may look like `(x << K) >> 31` instead
if (
isinstance(shift_var_expr, BinaryOp)
and early_unwrap_ints(div_expr.left)
== early_unwrap_ints(shift_var_expr.left)
and shift_var_expr.op == "<<"
and isinstance(shift_var_expr.right, Literal)
):
return div_expr
# Shift on the result of the mul: MULT_HI(x, N) >> M, shift the divisor by M
if (
isinstance(left_expr, BinaryOp)
and expr.op == ">>"
and isinstance(right_expr, Literal)
):
divisor_shift += right_expr.value
expr = left_expr
left_expr = early_unwrap_ints(expr.left)
right_expr = early_unwrap_ints(expr.right)
# Normalize MULT_HI(N, x) to MULT_HI(x, N)
if isinstance(left_expr, Literal) and not isinstance(right_expr, Literal):
left_expr, right_expr = right_expr, left_expr
# Remove inner addition: (MULT_HI(x, N) + x) >> M --> MULT_HI(x, N) >> M
# MULT_HI performs signed multiplication, so the `+ x` acts as setting the 32nd bit
# while having a result with the same sign as x.
# We can ignore it because `round_div` can work with arbitrarily large constants
if (
isinstance(left_expr, BinaryOp)
and left_expr.op == "MULT_HI"
and expr.op == "+"
and early_unwrap_ints(left_expr.left) == right_expr
):
expr = left_expr
left_expr = early_unwrap_ints(expr.left)
right_expr = early_unwrap_ints(expr.right)
# Shift on the LHS of the mul: MULT_HI(x >> M, N) --> MULT_HI(x, N) >> M
if (
expr.op in mult_high_ops
and isinstance(left_expr, BinaryOp)
and left_expr.op == ">>"
and isinstance(left_expr.right, Literal)
):
divisor_shift += left_expr.right.value
left_expr = early_unwrap_ints(left_expr.left)
# Instead of checking for the error term precisely, just check that
# the quotient is "close enough" to the integer value
def round_div(x: int, y: int) -> Optional[int]:
if y <= 1:
return None
result = round(x / y)
if x / (y + 1) <= result <= x / (y - 1):
return result
return None
if expr.op in mult_high_ops and isinstance(right_expr, Literal):
denom = round_div(1 << (32 + divisor_shift), right_expr.value)
if denom is not None:
return BinaryOp.int(
left=left_expr,
op="/",
right=Literal(denom),
)
return original_expr
def replace_clz_shift(expr: BinaryOp) -> BinaryOp:
"""
Simplify an expression matching `CLZ(x) >> 5` into `x == 0`,
and further simplify `(a - b) == 0` into `a == b`.
"""
# Check that the outer expression is `>>`
if expr.is_floating() or expr.op != ">>":
return expr
# Match `CLZ(x) >> 5`, or return the original expr
left_expr = early_unwrap_ints(expr.left)
right_expr = early_unwrap_ints(expr.right)
if not (
isinstance(left_expr, UnaryOp)
and left_expr.op == "CLZ"
and isinstance(right_expr, Literal)
and right_expr.value == 5
):
return expr
# If the inner `x` is `(a - b)`, return `a == b`
sub_expr = early_unwrap(left_expr.expr)
if (
isinstance(sub_expr, BinaryOp)
and not sub_expr.is_floating()
and sub_expr.op == "-"
):
return BinaryOp.icmp(sub_expr.left, "==", sub_expr.right)
return BinaryOp.icmp(left_expr.expr, "==", Literal(0, type=left_expr.expr.type))
def replace_bitand(expr: BinaryOp) -> Expression:
"""Detect expressions using `&` for truncating integer casts"""
if not expr.is_floating() and expr.op == "&":
if expr.right == Literal(0xFF):
return as_type(expr.left, Type.int_of_size(8), silent=False)
if expr.right == Literal(0xFFFF):
return as_type(expr.left, Type.int_of_size(16), silent=False)
return expr
def fold_mul_chains(expr: Expression) -> Expression:
"""Simplify an expression involving +, -, * and << to a single multiplication,
e.g. 4*x - x -> 3*x, or x<<2 -> x*4. This includes some logic for preventing
folds of consecutive sll, and keeping multiplications by large powers of two
as bitshifts at the top layer."""
def fold(
expr: Expression, toplevel: bool, allow_sll: bool
) -> Tuple[Expression, int]:
if isinstance(expr, BinaryOp):
lbase, lnum = fold(expr.left, False, (expr.op != "<<"))
rbase, rnum = fold(expr.right, False, (expr.op != "<<"))
if expr.op == "<<" and isinstance(expr.right, Literal) and allow_sll:
# Left-shifts by small numbers are easier to understand if
# written as multiplications (they compile to the same thing).
if toplevel and lnum == 1 and not (1 <= expr.right.value <= 4):
return (expr, 1)
return (lbase, lnum << expr.right.value)
if (
expr.op == "*"
and isinstance(expr.right, Literal)
and (allow_sll or expr.right.value % 2 != 0)
):
return (lbase, lnum * expr.right.value)
if early_unwrap(lbase) == early_unwrap(rbase):
if expr.op == "+":
return (lbase, lnum + rnum)
if expr.op == "-":
return (lbase, lnum - rnum)
if isinstance(expr, UnaryOp) and expr.op == "-" and not toplevel:
base, num = fold(expr.expr, False, True)
return (base, -num)
if (
isinstance(expr, EvalOnceExpr)
and not expr.emit_exactly_once
and not expr.forced_emit
):
base, num = fold(early_unwrap(expr), False, allow_sll)
if num != 1 and is_trivial_expression(base):
return (base, num)
return (expr, 1)
base, num = fold(expr, True, True)
if num == 1:
return expr
return BinaryOp.int(left=base, op="*", right=Literal(num))
def array_access_from_add(
expr: Expression,
offset: int,
stack_info: StackInfo,
*,
target_size: Optional[int],
ptr: bool,
) -> Optional[Expression]:
expr = early_unwrap(expr)
if not isinstance(expr, BinaryOp) or expr.op != "+":
return None
base = expr.left
addend = expr.right
if addend.type.is_pointer_or_array() and not base.type.is_pointer_or_array():
base, addend = addend, base
index: Expression
scale: int
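    # Figure out the index and scale of the addend. Illustrative examples (not from
    # the original source): `base + i * 12` gives index=i, scale=12, and
    # `base + (i << 2)` gives index=i, scale=4.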
uw_addend = early_unwrap(addend)
if (
isinstance(uw_addend, BinaryOp)
and uw_addend.op == "*"
and isinstance(uw_addend.right, Literal)
):
index = uw_addend.left
scale = uw_addend.right.value
elif (
isinstance(uw_addend, BinaryOp)
and uw_addend.op == "<<"
and isinstance(uw_addend.right, Literal)
):
index = uw_addend.left
scale = 1 << uw_addend.right.value
else:
index = addend
scale = 1
if scale < 0:
scale = -scale
index = UnaryOp.sint("-", index)
target_type = base.type.get_pointer_target()
if target_type is None:
return None
uw_base = early_unwrap(base)
typepool = stack_info.global_info.typepool
# In `&x + index * scale`, if the type of `x` is not known, try to mark it as an array.
# Skip the `scale = 1` case because this often indicates a complex `index` expression,
# and is not actually a 1-byte array lookup.
if (
scale > 1
and offset == 0
and isinstance(uw_base, AddressOf)
and target_type.get_size_bytes() is None
):
inner_type: Optional[Type] = None
if (
isinstance(uw_base.expr, GlobalSymbol)
and uw_base.expr.potential_array_dim(scale)[1] != 0
):
# For GlobalSymbols, use the size of the asm data to check the feasibility of being
            # an array with `scale`. This helps us be more conservative around fake symbols.
pass
elif scale == 2:
# This *could* be a struct, but is much more likely to be an int
inner_type = Type.int_of_size(16)
elif scale == 4:
inner_type = Type.reg32(likely_float=False)
elif typepool.unk_inference and isinstance(uw_base.expr, GlobalSymbol):
# Make up a struct with a tag name based on the symbol & struct size.
# Although `scale = 8` could indicate an array of longs/doubles, it seems more
# common to be an array of structs.
struct_name = f"_struct_{uw_base.expr.symbol_name}_0x{scale:X}"
struct = typepool.get_struct_by_tag_name(
struct_name, stack_info.global_info.typemap
)
if struct is None:
struct = StructDeclaration.unknown(
typepool, size=scale, tag_name=struct_name
)
elif struct.size != scale:
# This should only happen if there was already a struct with this name in the context
raise DecompFailure(f"sizeof(struct {struct_name}) != {scale:#x}")
inner_type = Type.struct(struct)
if inner_type is not None:
# This might fail, if `uw_base.expr.type` can't be changed to an array
uw_base.expr.type.unify(Type.array(inner_type, dim=None))
# This acts as a backup, and will usually succeed
target_type.unify(inner_type)
if target_type.get_size_bytes() == scale:
# base[index]
pass
else:
# base->subarray[index]
sub_path, sub_type, remaining_offset = base.type.get_deref_field(
offset, target_size=scale, exact=False
)
# Check if the last item in the path is `0`, which indicates the start of an array
# If it is, remove it: it will be replaced by `[index]`
if sub_path is None or len(sub_path) < 2 or sub_path[-1] != 0:
return None
sub_path.pop()
base = StructAccess(
struct_var=base,
offset=offset - remaining_offset,
target_size=None,
field_path=sub_path,
stack_info=stack_info,
type=sub_type,
)
offset = remaining_offset
target_type = sub_type
ret: Expression = ArrayAccess(base, index, type=target_type)
# Add .field if necessary by wrapping ret in StructAccess(AddressOf(...))
ret_ref = AddressOf(ret, type=ret.type.reference())
field_path, field_type, _ = ret_ref.type.get_deref_field(
offset, target_size=target_size
)
if offset != 0 or (target_size is not None and target_size != scale):
ret = StructAccess(
struct_var=ret_ref,
offset=offset,
target_size=target_size,
field_path=field_path,
stack_info=stack_info,
type=field_type,
)
if ptr:
ret = AddressOf(ret, type=ret.type.reference())
return ret
def handle_add(args: InstrArgs) -> Expression:
lhs = args.reg(1)
rhs = args.reg(2)
stack_info = args.stack_info
type = Type.intptr()
# Because lhs & rhs are in registers, it shouldn't be possible for them to be arrays.
    # If they are, treat them the same as pointers anyway.
if lhs.type.is_pointer_or_array():
type = Type.ptr()
elif rhs.type.is_pointer_or_array():
type = Type.ptr()
# addiu instructions can sometimes be emitted as addu instead, when the
# offset is too large.
if isinstance(rhs, Literal):
return handle_addi_real(args.reg_ref(0), args.reg_ref(1), lhs, rhs, stack_info)
if isinstance(lhs, Literal):
return handle_addi_real(args.reg_ref(0), args.reg_ref(2), rhs, lhs, stack_info)
expr = BinaryOp(left=as_intptr(lhs), op="+", right=as_intptr(rhs), type=type)
folded_expr = fold_mul_chains(expr)
if isinstance(folded_expr, BinaryOp):
folded_expr = fold_divmod(folded_expr)
if folded_expr is not expr:
return folded_expr
array_expr = array_access_from_add(expr, 0, stack_info, target_size=None, ptr=True)
if array_expr is not None:
return array_expr
return expr
def handle_add_float(args: InstrArgs) -> Expression:
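    # `x + x` is decompiled as `2.0f * x`, presumably because compilers commonly
    # emit that multiplication as an addition; 1 << 30 (0x40000000) is the IEEE-754
    # single-precision bit pattern for 2.0 (and 1 << 62 the double-precision one below).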
if args.reg_ref(1) == args.reg_ref(2):
two = Literal(1 << 30, type=Type.f32())
return BinaryOp.f32(two, "*", args.reg(1))
return BinaryOp.f32(args.reg(1), "+", args.reg(2))
def handle_add_double(args: InstrArgs) -> Expression:
if args.reg_ref(1) == args.reg_ref(2):
two = Literal(1 << 62, type=Type.f64())
return BinaryOp.f64(two, "*", args.dreg(1))
return BinaryOp.f64(args.dreg(1), "+", args.dreg(2))
def handle_bgez(args: InstrArgs) -> Condition:
expr = args.reg(0)
uw_expr = early_unwrap(expr)
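    # `bgez` branches when the sign bit is clear. If the register holds `x << N`,
    # its sign bit is bit (31 - N) of x, so the branch tests that single bit.
    # Illustrative example (not from the original source):
    #   bgez on (x << 29)  ->  !(x & 4)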
if (
isinstance(uw_expr, BinaryOp)
and uw_expr.op == "<<"
and isinstance(uw_expr.right, Literal)
):
shift = uw_expr.right.value
bitand = BinaryOp.int(uw_expr.left, "&", Literal(1 << (31 - shift)))
return UnaryOp("!", bitand, type=Type.bool())
return BinaryOp.scmp(expr, ">=", Literal(0))
def rlwi_mask(mask_begin: int, mask_end: int) -> int:
# Compute the mask constant used by the rlwi* family of PPC instructions,
# referred to as the `MASK(MB, ME)` function in the processor manual.
# Bit 0 is the MSB, Bit 31 is the LSB
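    # Illustrative examples (not from the original source):
    #   rlwi_mask(0, 31)  == 0xFFFFFFFF
    #   rlwi_mask(16, 23) == 0x0000FF00
    #   rlwi_mask(24, 7)  == 0xFF0000FF   (wrap-around case)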
bits_upto: Callable[[int], int] = lambda m: (1 << (32 - m)) - 1
all_ones = 0xFFFFFFFF
if mask_begin <= mask_end:
# Set bits inside the range, fully inclusive
mask = bits_upto(mask_begin) - bits_upto(mask_end + 1)
else:
# Set bits from [31, mask_end] and [mask_begin, 0]
mask = (bits_upto(mask_end + 1) - bits_upto(mask_begin)) ^ all_ones
return mask
def handle_rlwinm(
source: Expression,
shift: int,
mask_begin: int,
mask_end: int,
simplify: bool = True,
) -> Expression:
# TODO: Detect shift + truncate, like `(x << 2) & 0xFFF3` or `(x >> 2) & 0x3FFF`
# The output of the rlwinm instruction is `ROTL(source, shift) & mask`. We write this as
# ((source << shift) & mask) | ((source >> (32 - shift)) & mask)
# and compute both OR operands (upper_bits and lower_bits respectively).
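    # Illustrative example (not from the original source): `rlwinm rD, rS, 3, 0, 28`
    # (i.e. shift=3, mask 0xFFFFFFF8) keeps only the left-shifted half, yielding
    # `rS << 3`, which fold_mul_chains then turns into `rS * 8`.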
all_ones = 0xFFFFFFFF
mask = rlwi_mask(mask_begin, mask_end)
left_shift = shift
right_shift = 32 - shift
left_mask = (all_ones << left_shift) & mask
right_mask = (all_ones >> right_shift) & mask
# We only simplify if the `simplify` argument is True, and there will be no `|` in the
# resulting expression. If there is an `|`, the expression is best left as bitwise math
simplify = simplify and not (left_mask and right_mask)
if isinstance(source, Literal):
upper_value = (source.value << left_shift) & mask
lower_value = (source.value >> right_shift) & mask
return Literal(upper_value | lower_value)
upper_bits: Optional[Expression]
if left_mask == 0:
upper_bits = None
else:
upper_bits = source
if left_shift != 0:
upper_bits = BinaryOp.int(
left=upper_bits, op="<<", right=Literal(left_shift)
)
if simplify:
upper_bits = fold_mul_chains(upper_bits)
if left_mask != (all_ones << left_shift) & all_ones:
upper_bits = BinaryOp.int(left=upper_bits, op="&", right=Literal(left_mask))
if simplify:
upper_bits = replace_bitand(upper_bits)
lower_bits: Optional[Expression]
if right_mask == 0:
lower_bits = None
else:
lower_bits = BinaryOp.uint(left=source, op=">>", right=Literal(right_shift))
if simplify:
lower_bits = replace_clz_shift(fold_divmod(lower_bits))
if right_mask != (all_ones >> right_shift) & all_ones:
lower_bits = BinaryOp.int(
left=lower_bits, op="&", right=Literal(right_mask)
)
if simplify:
lower_bits = replace_bitand(lower_bits)
if upper_bits is None and lower_bits is None:
return Literal(0)
elif upper_bits is None:
assert lower_bits is not None
return lower_bits
elif lower_bits is None:
return upper_bits
else:
return BinaryOp.int(left=upper_bits, op="|", right=lower_bits)
def handle_rlwimi(
base: Expression, source: Expression, shift: int, mask_begin: int, mask_end: int
) -> Expression:
# This instruction reads from `base`, replaces some bits with values from `source`, then
# writes the result back into the first register. This can be used to copy any contiguous
# bitfield from `source` into `base`, and is commonly used when manipulating flags, such
# as in `x |= 0x10` or `x &= ~0x10`.
# It's generally more readable to write the mask with `~` (instead of computing the inverse here)
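    # Illustrative example (not from the original source): `rlwimi rA, rS, 0, 27, 27`
    # copies bit 0x10 from rS into rA, i.e. `rA = (rA & ~0x10) | (rS & 0x10)`.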
mask_literal = Literal(rlwi_mask(mask_begin, mask_end))
mask = UnaryOp("~", mask_literal, type=Type.u32())
masked_base = BinaryOp.int(left=base, op="&", right=mask)
if source == Literal(0):
# If the source is 0, there are no bits inserted. (This may look like `x &= ~0x10`)
return masked_base
# Set `simplify=False` to keep the `inserted` expression as bitwise math instead of `*` or `/`
inserted = handle_rlwinm(source, shift, mask_begin, mask_end, simplify=False)
if inserted == mask_literal:
# If this instruction will set all the bits in the mask, we can OR the values
# together without masking the base. (`x |= 0xF0` instead of `x = (x & ~0xF0) | 0xF0`)
return BinaryOp.int(left=base, op="|", right=inserted)
return BinaryOp.int(left=masked_base, op="|", right=inserted)
def handle_loadx(args: InstrArgs, type: Type) -> Expression:
# "indexed loads" like `lwzx rD, rA, rB` read `(rA + rB)` into `rD`
size = type.get_size_bytes()
assert size is not None
ptr = BinaryOp.intptr(left=args.reg(1), op="+", right=args.reg(2))
expr = deref(ptr, args.regs, args.stack_info, size=size)
return as_type(expr, type, silent=True)
def strip_macros(arg: Argument) -> Argument:
"""Replace %lo(...) by 0, and assert that there are no %hi(...). We assume that
%hi's only ever occur in lui, where we expand them to an entire value, and not
just the upper part. This preserves semantics in most cases (though not when %hi's
are reused for different %lo's...)"""
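    # Illustrative examples (not from the original source):
    #   %lo(sym)  -> AsmLiteral(0)       (the full value comes from the expanded %hi/lui)
    #   0x1234@l  -> AsmLiteral(0x1234)
    #   %hi(sym)  -> DecompFailure (only allowed inside lui)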
if isinstance(arg, Macro):
if arg.macro_name in ["sda2", "sda21"]:
return arg.argument
if arg.macro_name == "hi":
raise DecompFailure("%hi macro outside of lui")
if arg.macro_name not in ["lo", "l"]:
raise DecompFailure(f"Unrecognized linker macro %{arg.macro_name}")
# This is sort of weird; for `symbol@l` we return 0 here and assume
# that this @l is always perfectly paired with one other @ha.
# However, with `literal@l`, we return the literal value, and assume it is
# paired with another `literal@ha`. This lets us reuse `literal@ha` values,
# but assumes that we never mix literals & symbols
if isinstance(arg.argument, AsmLiteral):
return AsmLiteral(arg.argument.value)
return AsmLiteral(0)
elif isinstance(arg, AsmAddressMode) and isinstance(arg.lhs, Macro):
if arg.lhs.macro_name in ["sda2", "sda21"]:
return arg.lhs.argument
if arg.lhs.macro_name not in ["lo", "l"]:
raise DecompFailure(
f"Bad linker macro in instruction argument {arg}, expected %lo"
)
return AsmAddressMode(lhs=AsmLiteral(0), rhs=arg.rhs)
else:
return arg
@dataclass
class AbiArgSlot:
offset: int
reg: Optional[Register]
type: Type
name: Optional[str] = None
comment: Optional[str] = None
@dataclass
class Abi:
arg_slots: List[AbiArgSlot]
possible_slots: List[AbiArgSlot]
def reg_always_set(node: Node, reg: Register, *, dom_set: bool) -> bool:
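    # Roughly: return True if `reg` is set (and not subsequently clobbered) on every
    # path from the immediate dominator to `node`; `dom_set` indicates whether `reg`
    # is known to be set at the immediate dominator itself.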
if node.immediate_dominator is None:
return False
seen = {node.immediate_dominator}
stack = node.parents[:]
while stack:
n = stack.pop()
if n == node.immediate_dominator and not dom_set:
return False
if n in seen:
continue
seen.add(n)
clobbered: Optional[bool] = None
for instr in n.block.instructions:
with current_instr(instr):
if reg in instr.outputs:
clobbered = False
elif reg in instr.clobbers:
clobbered = True
        if clobbered is True:
return False
if clobbered is None:
stack.extend(n.parents)
return True
def pick_phi_assignment_nodes(
reg: Register, nodes: List[Node], expr: Expression
) -> List[Node]:
"""
As part of `assign_phis()`, we need to pick a set of nodes where we can emit a
`SetPhiStmt` that assigns the phi for `reg` to `expr`.
The final register state for `reg` for each node in `nodes` is `expr`,
so the best case would be finding a single dominating node for the assignment.
"""
# Find the set of nodes which dominate *all* of `nodes`, sorted by number
# of dominators. (This puts "earlier" nodes at the beginning of the list.)
dominators = sorted(
set.intersection(*(node.dominators for node in nodes)),
key=lambda n: len(n.dominators),
)
# Check the dominators for a node with the correct final state for `reg`
for node in dominators:
regs = get_block_info(node).final_register_states
raw = regs.get_raw(reg)
meta = regs.get_meta(reg)
if raw is None or meta is None or meta.force:
continue
if raw == expr:
return [node]
# We couldn't find anything, so fall back to the naive solution
# TODO: In some cases there may be a better solution (e.g. one that requires 2 nodes)
return nodes
def assign_phis(used_phis: List[PhiExpr], stack_info: StackInfo) -> None:
i = 0
# Iterate over used phis until there are no more remaining. New ones may
# appear during iteration, hence the while loop.
while i < len(used_phis):
phi = used_phis[i]
assert phi.num_usages > 0
assert len(phi.node.parents) >= 2
# Group parent nodes by the value of their phi register
equivalent_nodes: DefaultDict[Expression, List[Node]] = defaultdict(list)
for node in phi.node.parents:
expr = get_block_info(node).final_register_states[phi.reg]
expr.type.unify(phi.type)
equivalent_nodes[expr].append(node)
exprs = list(equivalent_nodes.keys())
first_uw = early_unwrap(exprs[0])
if all(early_unwrap(e) == first_uw for e in exprs[1:]):
# All the phis have the same value (e.g. because we recomputed an
# expression after a store, or restored a register after a function
# call). Just use that value instead of introducing a phi node.
# TODO: the unwrapping here is necessary, but also kinda sketchy:
# we may set as replacement_expr an expression that really shouldn't
# be repeated, e.g. a StructAccess. It would make sense to use less
# eager unwrapping, and/or to emit an EvalOnceExpr at this point
# (though it's too late for it to be able to participate in the
# prevent_later_uses machinery).
phi.replacement_expr = as_type(first_uw, phi.type, silent=True)
for _ in range(phi.num_usages):
first_uw.use()
else:
for expr, nodes in equivalent_nodes.items():
for node in pick_phi_assignment_nodes(phi.reg, nodes, expr):
block_info = get_block_info(node)
expr = block_info.final_register_states[phi.reg]
if isinstance(expr, PhiExpr):
# Explicitly mark how the expression is used if it's a phi,
# so we can propagate phi sets (to get rid of temporaries).
expr.use(from_phi=phi)
else:
expr.use()
typed_expr = as_type(expr, phi.type, silent=True)
block_info.to_write.append(SetPhiStmt(phi, typed_expr))
i += 1
name_counter: Dict[Register, int] = {}
for phi in used_phis:
if not phi.replacement_expr and phi.propagates_to() == phi:
counter = name_counter.get(phi.reg, 0) + 1
name_counter[phi.reg] = counter
output_reg_name = stack_info.function.reg_formatter.format(phi.reg)
prefix = f"phi_{output_reg_name}"
phi.name = f"{prefix}_{counter}" if counter > 1 else prefix
stack_info.phi_vars.append(phi)
def propagate_register_meta(nodes: List[Node], reg: Register) -> None:
"""Propagate RegMeta bits forwards/backwards."""
non_terminal: List[Node] = [n for n in nodes if not isinstance(n, TerminalNode)]
# Set `is_read` based on `read_inherited`.
for n in non_terminal:
if reg in get_block_info(n).final_register_states.read_inherited:
for p in n.parents:
par_meta = get_block_info(p).final_register_states.get_meta(reg)
if par_meta:
par_meta.is_read = True
# Propagate `is_read` backwards.
todo = non_terminal[:]
while todo:
n = todo.pop()
meta = get_block_info(n).final_register_states.get_meta(reg)
for p in n.parents:
par_meta = get_block_info(p).final_register_states.get_meta(reg)
if (par_meta and not par_meta.is_read) and (
meta and meta.inherited and meta.is_read
):
par_meta.is_read = True
todo.append(p)
# Set `uninteresting` and propagate it, `function_return`, and `in_pattern` forwards.
# Start by assuming inherited values are all set; they will get unset iteratively,
# but for cyclic dependency purposes we want to assume them set.
for n in non_terminal:
meta = get_block_info(n).final_register_states.get_meta(reg)
if meta:
if meta.inherited:
meta.uninteresting = True
meta.function_return = True
meta.in_pattern = True
else:
meta.uninteresting |= (
meta.is_read or meta.function_return or meta.in_pattern
)
todo = non_terminal[:]
while todo:
n = todo.pop()
if isinstance(n, TerminalNode):
continue
meta = get_block_info(n).final_register_states.get_meta(reg)
if not meta or not meta.inherited:
continue
all_uninteresting = True
all_function_return = True
all_in_pattern = True
for p in n.parents:
par_meta = get_block_info(p).final_register_states.get_meta(reg)
if par_meta:
all_uninteresting &= par_meta.uninteresting
all_function_return &= par_meta.function_return
all_in_pattern &= par_meta.in_pattern
if meta.uninteresting and not all_uninteresting and not meta.is_read:
meta.uninteresting = False
todo.extend(n.children())
if meta.function_return and not all_function_return:
meta.function_return = False
todo.extend(n.children())
if meta.in_pattern and not all_in_pattern:
meta.in_pattern = False
todo.extend(n.children())
def determine_return_register(
return_blocks: List[BlockInfo], fn_decl_provided: bool, arch: Arch
) -> Optional[Register]:
"""Determine which of the arch's base_return_regs (i.e. v0, f0) is the most
likely to contain the return value, or if the function is likely void."""
def priority(block_info: BlockInfo, reg: Register) -> int:
meta = block_info.final_register_states.get_meta(reg)
if not meta:
return 4
if meta.uninteresting:
return 2
if meta.in_pattern:
return 1
if meta.function_return:
return 0
return 3
if not return_blocks:
return None
best_reg: Optional[Register] = None
best_prio = -1
for reg in arch.base_return_regs:
prios = [priority(b, reg) for b in return_blocks]
max_prio = max(prios)
if max_prio == 4:
# Register is not always set, skip it
continue
if max_prio <= 2 and not fn_decl_provided:
# Register is always read after being written, or comes from a
# function call; seems unlikely to be an intentional return.
# Skip it, unless we have a known non-void return type.
continue
if max_prio > best_prio:
best_prio = max_prio
best_reg = reg
return best_reg
def translate_node_body(node: Node, regs: RegInfo, stack_info: StackInfo) -> BlockInfo:
"""
Given a node and current register contents, return a BlockInfo containing
the translated AST for that node.
"""
    to_write: List[Statement] = []
local_var_writes: Dict[LocalVar, Tuple[Register, Expression]] = {}
subroutine_args: Dict[int, Expression] = {}
branch_condition: Optional[Condition] = None
switch_expr: Optional[Expression] = None
has_custom_return: bool = False
has_function_call: bool = False
in_pattern: bool = False
arch = stack_info.global_info.arch
def eval_once(
expr: Expression,
*,
emit_exactly_once: bool,
trivial: bool,
prefix: str = "",
reuse_var: Optional[Var] = None,
) -> EvalOnceExpr:
if emit_exactly_once:
# (otherwise this will be marked used once num_usages reaches 1)
expr.use()
elif "_fictive_" in prefix and isinstance(expr, EvalOnceExpr):
# Avoid creating additional EvalOnceExprs for fictive Registers
# so they're less likely to appear in the output
return expr
assert reuse_var or prefix
if prefix == "condition_bit":
prefix = "cond"
var = reuse_var or Var(stack_info, "temp_" + prefix)
expr = EvalOnceExpr(
wrapped_expr=expr,
var=var,
type=expr.type,
emit_exactly_once=emit_exactly_once,
trivial=trivial,
)
var.num_usages += 1
stmt = EvalOnceStmt(expr)
to_write.append(stmt)
stack_info.temp_vars.append(stmt)
return expr
def prevent_later_uses(expr_filter: Callable[[Expression], bool]) -> None:
"""Prevent later uses of registers whose contents match a callback filter."""
for r in regs.contents.keys():
data = regs.contents.get(r)
assert data is not None
expr = data.value
if not data.meta.force and expr_filter(expr):
# Mark the register as "if used, emit the expression's once
# var". We usually always have a once var at this point,
# but if we don't, create one.
if not isinstance(expr, EvalOnceExpr):
expr = eval_once(
expr,
emit_exactly_once=False,
trivial=False,
prefix=stack_info.function.reg_formatter.format(r),
)
# This write isn't changing the value of the register; it didn't need
# to be declared as part of the current instruction's inputs/outputs.
regs.unchecked_set_with_meta(r, expr, replace(data.meta, force=True))
def prevent_later_value_uses(sub_expr: Expression) -> None:
"""Prevent later uses of registers that recursively contain a given
subexpression."""
# Unused PassedInArg are fine; they can pass the uses_expr test simply based
# on having the same variable name. If we didn't filter them out here it could
# cause them to be incorrectly passed as function arguments -- the function
# call logic sees an opaque wrapper and doesn't realize that they are unused
# arguments that should not be passed on.
prevent_later_uses(
lambda e: uses_expr(e, lambda e2: e2 == sub_expr)
and not (isinstance(e, PassedInArg) and not e.copied)
)
def prevent_later_function_calls() -> None:
"""Prevent later uses of registers that recursively contain a function call."""
prevent_later_uses(lambda e: uses_expr(e, lambda e2: isinstance(e2, FuncCall)))
def prevent_later_reads() -> None:
"""Prevent later uses of registers that recursively contain a read."""
contains_read = lambda e: isinstance(e, (StructAccess, ArrayAccess))
prevent_later_uses(lambda e: uses_expr(e, contains_read))
def set_reg_maybe_return(reg: Register, expr: Expression) -> None:
regs.set_with_meta(reg, expr, RegMeta(in_pattern=in_pattern))
def set_reg(reg: Register, expr: Optional[Expression]) -> Optional[Expression]:
if expr is None:
if reg in regs:
del regs[reg]
return None
if isinstance(expr, LocalVar):
if (
isinstance(node, ReturnNode)
and stack_info.maybe_get_register_var(reg)
and stack_info.in_callee_save_reg_region(expr.value)
and reg in stack_info.callee_save_regs
):
# Elide saved register restores with --reg-vars (it doesn't
# matter in other cases).
return None
if expr in local_var_writes:
# Elide register restores (only for the same register for now,
                # to be conservative).
orig_reg, orig_expr = local_var_writes[expr]
if orig_reg == reg:
expr = orig_expr
uw_expr = expr
if not isinstance(expr, Literal):
expr = eval_once(
expr,
emit_exactly_once=False,
trivial=is_trivial_expression(expr),
prefix=stack_info.function.reg_formatter.format(reg),
)
if reg == Register("zero"):
# Emit the expression as is. It's probably a volatile load.
expr.use()
to_write.append(ExprStmt(expr))
else:
dest = stack_info.maybe_get_register_var(reg)
if dest is not None:
stack_info.use_register_var(dest)
# Avoid emitting x = x, but still refresh EvalOnceExpr's etc.
if not (isinstance(uw_expr, RegisterVar) and uw_expr.reg == reg):
source = as_type(expr, dest.type, True)
source.use()
to_write.append(StoreStmt(source=source, dest=dest))
expr = dest
set_reg_maybe_return(reg, expr)
return expr
def clear_caller_save_regs() -> None:
for reg in arch.temp_regs:
if reg in regs:
del regs[reg]
def maybe_clear_local_var_writes(func_args: List[Expression]) -> None:
# Clear the `local_var_writes` dict if any of the `func_args` contain
# a reference to a stack var. (The called function may modify the stack,
# replacing the value we have in `local_var_writes`.)
for arg in func_args:
if uses_expr(
arg,
lambda expr: isinstance(expr, AddressOf)
and isinstance(expr.expr, LocalVar),
):
local_var_writes.clear()
return
def process_instr(instr: Instruction) -> None:
nonlocal branch_condition, switch_expr, has_function_call, in_pattern
in_pattern = instr.in_pattern
mnemonic = instr.mnemonic
arch_mnemonic = instr.arch_mnemonic(arch)
args = InstrArgs(instr.args, regs, stack_info)
expr: Expression
# Figure out what code to generate!
if mnemonic in arch.instrs_ignore:
pass
elif mnemonic in arch.instrs_store or mnemonic in arch.instrs_store_update:
# Store a value in a permanent place.
if mnemonic in arch.instrs_store:
to_store = arch.instrs_store[mnemonic](args)
else:
# PPC specific store-and-update instructions
                # `stwu r3, 8(r4)` is equivalent to `*($r4 + 8) = $r3; $r4 += 8;`
to_store = arch.instrs_store_update[mnemonic](args)
# Update the register in the second argument
update = args.memory_ref(1)
if not isinstance(update, AddressMode):
raise DecompFailure(
f"Unhandled store-and-update arg in {instr}: {update!r}"
)
set_reg(
update.rhs,
add_imm(args.regs[update.rhs], Literal(update.offset), stack_info),
)
if to_store is None:
                # Elided register preservation.
pass
elif isinstance(to_store.dest, SubroutineArg):
# About to call a subroutine with this argument. Skip arguments for the
# first four stack slots; they are also passed in registers.
if to_store.dest.value >= 0x10:
subroutine_args[to_store.dest.value] = to_store.source
else:
if isinstance(to_store.dest, LocalVar):
stack_info.add_local_var(to_store.dest)
raw_value = to_store.source
if isinstance(raw_value, Cast) and raw_value.reinterpret:
# When preserving values on the stack across function calls,
# ignore the type of the stack variable. The same stack slot
# might be used to preserve values of different types.
raw_value = raw_value.expr
local_var_writes[to_store.dest] = (args.reg_ref(0), raw_value)
# Emit a write. This includes four steps:
# - mark the expression as used (since writes are always emitted)
# - mark the dest used (if it's a struct access it needs to be
# evaluated, though ideally we would not mark the top-level expression
# used; it may cause early emissions that shouldn't happen)
# - mark other usages of the dest as "emit before this point if used".
# - emit the actual write.
#
# Note that the prevent_later_value_uses step happens after use(), since
# the stored expression is allowed to reference its destination var,
# but before the write is written, since prevent_later_value_uses might
# emit writes of its own that should go before this write. In practice
# that probably never occurs -- all relevant register contents should be
# EvalOnceExpr's that can be emitted at their point of creation, but
# I'm not 100% certain that that's always the case and will remain so.
to_store.source.use()
to_store.dest.use()
prevent_later_value_uses(to_store.dest)
prevent_later_function_calls()
to_write.append(to_store)
elif mnemonic in arch.instrs_source_first:
# Just 'mtc1'. It's reversed, so we have to specially handle it.
set_reg(args.reg_ref(1), arch.instrs_source_first[mnemonic](args))
elif mnemonic in arch.instrs_branches:
assert branch_condition is None
branch_condition = arch.instrs_branches[mnemonic](args)
elif mnemonic in arch.instrs_float_branches:
assert branch_condition is None
cond_bit = regs[Register("condition_bit")]
if not isinstance(cond_bit, BinaryOp):
cond_bit = ExprCondition(cond_bit, type=cond_bit.type)
if arch_mnemonic == "mips:bc1t":
branch_condition = cond_bit
elif arch_mnemonic == "mips:bc1f":
branch_condition = cond_bit.negated()
elif mnemonic in arch.instrs_jumps:
if arch_mnemonic == "ppc:bctr":
# Switch jump
assert isinstance(node, SwitchNode)
switch_expr = args.regs[Register("ctr")]
elif arch_mnemonic == "mips:jr":
# MIPS:
if args.reg_ref(0) == arch.return_address_reg:
# Return from the function.
assert isinstance(node, ReturnNode)
else:
# Switch jump.
assert isinstance(node, SwitchNode)
switch_expr = args.reg(0)
elif arch_mnemonic == "ppc:blr":
assert isinstance(node, ReturnNode)
else:
assert False, f"Unhandled jump mnemonic {arch_mnemonic}"
elif mnemonic in arch.instrs_fn_call:
if arch_mnemonic in ["mips:jal", "ppc:bl"]:
fn_target = args.imm(0)
if not (
(
isinstance(fn_target, AddressOf)
and isinstance(fn_target.expr, GlobalSymbol)
)
or isinstance(fn_target, Literal)
):
raise DecompFailure(
f"Target of function call must be a symbol, not {fn_target}"
)
elif arch_mnemonic == "ppc:blrl":
fn_target = args.regs[Register("lr")]
elif arch_mnemonic == "ppc:bctrl":
fn_target = args.regs[Register("ctr")]
elif arch_mnemonic == "mips:jalr":
fn_target = args.reg(1)
else:
assert False, f"Unhandled fn call mnemonic {arch_mnemonic}"
fn_target = as_function_ptr(fn_target)
fn_sig = fn_target.type.get_function_pointer_signature()
assert fn_sig is not None, "known function pointers must have a signature"
likely_regs: Dict[Register, bool] = {}
for reg, data in regs.contents.items():
# We use a much stricter filter for PPC than MIPS, because the same
                # registers can be used for both arguments & return values.
# The ABI can also mix & match the rN & fN registers, which makes the
# "require" heuristic less powerful.
#
# - `meta.inherited` will only be False for registers set in *this* basic block
# - `meta.function_return` will only be accurate for registers set within this
# basic block because we have not called `propagate_register_meta` yet.
# Within this block, it will be True for registers that were return values.
if arch.arch == Target.ArchEnum.PPC and (
data.meta.inherited or data.meta.function_return
):
likely_regs[reg] = False
elif data.meta.in_pattern:
# Like `meta.function_return` mentioned above, `meta.in_pattern` will only be
# accurate for registers set within this basic block.
likely_regs[reg] = False
elif isinstance(data.value, PassedInArg) and not data.value.copied:
likely_regs[reg] = False
else:
likely_regs[reg] = True
abi = arch.function_abi(fn_sig, likely_regs, for_call=True)
func_args: List[Expression] = []
for slot in abi.arg_slots:
if slot.reg:
expr = regs[slot.reg]
elif slot.offset in subroutine_args:
expr = subroutine_args.pop(slot.offset)
else:
expr = ErrorExpr(
f"Unable to find stack arg {slot.offset:#x} in block"
)
func_args.append(
CommentExpr.wrap(
as_type(expr, slot.type, True), prefix=slot.comment
)
)
for slot in abi.possible_slots:
assert slot.reg is not None
func_args.append(regs[slot.reg])
# Add the arguments after a3.
# TODO: limit this based on abi.arg_slots. If the function type is known
# and not variadic, this list should be empty.
for _, arg in sorted(subroutine_args.items()):
if fn_sig.params_known and not fn_sig.is_variadic:
func_args.append(CommentExpr.wrap(arg, prefix="extra?"))
else:
func_args.append(arg)
if not fn_sig.params_known:
while len(func_args) > len(fn_sig.params):
fn_sig.params.append(FunctionParam())
                # When the function signature isn't provided, we only assume that each
# parameter is "simple" (<=4 bytes, no return struct, etc.). This may not
# match the actual function signature, but it's the best we can do.
# Without that assumption, the logic from `function_abi` would be needed here.
for i, (arg_expr, param) in enumerate(zip(func_args, fn_sig.params)):
func_args[i] = as_type(arg_expr, param.type.decay(), True)
# Reset subroutine_args, for the next potential function call.
subroutine_args.clear()
call: Expression = FuncCall(
fn_target, func_args, fn_sig.return_type.weaken_void_ptr()
)
call = eval_once(call, emit_exactly_once=True, trivial=False, prefix="ret")
# Clear out caller-save registers, for clarity and to ensure that
# argument regs don't get passed into the next function.
clear_caller_save_regs()
# Clear out local var write tracking if any argument contains a stack
# reference. That dict is used to track register saves/restores, which
# are unreliable if we call a function with a stack reference.
maybe_clear_local_var_writes(func_args)
# Prevent reads and function calls from moving across this call.
# This isn't really right, because this call might be moved later,
# and then this prevention should also be... but it's the best we
# can do with the current code architecture.
prevent_later_function_calls()
prevent_later_reads()
return_reg_vals = arch.function_return(call)
for out in instr.outputs:
if not isinstance(out, Register):
continue
val = return_reg_vals[out]
if not isinstance(val, SecondF64Half):
val = eval_once(
val,
emit_exactly_once=False,
trivial=False,
prefix=stack_info.function.reg_formatter.format(out),
)
regs.set_with_meta(out, val, RegMeta(function_return=True))
has_function_call = True
elif mnemonic in arch.instrs_float_comp:
expr = arch.instrs_float_comp[mnemonic](args)
regs[Register("condition_bit")] = expr
elif mnemonic in arch.instrs_hi_lo:
hi, lo = arch.instrs_hi_lo[mnemonic](args)
set_reg(Register("hi"), hi)
set_reg(Register("lo"), lo)
elif mnemonic in arch.instrs_implicit_destination:
reg, expr_fn = arch.instrs_implicit_destination[mnemonic]
set_reg(reg, expr_fn(args))
elif mnemonic in arch.instrs_ppc_compare:
if instr.args[0] != Register("cr0"):
raise DecompFailure(
f"Instruction {instr} not supported (first arg is not $cr0)"
)
set_reg(Register("cr0_eq"), arch.instrs_ppc_compare[mnemonic](args, "=="))
set_reg(Register("cr0_gt"), arch.instrs_ppc_compare[mnemonic](args, ">"))
set_reg(Register("cr0_lt"), arch.instrs_ppc_compare[mnemonic](args, "<"))
set_reg(Register("cr0_so"), Literal(0))
elif mnemonic in arch.instrs_no_dest:
stmt = arch.instrs_no_dest[mnemonic](args)
to_write.append(stmt)
elif mnemonic.rstrip(".") in arch.instrs_destination_first:
target = args.reg_ref(0)
val = arch.instrs_destination_first[mnemonic.rstrip(".")](args)
# TODO: IDO tends to keep variables within single registers. Thus,
# if source = target, maybe we could overwrite that variable instead
# of creating a new one?
target_val = set_reg(target, val)
mn_parts = arch_mnemonic.split(".")
if arch_mnemonic.startswith("ppc:") and arch_mnemonic.endswith("."):
# PPC instructions suffixed with . set condition bits (CR0) based on the result value
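# For example, a record-form instruction such as `add.` both writes its result and
# compares it against zero; that comparison is modeled below by assigning explicit
# BinaryOps to cr0_eq/cr0_gt/cr0_lt, while cr0_so is approximated with a
# MIPS2C_OVERFLOW marker.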
if target_val is None:
target_val = val
set_reg(
Register("cr0_eq"),
BinaryOp.icmp(target_val, "==", Literal(0, type=target_val.type)),
)
# Use manual casts for cr0_gt/cr0_lt so that the type of target_val is not modified
# until the resulting bit is .use()'d.
target_s32 = Cast(
target_val, reinterpret=True, silent=True, type=Type.s32()
)
set_reg(
Register("cr0_gt"),
BinaryOp(target_s32, ">", Literal(0), type=Type.s32()),
)
set_reg(
Register("cr0_lt"),
BinaryOp(target_s32, "<", Literal(0), type=Type.s32()),
)
set_reg(
Register("cr0_so"),
fn_op("MIPS2C_OVERFLOW", [target_val], type=Type.s32()),
)
elif (
len(mn_parts) >= 2
and mn_parts[0].startswith("mips:")
and mn_parts[1] == "d"
) or arch_mnemonic == "mips:ldc1":
set_reg(target.other_f64_reg(), SecondF64Half())
elif mnemonic in arch.instrs_load_update:
target = args.reg_ref(0)
val = arch.instrs_load_update[mnemonic](args)
set_reg(target, val)
if arch_mnemonic in ["ppc:lwzux", "ppc:lhzux", "ppc:lbzux"]:
# In `rD, rA, rB`, update `rA = rA + rB`
update_reg = args.reg_ref(1)
offset = args.reg(2)
else:
# In `rD, rA(N)`, update `rA = rA + N`
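# e.g. (sketch) `lwzu r3, 0x4(r4)` loads from r4 + 4 into r3 and leaves r4
# incremented by 4, which is what the add_imm update below reproduces.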
update = args.memory_ref(1)
if not isinstance(update, AddressMode):
raise DecompFailure(
f"Unhandled store-and-update arg in {instr}: {update!r}"
)
update_reg = update.rhs
offset = Literal(update.offset)
if update_reg == target:
raise DecompFailure(
f"Invalid instruction, rA and rD must be different in {instr}"
)
set_reg(update_reg, add_imm(args.regs[update_reg], offset, stack_info))
else:
expr = ErrorExpr(f"unknown instruction: {instr}")
if arch_mnemonic.startswith("ppc:") and arch_mnemonic.endswith("."):
# Unimplemented PPC instructions that modify CR0
set_reg(Register("cr0_eq"), expr)
set_reg(Register("cr0_gt"), expr)
set_reg(Register("cr0_lt"), expr)
set_reg(Register("cr0_so"), expr)
if args.count() >= 1 and isinstance(args.raw_arg(0), Register):
reg = args.reg_ref(0)
expr = eval_once(
expr,
emit_exactly_once=True,
trivial=False,
prefix=stack_info.function.reg_formatter.format(reg),
)
if reg != Register("zero"):
set_reg_maybe_return(reg, expr)
else:
to_write.append(ExprStmt(expr))
for instr in node.block.instructions:
with regs.current_instr(instr):
process_instr(instr)
if branch_condition is not None:
branch_condition.use()
switch_control: Optional[SwitchControl] = None
if switch_expr is not None:
switch_control = SwitchControl.from_expr(switch_expr)
switch_control.control_expr.use()
return BlockInfo(
to_write=to_write,
return_value=None,
switch_control=switch_control,
branch_condition=branch_condition,
final_register_states=regs,
has_function_call=has_function_call,
)
def translate_graph_from_block(
node: Node,
regs: RegInfo,
stack_info: StackInfo,
used_phis: List[PhiExpr],
return_blocks: List[BlockInfo],
options: Options,
) -> None:
"""
Given a FlowGraph node and a dictionary of register contents, give that node
its appropriate BlockInfo (which contains the AST of its code).
"""
if options.debug:
print(f"\nNode in question: {node}")
# Translate the given node and discover final register states.
try:
block_info = translate_node_body(node, regs, stack_info)
if options.debug:
print(block_info)
except Exception as e: # TODO: handle issues better
if options.stop_on_error:
raise
instr: Optional[Instruction] = None
if isinstance(e, InstrProcessingFailure) and isinstance(e.__cause__, Exception):
instr = e.instr
e = e.__cause__
if isinstance(e, DecompFailure):
emsg = str(e)
print(emsg)
else:
tb = e.__traceback__
traceback.print_exception(None, e, tb)
emsg = str(e) or traceback.format_tb(tb)[-1]
emsg = emsg.strip().split("\n")[-1].strip()
error_stmts: List[Statement] = [CommentStmt(f"Error: {emsg}")]
if instr is not None:
print(
f"Error occurred while processing instruction: {instr}", file=sys.stderr
)
error_stmts.append(CommentStmt(f"At instruction: {instr}"))
print(file=sys.stderr)
block_info = BlockInfo(
to_write=error_stmts,
return_value=None,
switch_control=None,
branch_condition=ErrorExpr(),
final_register_states=regs,
has_function_call=False,
)
node.block.add_block_info(block_info)
if isinstance(node, ReturnNode):
return_blocks.append(block_info)
# Translate everything dominated by this node, now that we know our own
# final register state. This will eventually reach every node.
for child in node.immediately_dominates:
if isinstance(child, TerminalNode):
continue
new_regs = RegInfo(stack_info=stack_info)
for reg, data in regs.contents.items():
new_regs.set_with_meta(
reg, data.value, RegMeta(inherited=True, force=data.meta.force)
)
phi_regs = (
r for r in locs_clobbered_until_dominator(child) if isinstance(r, Register)
)
for reg in phi_regs:
if reg_always_set(child, reg, dom_set=(reg in regs)):
expr: Optional[Expression] = stack_info.maybe_get_register_var(reg)
if expr is None:
expr = PhiExpr(
reg=reg, node=child, used_phis=used_phis, type=Type.any_reg()
)
new_regs.set_with_meta(reg, expr, RegMeta(inherited=True))
elif reg in new_regs:
del new_regs[reg]
translate_graph_from_block(
child, new_regs, stack_info, used_phis, return_blocks, options
)
def resolve_types_late(stack_info: StackInfo) -> None:
"""
After translating a function, perform a final type-resolution pass.
"""
# Final check over stack var types. Because of delayed type unification, some
# locations should now be marked as "weak".
for location in stack_info.weak_stack_var_types.keys():
stack_info.get_stack_var(location, store=False)
# Use dereferences to determine pointer types
struct_type_map = stack_info.get_struct_type_map()
for var, offset_type_map in struct_type_map.items():
if len(offset_type_map) == 1 and 0 in offset_type_map:
# var was probably a plain pointer, not a struct
# Try to unify it with the appropriate pointer type,
# to fill in the type if it does not already have one
type = offset_type_map[0]
var.type.unify(Type.ptr(type))
@dataclass
class FunctionInfo:
stack_info: StackInfo
flow_graph: FlowGraph
return_type: Type
symbol: GlobalSymbol
@dataclass
class GlobalInfo:
asm_data: AsmData
arch: Arch
target: Target
local_functions: Set[str]
typemap: TypeMap
typepool: TypePool
global_symbol_map: Dict[str, GlobalSymbol] = field(default_factory=dict)
def asm_data_value(self, sym_name: str) -> Optional[AsmDataEntry]:
return self.asm_data.values.get(sym_name)
def address_of_gsym(self, sym_name: str) -> AddressOf:
if sym_name in self.global_symbol_map:
sym = self.global_symbol_map[sym_name]
else:
demangled_symbol: Optional[CxxSymbol] = None
demangled_str: Optional[str] = None
if self.target.language == Target.LanguageEnum.CXX:
try:
demangled_symbol = demangle_codewarrior_parse(sym_name)
except ValueError:
pass
else:
demangled_str = str(demangled_symbol)
sym = self.global_symbol_map[sym_name] = GlobalSymbol(
symbol_name=sym_name,
type=Type.any(),
asm_data_entry=self.asm_data_value(sym_name),
demangled_str=demangled_str,
)
# If the symbol is a C++ vtable, try to build a custom type for it by parsing it
if (
self.target.language == Target.LanguageEnum.CXX
and sym_name.startswith("__vt__")
and sym.asm_data_entry is not None
):
sym.type.unify(self.vtable_type(sym_name, sym.asm_data_entry))
fn = self.typemap.functions.get(sym_name)
ctype: Optional[CType]
if fn is not None:
ctype = fn.type
else:
ctype = self.typemap.var_types.get(sym_name)
if ctype is not None:
sym.symbol_in_context = True
sym.initializer_in_typemap = (
sym_name in self.typemap.vars_with_initializers
)
sym.type.unify(Type.ctype(ctype, self.typemap, self.typepool))
if sym_name not in self.typepool.unknown_decls:
sym.type_provided = True
elif sym_name in self.local_functions:
sym.type.unify(Type.function())
# Do this after unifying the type in the typemap, so that it has lower precedence
if demangled_symbol is not None:
sym.type.unify(
Type.demangled_symbol(self.typemap, self.typepool, demangled_symbol)
)
return AddressOf(sym, type=sym.type.reference())
def vtable_type(self, sym_name: str, asm_data_entry: AsmDataEntry) -> Type:
"""
Parse MWCC vtable data to create a custom struct to represent it.
This format is not well documented, but is briefly explored in this series of posts:
https://web.archive.org/web/20220413174849/http://hacksoflife.blogspot.com/2007/02/c-objects-part-2-single-inheritance.html
"""
size = asm_data_entry.size_range_bytes()[1]
struct = StructDeclaration.unknown(
self.typepool, size=size, align=4, tag_name=sym_name
)
offset = 0
for entry in asm_data_entry.data:
if isinstance(entry, bytes):
# MWCC vtables start with a pointer to a typeid struct (or NULL) and an offset
if len(entry) % 4 != 0:
raise DecompFailure(
f"Unable to parse misaligned vtable data in {sym_name}"
)
for i in range(len(entry) // 4):
field_name = f"{struct.new_field_prefix}{offset:X}"
struct.try_add_field(
Type.reg32(likely_float=False), offset, field_name, size=4
)
offset += 4
else:
entry_name = entry
try:
demangled_field_sym = demangle_codewarrior_parse(entry)
if demangled_field_sym.name.qualified_name is not None:
entry_name = str(demangled_field_sym.name.qualified_name[-1])
except ValueError:
pass
field = struct.try_add_field(
self.address_of_gsym(entry).type,
offset,
name=entry_name,
size=4,
)
assert field is not None
field.known = True
offset += 4
return Type.struct(struct)
def is_function_known_void(self, sym_name: str) -> bool:
"""Return True if the function exists in the context, and has no return value"""
fn = self.typemap.functions.get(sym_name)
if fn is None:
return False
return fn.ret_type is None
def initializer_for_symbol(
self, sym: GlobalSymbol, fmt: Formatter
) -> Optional[str]:
assert sym.asm_data_entry is not None
data = sym.asm_data_entry.data[:]
def read_uint(n: int) -> Optional[int]:
"""Read the next `n` bytes from `data` as an (long) integer"""
assert 0 < n <= 8
if not data or not isinstance(data[0], bytes):
return None
if len(data[0]) < n:
return None
bs = data[0][:n]
data[0] = data[0][n:]
if not data[0]:
del data[0]
value = 0
for b in bs:
value = (value << 8) | b
return value
def read_pointer() -> Optional[Expression]:
"""Read the next label from `data`"""
if not data or not isinstance(data[0], str):
return None
label = data[0]
data.pop(0)
return self.address_of_gsym(label)
def for_type(type: Type) -> Optional[str]:
"""Return the initializer for a single element of type `type`"""
if type.is_struct() or type.is_array():
struct_fields = type.get_initializer_fields()
if not struct_fields:
return None
members = []
for field in struct_fields:
if isinstance(field, int):
# Check that all padding bytes are 0
for i in range(field):
padding = read_uint(1)
if padding != 0:
return None
else:
m = for_type(field)
if m is None:
return None
members.append(m)
return fmt.format_array(members)
if type.is_reg():
size = type.get_size_bytes()
if not size:
return None
if size == 4:
ptr = read_pointer()
if ptr is not None:
return as_type(ptr, type, silent=True).format(fmt)
value = read_uint(size)
if value is not None:
enum_name = type.get_enum_name(value)
if enum_name is not None:
return enum_name
expr = as_type(Literal(value), type, True)
return elide_casts_for_store(expr).format(fmt)
# Type kinds K_FN and K_VOID do not have initializers
return None
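# Illustrative result (hypothetical data): an `s16 [3]` symbol whose bytes are
# 00 01 00 02 00 03 would come back roughly as "{ 1, 2, 3 }", while data that
# cannot be matched to the type (nonzero padding, short data) yields None.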
return for_type(sym.type)
def find_forward_declares_needed(self, functions: List[FunctionInfo]) -> Set[str]:
funcs_seen = set()
forward_declares_needed = self.asm_data.mentioned_labels
for func in functions:
funcs_seen.add(func.stack_info.function.name)
for instr in func.stack_info.function.body:
if not isinstance(instr, Instruction):
continue
for arg in instr.args:
if isinstance(arg, AsmGlobalSymbol):
func_name = arg.symbol_name
elif isinstance(arg, Macro) and isinstance(
arg.argument, AsmGlobalSymbol
):
func_name = arg.argument.symbol_name
else:
continue
if func_name in self.local_functions:
if func_name not in funcs_seen:
forward_declares_needed.add(func_name)
return forward_declares_needed
def global_decls(
self,
fmt: Formatter,
decls: Options.GlobalDeclsEnum,
functions: List[FunctionInfo],
) -> str:
# Format labels from symbol_type_map into global declarations.
# As the initializers are formatted, this may cause more symbols
# to be added to the global_symbol_map.
forward_declares_needed = self.find_forward_declares_needed(functions)
lines = []
processed_names: Set[str] = set()
while True:
names: AbstractSet[str] = self.global_symbol_map.keys()
if decls == Options.GlobalDeclsEnum.ALL:
names |= self.asm_data.values.keys()
names -= processed_names
if not names:
break
for name in sorted(names):
processed_names.add(name)
sym = self.address_of_gsym(name).expr
assert isinstance(sym, GlobalSymbol)
data_entry = sym.asm_data_entry
# Is the label defined in this unit (in the active AsmData file(s))
is_in_file = data_entry is not None or name in self.local_functions
# Is the label externally visible (mentioned in the context file)
is_global = sym.symbol_in_context
# Is the label a symbol in .rodata?
is_const = data_entry is not None and data_entry.is_readonly
if data_entry and data_entry.is_jtbl:
# Skip jump tables
continue
if is_in_file and is_global and sym.type.is_function():
# Skip externally-declared functions that are defined here
continue
if self.local_functions == {name}:
# Skip the function being decompiled if it is the only local function
continue
if not is_in_file and sym.type_provided:
# Skip externally-declared symbols that are defined in other files
continue
# TODO: Use original MIPSFile ordering for variables
sort_order = (
not sym.type.is_function(),
is_global,
is_in_file,
is_const,
name,
)
qualifier = ""
value: Optional[str] = None
comments = []
# Determine type qualifier: static, extern, or neither
if is_in_file and is_global:
qualifier = ""
elif is_in_file:
qualifier = "static"
else:
qualifier = "extern"
if sym.type.is_function():
comments.append(qualifier)
qualifier = ""
# Try to guess if the symbol is an array (and if it is, its dimension) if
# we have a data entry for it, and the symbol is either not in the typemap
# or was a variable-length array there ("VLA", e.g. `int []`)
# (Otherwise, if the dim is provided by the typemap, we trust it.)
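# Hypothetical example: a 0x30-byte .data entry for a symbol declared as `s32 []`
# in the context would be re-typed here as a 12-element array, with any leftover
# bytes reported via an "extra bytes" comment.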
element_type, array_dim = sym.type.get_array()
is_vla = element_type is not None and (
array_dim is None or array_dim <= 0
)
if data_entry and (not sym.type_provided or is_vla):
# The size of the data entry is uncertain, because of padding
# between sections. Generally `(max_data_size - min_data_size) < 16`.
min_data_size, max_data_size = data_entry.size_range_bytes()
# The size of the element type (not the size of the array type)
if element_type is None:
element_type = sym.type
# If we don't know the type, we can't guess the array_dim
type_size = element_type.get_size_bytes()
if type_size:
potential_dim, extra_bytes = sym.potential_array_dim(type_size)
if potential_dim == 0 and extra_bytes > 0:
# The type is too big for our data. (not an array)
comments.append(
f"type too large by {fmt.format_int(type_size - extra_bytes)}"
)
elif potential_dim > 1 or is_vla:
# NB: In general, replacing the types of Expressions can be sketchy.
# However, the GlobalSymbol here came from address_of_gsym(), which
# always returns a reference to the element_type.
array_dim = potential_dim
sym.type = Type.array(element_type, array_dim)
if potential_dim != 0 and extra_bytes > 0:
comments.append(
f"extra bytes: {fmt.format_int(extra_bytes)}"
)
# Try to convert the data from .data/.rodata into an initializer
if data_entry and not data_entry.is_bss:
value = self.initializer_for_symbol(sym, fmt)
if value is None:
# This warning helps distinguish .bss symbols from .data/.rodata;
# IDO only puts symbols in .bss if they don't have any initializer.
comments.append("unable to generate initializer")
if is_const:
comments.append("const")
# Float & string constants are almost always inlined and can be omitted
if sym.is_string_constant():
continue
if array_dim is None and sym.type.is_likely_float():
continue
# In "none" mode, do not emit any decls
if decls == Options.GlobalDeclsEnum.NONE:
continue
# In modes except "all", skip the decl if the context file already had an initializer
if decls != Options.GlobalDeclsEnum.ALL and sym.initializer_in_typemap:
continue
# In modes except "all", skip vtable decls when compiling C++
if (
decls != Options.GlobalDeclsEnum.ALL
and self.target.language == Target.LanguageEnum.CXX
and name.startswith("__vt__")
):
continue
if (
sym.type.is_function()
and decls != Options.GlobalDeclsEnum.ALL
and name in self.local_functions
and name not in forward_declares_needed
):
continue
qualifier = f"{qualifier} " if qualifier else ""
value = f" = {value}" if value else ""
lines.append(
(
sort_order,
fmt.with_comments(
f"{qualifier}{sym.type.to_decl(name, fmt)}{value};",
comments,
)
+ "\n",
)
)
lines.sort()
return "".join(line for _, line in lines)
def narrow_func_call_outputs(
function: Function,
global_info: GlobalInfo,
) -> None:
"""
Modify the `outputs` list of function call Instructions using the context file.
For now, this only handles known-void functions, but in the future it could
be extended to select a specific register subset based on type.
"""
for instr in function.body:
if (
isinstance(instr, Instruction)
and isinstance(instr.function_target, AsmGlobalSymbol)
and global_info.is_function_known_void(instr.function_target.symbol_name)
):
instr.outputs.clear()
def translate_to_ast(
function: Function,
flow_graph: FlowGraph,
options: Options,
global_info: GlobalInfo,
) -> FunctionInfo:
"""
Given a function, produce a FlowGraph that both contains control-flow
information and has AST transformations for each block of code and
branch condition.
"""
# Initialize info about the function.
stack_info = get_stack_info(function, global_info, flow_graph)
start_regs: RegInfo = RegInfo(stack_info=stack_info)
arch = global_info.arch
start_regs[arch.stack_pointer_reg] = GlobalSymbol("sp", type=Type.ptr())
for reg in arch.saved_regs:
start_regs[reg] = stack_info.saved_reg_symbol(reg.register_name)
fn_sym = global_info.address_of_gsym(function.name).expr
assert isinstance(fn_sym, GlobalSymbol)
fn_type = fn_sym.type
fn_type.unify(Type.function())
fn_sig = Type.ptr(fn_type).get_function_pointer_signature()
assert fn_sig is not None, "fn_type is known to be a function"
return_type = fn_sig.return_type
stack_info.is_variadic = fn_sig.is_variadic
def make_arg(offset: int, type: Type) -> PassedInArg:
assert offset % 4 == 0
return PassedInArg(offset, copied=False, stack_info=stack_info, type=type)
abi = arch.function_abi(
fn_sig,
likely_regs={reg: True for reg in arch.argument_regs},
for_call=False,
)
for slot in abi.arg_slots:
stack_info.add_known_param(slot.offset, slot.name, slot.type)
if slot.reg is not None:
start_regs.set_with_meta(
slot.reg, make_arg(slot.offset, slot.type), RegMeta(uninteresting=True)
)
for slot in abi.possible_slots:
if slot.reg is not None:
start_regs.set_with_meta(
slot.reg, make_arg(slot.offset, slot.type), RegMeta(uninteresting=True)
)
if options.reg_vars == ["saved"]:
reg_vars = arch.saved_regs
elif options.reg_vars == ["most"]:
reg_vars = arch.saved_regs + arch.simple_temp_regs
elif options.reg_vars == ["all"]:
reg_vars = arch.saved_regs + arch.simple_temp_regs + arch.argument_regs
else:
reg_vars = [
stack_info.function.reg_formatter.parse(x, arch) for x in options.reg_vars
]
for reg in reg_vars:
reg_name = stack_info.function.reg_formatter.format(reg)
stack_info.add_register_var(reg, reg_name)
if options.debug:
print(stack_info)
print("\nNow, we attempt to translate:")
used_phis: List[PhiExpr] = []
return_blocks: List[BlockInfo] = []
translate_graph_from_block(
flow_graph.entry_node(),
start_regs,
stack_info,
used_phis,
return_blocks,
options,
)
for reg in arch.base_return_regs:
propagate_register_meta(flow_graph.nodes, reg)
return_reg: Optional[Register] = None
if not options.void and not return_type.is_void():
return_reg = determine_return_register(
return_blocks, fn_sym.type_provided, arch
)
if return_reg is not None:
for b in return_blocks:
if return_reg in b.final_register_states:
ret_val = b.final_register_states[return_reg]
ret_val = as_type(ret_val, return_type, True)
ret_val.use()
b.return_value = ret_val
else:
return_type.unify(Type.void())
if not fn_sig.params_known:
while len(fn_sig.params) < len(stack_info.arguments):
fn_sig.params.append(FunctionParam())
for param, arg in zip(fn_sig.params, stack_info.arguments):
param.type.unify(arg.type)
if not param.name:
param.name = arg.format(Formatter())
assign_phis(used_phis, stack_info)
resolve_types_late(stack_info)
if options.pdb_translate:
import pdb
v: Dict[str, object] = {}
fmt = Formatter()
for local in stack_info.local_vars:
var_name = local.format(fmt)
v[var_name] = local
for temp in stack_info.temp_vars:
if temp.need_decl():
var_name = temp.expr.var.format(fmt)
v[var_name] = temp.expr
for phi in stack_info.phi_vars:
assert phi.name is not None
v[phi.name] = phi
pdb.set_trace()
return FunctionInfo(stack_info, flow_graph, return_type, fn_sym)
|
import abc
from collections import defaultdict
from contextlib import contextmanager
from dataclasses import dataclass, field, replace
import math
import struct
import sys
import traceback
import typing
from typing import (
AbstractSet,
Callable,
Collection,
DefaultDict,
Dict,
Iterator,
List,
Mapping,
Optional,
Set,
Tuple,
Union,
)
from .c_types import CType, TypeMap
from .demangle_codewarrior import parse as demangle_codewarrior_parse, CxxSymbol
from .error import DecompFailure, static_assert_unreachable
from .flow_graph import (
ArchFlowGraph,
FlowGraph,
Function,
Node,
ReturnNode,
SwitchNode,
TerminalNode,
locs_clobbered_until_dominator,
)
from .ir_pattern import IrPattern, simplify_ir_patterns
from .options import CodingStyle, Formatter, Options, Target
from .parse_file import AsmData, AsmDataEntry
from .parse_instruction import (
ArchAsm,
Argument,
AsmAddressMode,
AsmGlobalSymbol,
AsmLiteral,
BinOp,
Instruction,
InstrProcessingFailure,
Macro,
Register,
StackLocation,
current_instr,
)
from .types import (
AccessPath,
FunctionParam,
FunctionSignature,
StructDeclaration,
Type,
TypePool,
)
InstrSet = Collection[str]
InstrMap = Mapping[str, Callable[["InstrArgs"], "Expression"]]
StmtInstrMap = Mapping[str, Callable[["InstrArgs"], "Statement"]]
CmpInstrMap = Mapping[str, Callable[["InstrArgs"], "Condition"]]
StoreInstrMap = Mapping[str, Callable[["InstrArgs"], Optional["StoreStmt"]]]
MaybeInstrMap = Mapping[str, Callable[["InstrArgs"], Optional["Expression"]]]
PairInstrMap = Mapping[str, Callable[["InstrArgs"], Tuple["Expression", "Expression"]]]
ImplicitInstrMap = Mapping[str, Tuple[Register, Callable[["InstrArgs"], "Expression"]]]
PpcCmpInstrMap = Mapping[str, Callable[["InstrArgs", str], "Expression"]]
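# These aliases describe the per-mnemonic handler tables that each Arch subclass
# fills in: e.g. an InstrMap entry receives the parsed InstrArgs for one
# instruction and builds the Expression it evaluates to.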
class Arch(ArchFlowGraph):
instrs_ignore: InstrSet = set()
instrs_store: StoreInstrMap = {}
instrs_store_update: StoreInstrMap = {}
instrs_load_update: InstrMap = {}
instrs_branches: CmpInstrMap = {}
instrs_float_branches: InstrSet = set()
instrs_float_comp: CmpInstrMap = {}
instrs_ppc_compare: PpcCmpInstrMap = {}
instrs_jumps: InstrSet = set()
instrs_fn_call: InstrSet = set()
instrs_no_dest: StmtInstrMap = {}
instrs_hi_lo: PairInstrMap = {}
instrs_source_first: InstrMap = {}
instrs_destination_first: InstrMap = {}
instrs_implicit_destination: ImplicitInstrMap = {}
@abc.abstractmethod
def function_abi(
self,
fn_sig: FunctionSignature,
likely_regs: Dict[Register, bool],
*,
for_call: bool,
) -> "Abi":
"""
Compute stack positions/registers used by a function based on its type
information. Also computes a list of registers that may contain arguments,
if the function has varargs or an unknown/incomplete type.
"""
...
@abc.abstractmethod
def function_return(self, expr: "Expression") -> Dict[Register, "Expression"]:
"""
Compute register location(s) & values that will hold the return value
of the function call `expr`.
This must have a value for each register in `all_return_regs` in order to stay
consistent with `Instruction.outputs`. This is why we can't use the
function's return type, even though it may be more accurate.
"""
...
# These are defined here to avoid a circular import in flow_graph.py
ir_patterns: List[IrPattern] = []
def simplify_ir(self, flow_graph: FlowGraph) -> None:
simplify_ir_patterns(self, flow_graph, self.ir_patterns)
ASSOCIATIVE_OPS: Set[str] = {"+", "&&", "||", "&", "|", "^", "*"}
COMPOUND_ASSIGNMENT_OPS: Set[str] = {"+", "-", "*", "/", "%", "&", "|", "^", "<<", ">>"}
PSEUDO_FUNCTION_OPS: Set[str] = {"MULT_HI", "MULTU_HI", "DMULT_HI", "DMULTU_HI", "CLZ"}
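# These operator sets drive formatting below: ASSOCIATIVE_OPS lets nested
# BinaryOps with the same operator drop redundant parentheses, and
# PSEUDO_FUNCTION_OPS are printed as calls (e.g. MULT_HI(a, b)) rather than as
# infix operators. (COMPOUND_ASSIGNMENT_OPS is consulted elsewhere when writing
# `x OP= y` style assignments.)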
def as_type(expr: "Expression", type: Type, silent: bool) -> "Expression":
type = type.weaken_void_ptr()
ptr_target_type = type.get_pointer_target()
if expr.type.unify(type):
if silent or isinstance(expr, Literal):
return expr
elif ptr_target_type is not None:
ptr_target_type_size = ptr_target_type.get_size_bytes()
field_path, field_type, _ = expr.type.get_deref_field(
0, target_size=ptr_target_type_size
)
if field_path is not None and field_type.unify(ptr_target_type):
expr = AddressOf(
StructAccess(
struct_var=expr,
offset=0,
target_size=ptr_target_type_size,
field_path=field_path,
stack_info=None,
type=field_type,
),
type=type,
)
if silent:
return expr
return Cast(expr=expr, reinterpret=True, silent=False, type=type)
def as_f32(expr: "Expression") -> "Expression":
return as_type(expr, Type.f32(), True)
def as_f64(expr: "Expression") -> "Expression":
return as_type(expr, Type.f64(), True)
def as_sintish(expr: "Expression", *, silent: bool = False) -> "Expression":
return as_type(expr, Type.sintish(), silent)
def as_uintish(expr: "Expression") -> "Expression":
return as_type(expr, Type.uintish(), False)
def as_u32(expr: "Expression") -> "Expression":
return as_type(expr, Type.u32(), False)
def as_s64(expr: "Expression", *, silent: bool = False) -> "Expression":
return as_type(expr, Type.s64(), silent)
def as_u64(expr: "Expression", *, silent: bool = False) -> "Expression":
return as_type(expr, Type.u64(), silent)
def as_intish(expr: "Expression") -> "Expression":
return as_type(expr, Type.intish(), True)
def as_int64(expr: "Expression") -> "Expression":
return as_type(expr, Type.int64(), True)
def as_intptr(expr: "Expression") -> "Expression":
return as_type(expr, Type.intptr(), True)
def as_ptr(expr: "Expression") -> "Expression":
return as_type(expr, Type.ptr(), True)
def as_function_ptr(expr: "Expression") -> "Expression":
return as_type(expr, Type.ptr(Type.function()), True)
@dataclass
class StackInfo:
function: Function
global_info: "GlobalInfo"
flow_graph: FlowGraph
allocated_stack_size: int = 0
is_leaf: bool = True
is_variadic: bool = False
uses_framepointer: bool = False
subroutine_arg_top: int = 0
callee_save_regs: Set[Register] = field(default_factory=set)
callee_save_reg_region: Tuple[int, int] = (0, 0)
unique_type_map: Dict[Tuple[str, object], "Type"] = field(default_factory=dict)
local_vars: List["LocalVar"] = field(default_factory=list)
temp_vars: List["EvalOnceStmt"] = field(default_factory=list)
phi_vars: List["PhiExpr"] = field(default_factory=list)
reg_vars: Dict[Register, "RegisterVar"] = field(default_factory=dict)
used_reg_vars: Set[Register] = field(default_factory=set)
arguments: List["PassedInArg"] = field(default_factory=list)
temp_name_counter: Dict[str, int] = field(default_factory=dict)
nonzero_accesses: Set["Expression"] = field(default_factory=set)
param_names: Dict[int, str] = field(default_factory=dict)
stack_pointer_type: Optional[Type] = None
replace_first_arg: Optional[Tuple[str, Type]] = None
weak_stack_var_types: Dict[int, Type] = field(default_factory=dict)
weak_stack_var_locations: Set[int] = field(default_factory=set)
def temp_var(self, prefix: str) -> str:
counter = self.temp_name_counter.get(prefix, 0) + 1
self.temp_name_counter[prefix] = counter
return prefix + (f"_{counter}" if counter > 1 else "")
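# e.g. successive calls with prefix "temp" return "temp", "temp_2", "temp_3", ...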
def in_subroutine_arg_region(self, location: int) -> bool:
if self.global_info.arch.arch == Target.ArchEnum.PPC:
return False
if self.is_leaf:
return False
assert self.subroutine_arg_top is not None
return location < self.subroutine_arg_top
def in_callee_save_reg_region(self, location: int) -> bool:
lower_bound, upper_bound = self.callee_save_reg_region
if lower_bound <= location < upper_bound:
return True
# PPC saves LR in the header of the previous stack frame
if (
self.global_info.arch.arch == Target.ArchEnum.PPC
and location == self.allocated_stack_size + 4
):
return True
return False
def location_above_stack(self, location: int) -> bool:
return location >= self.allocated_stack_size
def add_known_param(self, offset: int, name: Optional[str], type: Type) -> None:
# A common pattern in C for OOP-style polymorphism involves casting a general "base" struct
# to a specific "class" struct, where the first member of the class struct is the base struct.
#
# For the first argument of the function, if it is a pointer to a base struct, and there
# exists a class struct named after the first part of the function name, assume that
# this pattern is being used. Internally, treat the argument as a pointer to the *class*
# struct, even though it is only a pointer to the *base* struct in the provided context.
if offset == 0 and type.is_pointer() and self.replace_first_arg is None:
namespace = self.function.name.partition("_")[0]
base_struct_type = type.get_pointer_target()
self_struct = self.global_info.typepool.get_struct_by_tag_name(
namespace, self.global_info.typemap
)
if (
self_struct is not None
and base_struct_type is not None
and base_struct_type.is_struct()
):
# Check if `self_struct_type` contains a `base_struct_type` at offset 0
self_struct_type = Type.struct(self_struct)
field_path, field_type, _ = self_struct_type.get_field(
offset=0, target_size=base_struct_type.get_size_bytes()
)
if (
field_path is not None
and field_type.unify(base_struct_type)
and not self_struct_type.unify(base_struct_type)
):
# Success, it looks like `self_struct_type` extends `base_struct_type`.
# By default, name the local var `self`, unless the argument name is `thisx`, in which case use `this`
self.replace_first_arg = (name or "_self", type)
name = "this" if name == "thisx" else "self"
type = Type.ptr(Type.struct(self_struct))
if name:
self.param_names[offset] = name
_, arg = self.get_argument(offset)
self.add_argument(arg)
arg.type.unify(type)
def get_param_name(self, offset: int) -> Optional[str]:
return self.param_names.get(offset)
def add_local_var(self, var: "LocalVar") -> None:
if any(v.value == var.value for v in self.local_vars):
return
self.local_vars.append(var)
# Make sure the local vars stay sorted in order on the stack.
self.local_vars.sort(key=lambda v: v.value)
def add_argument(self, arg: "PassedInArg") -> None:
if any(a.value == arg.value for a in self.arguments):
return
self.arguments.append(arg)
self.arguments.sort(key=lambda a: a.value)
def get_argument(self, location: int) -> Tuple["Expression", "PassedInArg"]:
real_location = location & -4
arg = PassedInArg(
real_location,
copied=True,
stack_info=self,
type=self.unique_type_for("arg", real_location, Type.any_reg()),
)
if real_location == location - 3:
return as_type(arg, Type.int_of_size(8), True), arg
if real_location == location - 2:
return as_type(arg, Type.int_of_size(16), True), arg
return arg, arg
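# e.g. (sketch, big-endian layout): an access at location 0x13 maps to the
# word-aligned argument slot at 0x10 and is returned as an 8-bit view of that
# argument; an access at 0x12 is returned as a 16-bit view.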
def record_struct_access(self, ptr: "Expression", location: int) -> None:
if location:
self.nonzero_accesses.add(unwrap_deep(ptr))
def has_nonzero_access(self, ptr: "Expression") -> bool:
return unwrap_deep(ptr) in self.nonzero_accesses
def unique_type_for(self, category: str, key: object, default: Type) -> "Type":
key = (category, key)
if key not in self.unique_type_map:
self.unique_type_map[key] = default
return self.unique_type_map[key]
def saved_reg_symbol(self, reg_name: str) -> "GlobalSymbol":
sym_name = "saved_reg_" + reg_name
type = self.unique_type_for("saved_reg", sym_name, Type.any_reg())
return GlobalSymbol(symbol_name=sym_name, type=type)
def should_save(self, expr: "Expression", offset: Optional[int]) -> bool:
expr = early_unwrap(expr)
if isinstance(expr, GlobalSymbol) and (
expr.symbol_name.startswith("saved_reg_") or expr.symbol_name == "sp"
):
return True
if (
isinstance(expr, PassedInArg)
and not expr.copied
and (offset is None or offset == self.allocated_stack_size + expr.value)
):
return True
return False
def get_stack_var(self, location: int, *, store: bool) -> "Expression":
# See `get_stack_info` for explanation
if self.in_callee_save_reg_region(location):
# Some annoying bookkeeping instruction. To avoid
# further special-casing, just return whatever - it won't matter.
return LocalVar(location, type=Type.any_reg(), path=None)
elif self.location_above_stack(location):
ret, arg = self.get_argument(location - self.allocated_stack_size)
if not store:
self.add_argument(arg)
return ret
elif self.in_subroutine_arg_region(location):
return SubroutineArg(location, type=Type.any_reg())
else:
# Local variable
assert self.stack_pointer_type is not None
field_path, field_type, _ = self.stack_pointer_type.get_deref_field(
location, target_size=None
)
# Some variables on the stack are compiler-managed, and aren't declared
# in the original source. These variables can have different types inside
# different blocks, so we track their types but assume that they may change
# on each store.
# TODO: Because the types are tracked in StackInfo instead of RegInfo, it is
# possible that a load could incorrectly use a weak type from a sibling node
# instead of a parent node. A more correct implementation would use similar
# logic to the PhiExpr system. In practice however, storing types in StackInfo
# works well enough because nodes are traversed approximately depth-first.
# TODO: Maybe only do this for certain configurable regions?
# Get the previous type stored in `location`
previous_stored_type = self.weak_stack_var_types.get(location)
if previous_stored_type is not None:
# Check if the `field_type` is compatible with the type of the last store
if not previous_stored_type.unify(field_type):
# The types weren't compatible: mark this `location` as "weak"
# This marker is only used to annotate the output
self.weak_stack_var_locations.add(location)
if store:
# If there's already been a store to `location`, then return a fresh type
field_type = Type.any_field()
else:
# Use the type of the last store instead of the one from `get_deref_field()`
field_type = previous_stored_type
# Track the type last stored at `location`
if store:
self.weak_stack_var_types[location] = field_type
return LocalVar(location, type=field_type, path=field_path)
def maybe_get_register_var(self, reg: Register) -> Optional["RegisterVar"]:
return self.reg_vars.get(reg)
def add_register_var(self, reg: Register, name: str) -> None:
type = Type.floatish() if reg.is_float() else Type.intptr()
self.reg_vars[reg] = RegisterVar(reg=reg, type=type, name=name)
def use_register_var(self, var: "RegisterVar") -> None:
self.used_reg_vars.add(var.reg)
def is_stack_reg(self, reg: Register) -> bool:
if reg == self.global_info.arch.stack_pointer_reg:
return True
if reg == self.global_info.arch.frame_pointer_reg:
return self.uses_framepointer
return False
def get_struct_type_map(self) -> Dict["Expression", Dict[int, Type]]:
"""Reorganize struct information in unique_type_map by var & offset"""
struct_type_map: Dict[Expression, Dict[int, Type]] = {}
for (category, key), type in self.unique_type_map.items():
if category != "struct":
continue
var, offset = typing.cast(Tuple[Expression, int], key)
if var not in struct_type_map:
struct_type_map[var] = {}
struct_type_map[var][offset] = type
return struct_type_map
def __str__(self) -> str:
return "\n".join(
[
f"Stack info for function {self.function.name}:",
f"Allocated stack size: {self.allocated_stack_size}",
f"Leaf? {self.is_leaf}",
f"Bounds of callee-saved vars region: {self.callee_save_reg_region}",
f"Callee save registers: {self.callee_save_regs}",
]
)
def get_stack_info(
function: Function,
global_info: "GlobalInfo",
flow_graph: FlowGraph,
) -> StackInfo:
arch = global_info.arch
info = StackInfo(function, global_info, flow_graph)
# The goal here is to pick out special instructions that provide information
# about this function's stack setup.
#
# IDO puts local variables *above* the saved registers on the stack, but
# GCC puts local variables *below* the saved registers.
# To support both, we explicitly determine both the upper & lower bounds of the
# saved registers. Then, we estimate the boundary of the subroutine arguments
# by finding the lowest stack offset that is loaded from or computed. (This
# assumes that the compiler will never reuse a section of stack for *both*
# a local variable *and* a subroutine argument.) Anything within the stack frame,
# but outside of these two regions, is considered a local variable.
callee_saved_offsets: List[int] = []
# Track simple literal values stored into registers: MIPS compilers need a temp
# reg to move the stack pointer more than 0x7FFF bytes.
temp_reg_values: Dict[Register, int] = {}
for inst in flow_graph.entry_node().block.instructions:
arch_mnemonic = inst.arch_mnemonic(arch)
if inst.mnemonic in arch.instrs_fn_call:
break
elif arch_mnemonic == "mips:addiu" and inst.args[0] == arch.stack_pointer_reg:
# Moving the stack pointer on MIPS
assert isinstance(inst.args[2], AsmLiteral)
info.allocated_stack_size = abs(inst.args[2].signed_value())
elif (
arch_mnemonic == "mips:subu"
and inst.args[0] == arch.stack_pointer_reg
and inst.args[1] == arch.stack_pointer_reg
and inst.args[2] in temp_reg_values
):
# Moving the stack pointer more than 0x7FFF on MIPS
# TODO: This instruction needs to be ignored later in translation, in the
# same way that `addiu $sp, $sp, N` is ignored in handle_addi_real
assert isinstance(inst.args[2], Register)
info.allocated_stack_size = temp_reg_values[inst.args[2]]
elif arch_mnemonic == "ppc:stwu" and inst.args[0] == arch.stack_pointer_reg:
# Moving the stack pointer on PPC
assert isinstance(inst.args[1], AsmAddressMode)
assert isinstance(inst.args[1].lhs, AsmLiteral)
info.allocated_stack_size = abs(inst.args[1].lhs.signed_value())
elif (
arch_mnemonic == "mips:move"
and inst.args[0] == arch.frame_pointer_reg
and inst.args[1] == arch.stack_pointer_reg
):
# "move fp, sp" very likely means the code is compiled with frame
# pointers enabled; thus fp should be treated the same as sp.
info.uses_framepointer = True
elif (
arch_mnemonic
in [
"mips:sw",
"mips:swc1",
"mips:sdc1",
"ppc:stw",
"ppc:stmw",
"ppc:stfd",
"ppc:psq_st",
]
and isinstance(inst.args[0], Register)
and inst.args[0] in arch.saved_regs
and isinstance(inst.args[1], AsmAddressMode)
and inst.args[1].rhs == arch.stack_pointer_reg
and (
inst.args[0] not in info.callee_save_regs
or arch_mnemonic == "ppc:psq_st"
)
):
# Initial saving of callee-save register onto the stack.
if inst.args[0] in (arch.return_address_reg, Register("r0")):
# Saving the return address on the stack.
info.is_leaf = False
# The registers & their stack accesses must be matched up in ArchAsm.parse
for reg, mem in zip(inst.inputs, inst.outputs):
if isinstance(reg, Register) and isinstance(mem, StackLocation):
assert mem.symbolic_offset is None
stack_offset = mem.offset
if arch_mnemonic != "ppc:psq_st":
# psq_st instructions store the same register as stfd, just
# as packed singles instead. Prioritize the stfd.
info.callee_save_regs.add(reg)
callee_saved_offsets.append(stack_offset)
elif arch_mnemonic == "ppc:mflr" and inst.args[0] == Register("r0"):
info.is_leaf = False
elif arch_mnemonic == "mips:li" and inst.args[0] in arch.temp_regs:
assert isinstance(inst.args[0], Register)
assert isinstance(inst.args[1], AsmLiteral)
temp_reg_values[inst.args[0]] = inst.args[1].value
elif (
arch_mnemonic == "mips:ori"
and inst.args[0] == inst.args[1]
and inst.args[0] in temp_reg_values
):
assert isinstance(inst.args[0], Register)
assert isinstance(inst.args[2], AsmLiteral)
temp_reg_values[inst.args[0]] |= inst.args[2].value
if not info.is_leaf:
# Iterate over the whole function, not just the first basic block,
# to estimate the boundary for the subroutine argument region
info.subroutine_arg_top = info.allocated_stack_size
for node in flow_graph.nodes:
for inst in node.block.instructions:
arch_mnemonic = inst.arch_mnemonic(arch)
if (
arch_mnemonic in ["mips:lw", "mips:lwc1", "mips:ldc1", "ppc:lwz"]
and isinstance(inst.args[1], AsmAddressMode)
and inst.args[1].rhs == arch.stack_pointer_reg
and inst.args[1].lhs_as_literal() >= 16
):
info.subroutine_arg_top = min(
info.subroutine_arg_top, inst.args[1].lhs_as_literal()
)
elif (
arch_mnemonic == "mips:addiu"
and inst.args[0] != arch.stack_pointer_reg
and inst.args[1] == arch.stack_pointer_reg
and isinstance(inst.args[2], AsmLiteral)
and inst.args[2].value < info.allocated_stack_size
):
info.subroutine_arg_top = min(
info.subroutine_arg_top, inst.args[2].value
)
# Compute the bounds of the callee-saved register region, including padding
if callee_saved_offsets:
callee_saved_offsets.sort()
bottom = callee_saved_offsets[0]
# Both IDO & GCC save registers in two subregions:
# (a) One for double-sized registers
# (b) One for word-sized registers, padded to a multiple of 8 bytes
# IDO has (a) lower than (b); GCC has (b) lower than (a)
# Check that there are no gaps in this region, other than a single
# 4-byte word between subregions.
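# e.g. saved offsets [0x10, 0x14, 0x18, 0x20] are accepted (the single 4-byte
# gap counts as the padding word), producing a region of (0x10, 0x24); a second
# gap would raise DecompFailure instead.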
top = bottom
internal_padding_added = False
for offset in callee_saved_offsets:
if offset != top:
if not internal_padding_added and offset == top + 4:
internal_padding_added = True
else:
raise DecompFailure(
f"Gap in callee-saved word stack region. "
f"Saved: {callee_saved_offsets}, "
f"gap at: {offset} != {top}."
)
top = offset + 4
info.callee_save_reg_region = (bottom, top)
# Subroutine arguments must be at the very bottom of the stack, so they
# must come after the callee-saved region
info.subroutine_arg_top = min(info.subroutine_arg_top, bottom)
# Use a struct to represent the stack layout. If the struct is provided in the context,
# its fields will be used for variable types & names.
stack_struct_name = f"_mips2c_stack_{function.name}"
stack_struct = global_info.typepool.get_struct_by_tag_name(
stack_struct_name, global_info.typemap
)
if stack_struct is not None:
if stack_struct.size != info.allocated_stack_size:
raise DecompFailure(
f"Function {function.name} has a provided stack type {stack_struct_name} "
f"with size {stack_struct.size}, but the detected stack size was "
f"{info.allocated_stack_size}."
)
else:
stack_struct = StructDeclaration.unknown(
global_info.typepool,
size=info.allocated_stack_size,
tag_name=stack_struct_name,
)
# Mark the struct as a stack struct so we never try to use a reference to the struct itself
stack_struct.is_stack = True
stack_struct.new_field_prefix = "sp"
# This acts as the type of the $sp register
info.stack_pointer_type = Type.ptr(Type.struct(stack_struct))
return info
def format_hex(val: int) -> str:
return format(val, "x").upper()
def escape_byte(b: int) -> bytes:
table = {
b"\0": b"\\0",
b"\b": b"\\b",
b"\f": b"\\f",
b"\n": b"\\n",
b"\r": b"\\r",
b"\t": b"\\t",
b"\v": b"\\v",
b"\\": b"\\\\",
b'"': b'\\"',
}
bs = bytes([b])
if bs in table:
return table[bs]
if b < 0x20 or b in (0xFF, 0x7F):
return f"\\x{b:02x}".encode("ascii")
return bs
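# e.g. escape_byte(0x0A) == b"\\n", escape_byte(0x41) == b"A",
# escape_byte(0x01) == b"\\x01".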
@dataclass(eq=False)
class Var:
stack_info: StackInfo = field(repr=False)
prefix: str
num_usages: int = 0
name: Optional[str] = None
def format(self, fmt: Formatter) -> str:
if self.name is None:
self.name = self.stack_info.temp_var(self.prefix)
return self.name
def __str__(self) -> str:
return "<temp>"
class Expression(abc.ABC):
type: Type
@abc.abstractmethod
def dependencies(self) -> List["Expression"]:
...
def use(self) -> None:
"""Mark an expression as "will occur in the output". Various subclasses
override this to provide special behavior; for instance, EvalOnceExpr
checks if it occurs more than once in the output and if so emits a temp.
It is important to get the number of use() calls correct:
* if use() is called but the expression is not emitted, it may cause
function calls to be silently dropped.
* if use() is not called but the expression is emitted, it may cause phi
variables to be printed as unnamed-phi($reg), without any assignment
to that phi.
* if use() is called once but the expression is emitted twice, it may
cause function calls to be duplicated."""
for expr in self.dependencies():
expr.use()
@abc.abstractmethod
def format(self, fmt: Formatter) -> str:
...
def __str__(self) -> str:
"""Stringify an expression for debug purposes. The output can change
depending on when this is called, e.g. because of EvalOnceExpr state.
To avoid using it by accident, output is quoted."""
fmt = Formatter(debug=True)
return '"' + self.format(fmt) + '"'
class Condition(Expression):
@abc.abstractmethod
def negated(self) -> "Condition":
...
class Statement(abc.ABC):
@abc.abstractmethod
def should_write(self) -> bool:
...
@abc.abstractmethod
def format(self, fmt: Formatter) -> str:
...
def __str__(self) -> str:
"""Stringify a statement for debug purposes. The output can change
depending on when this is called, e.g. because of EvalOnceExpr state.
To avoid using it by accident, output is quoted."""
fmt = Formatter(debug=True)
return '"' + self.format(fmt) + '"'
@dataclass(frozen=True, eq=False)
class ErrorExpr(Condition):
desc: Optional[str] = None
type: Type = field(default_factory=Type.any_reg)
def dependencies(self) -> List[Expression]:
return []
def negated(self) -> "Condition":
return self
def format(self, fmt: Formatter) -> str:
if self.desc is not None:
return f"MIPS2C_ERROR({self.desc})"
return "MIPS2C_ERROR()"
@dataclass(frozen=True)
class CommentExpr(Expression):
expr: Expression
type: Type = field(compare=False)
prefix: Optional[str] = None
suffix: Optional[str] = None
def dependencies(self) -> List[Expression]:
return [self.expr]
def format(self, fmt: Formatter) -> str:
expr_str = self.expr.format(fmt)
if fmt.coding_style.comment_style == CodingStyle.CommentStyle.NONE:
return expr_str
prefix_str = f"/* {self.prefix} */ " if self.prefix is not None else ""
suffix_str = f" /* {self.suffix} */" if self.suffix is not None else ""
return f"{prefix_str}{expr_str}{suffix_str}"
@staticmethod
def wrap(
expr: Expression, prefix: Optional[str] = None, suffix: Optional[str] = None
) -> Expression:
if prefix is None and suffix is None:
return expr
return CommentExpr(expr=expr, type=expr.type, prefix=prefix, suffix=suffix)
@dataclass(frozen=True, eq=False)
class SecondF64Half(Expression):
type: Type = field(default_factory=Type.any_reg)
def dependencies(self) -> List[Expression]:
return []
def format(self, fmt: Formatter) -> str:
return "(second half of f64)"
@dataclass(frozen=True, eq=False)
class CarryBit(Expression):
type: Type = field(default_factory=Type.intish)
def dependencies(self) -> List[Expression]:
return []
def format(self, fmt: Formatter) -> str:
return "MIPS2C_CARRY"
@staticmethod
def add_to(expr: Expression) -> "BinaryOp":
return fold_divmod(BinaryOp.intptr(expr, "+", CarryBit()))
@staticmethod
def sub_from(expr: Expression) -> "BinaryOp":
return BinaryOp.intptr(expr, "-", UnaryOp("!", CarryBit(), type=Type.intish()))
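# e.g. (sketch) add_to(x) renders as (x + MIPS2C_CARRY) and sub_from(x) as
# (x - !MIPS2C_CARRY), mirroring how carry-propagating PPC sequences thread the
# carry bit through multi-word arithmetic.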
@dataclass(frozen=True, eq=False)
class BinaryOp(Condition):
left: Expression
op: str
right: Expression
type: Type
@staticmethod
def int(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(
left=as_intish(left), op=op, right=as_intish(right), type=Type.intish()
)
@staticmethod
def int64(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(
left=as_int64(left), op=op, right=as_int64(right), type=Type.int64()
)
@staticmethod
def intptr(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(
left=as_intptr(left), op=op, right=as_intptr(right), type=Type.intptr()
)
@staticmethod
def icmp(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(
left=as_intptr(left), op=op, right=as_intptr(right), type=Type.bool()
)
@staticmethod
def scmp(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(
left=as_sintish(left, silent=True),
op=op,
right=as_sintish(right, silent=True),
type=Type.bool(),
)
@staticmethod
def sintptr_cmp(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(
left=as_type(left, Type.sintptr(), False),
op=op,
right=as_type(right, Type.sintptr(), False),
type=Type.bool(),
)
@staticmethod
def ucmp(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(
left=as_uintish(left), op=op, right=as_uintish(right), type=Type.bool()
)
@staticmethod
def uintptr_cmp(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(
left=as_type(left, Type.uintptr(), False),
op=op,
right=as_type(right, Type.uintptr(), False),
type=Type.bool(),
)
@staticmethod
def fcmp(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(
left=as_f32(left),
op=op,
right=as_f32(right),
type=Type.bool(),
)
@staticmethod
def dcmp(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(
left=as_f64(left),
op=op,
right=as_f64(right),
type=Type.bool(),
)
@staticmethod
def sint(
left: Expression, op: str, right: Expression, *, silent: bool = False
) -> "BinaryOp":
return BinaryOp(
left=as_sintish(left, silent=silent),
op=op,
right=as_sintish(right, silent=silent),
type=Type.s32(),
)
@staticmethod
def uint(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(
left=as_uintish(left), op=op, right=as_uintish(right), type=Type.u32()
)
@staticmethod
def s64(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(left=as_s64(left), op=op, right=as_s64(right), type=Type.s64())
@staticmethod
def u64(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(left=as_u64(left), op=op, right=as_u64(right), type=Type.u64())
@staticmethod
def f32(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(
left=as_f32(left),
op=op,
right=as_f32(right),
type=Type.f32(),
)
@staticmethod
def f64(left: Expression, op: str, right: Expression) -> "BinaryOp":
return BinaryOp(
left=as_f64(left),
op=op,
right=as_f64(right),
type=Type.f64(),
)
def is_comparison(self) -> bool:
return self.op in ["==", "!=", ">", "<", ">=", "<="]
def is_floating(self) -> bool:
return self.left.type.is_float() and self.right.type.is_float()
def negated(self) -> "Condition":
if (
self.op in ["&&", "||"]
and isinstance(self.left, Condition)
and isinstance(self.right, Condition)
):
# DeMorgan's Laws
return BinaryOp(
left=self.left.negated(),
op={"&&": "||", "||": "&&"}[self.op],
right=self.right.negated(),
type=Type.bool(),
)
if not self.is_comparison() or (
self.is_floating() and self.op in ["<", ">", "<=", ">="]
):
# Floating-point comparisons cannot be negated in any nice way,
# due to nans.
return UnaryOp("!", self, type=Type.bool())
return BinaryOp(
left=self.left,
op={"==": "!=", "!=": "==", ">": "<=", "<": ">=", ">=": "<", "<=": ">"}[
self.op
],
right=self.right,
type=Type.bool(),
)
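# e.g. negating (a && b) yields (!a || !b) via DeMorgan, while a float comparison
# like (f < g) negates to !(f < g) rather than (f >= g), since the two differ
# when either operand is NaN.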
def dependencies(self) -> List[Expression]:
return [self.left, self.right]
def format(self, fmt: Formatter) -> str:
left_expr = late_unwrap(self.left)
right_expr = late_unwrap(self.right)
if (
self.is_comparison()
and isinstance(left_expr, Literal)
and not isinstance(right_expr, Literal)
):
return BinaryOp(
left=right_expr,
op=self.op.translate(str.maketrans("<>", "><")),
right=left_expr,
type=self.type,
).format(fmt)
if (
not self.is_floating()
and isinstance(right_expr, Literal)
and right_expr.value < 0
):
if self.op == "+":
neg = Literal(value=-right_expr.value, type=right_expr.type)
sub = BinaryOp(op="-", left=left_expr, right=neg, type=self.type)
return sub.format(fmt)
if self.op in ("&", "|"):
neg = Literal(value=~right_expr.value, type=right_expr.type)
right = UnaryOp("~", neg, type=Type.any_reg())
expr = BinaryOp(op=self.op, left=left_expr, right=right, type=self.type)
return expr.format(fmt)
# For commutative, left-associative operations, strip unnecessary parentheses.
lhs = left_expr.format(fmt)
if (
isinstance(left_expr, BinaryOp)
and left_expr.op == self.op
and self.op in ASSOCIATIVE_OPS
):
lhs = lhs[1:-1]
# For certain operators, use base-10 (decimal) for the RHS
if self.op in ("/", "%") and isinstance(right_expr, Literal):
rhs = right_expr.format(fmt, force_dec=True)
else:
rhs = right_expr.format(fmt)
# These aren't real operators (or functions); format them as a fn call
if self.op in PSEUDO_FUNCTION_OPS:
return f"{self.op}({lhs}, {rhs})"
return f"({lhs} {self.op} {rhs})"
@dataclass(frozen=True, eq=False)
class TernaryOp(Expression):
cond: Condition
left: Expression
right: Expression
type: Type
def dependencies(self) -> List[Expression]:
return [self.cond, self.left, self.right]
def format(self, fmt: Formatter) -> str:
cond_str = simplify_condition(self.cond).format(fmt)
left_str = self.left.format(fmt)
right_str = self.right.format(fmt)
return f"({cond_str} ? {left_str} : {right_str})"
@dataclass(frozen=True, eq=False)
class UnaryOp(Condition):
op: str
expr: Expression
type: Type
def dependencies(self) -> List[Expression]:
return [self.expr]
@staticmethod
def sint(op: str, expr: Expression) -> "UnaryOp":
expr = as_sintish(expr, silent=True)
return UnaryOp(
op=op,
expr=expr,
type=expr.type,
)
def negated(self) -> "Condition":
if self.op == "!" and isinstance(self.expr, (UnaryOp, BinaryOp)):
return self.expr
return UnaryOp("!", self, type=Type.bool())
def format(self, fmt: Formatter) -> str:
# These aren't real operators (or functions); format them as a fn call
if self.op in PSEUDO_FUNCTION_OPS:
return f"{self.op}({self.expr.format(fmt)})"
return f"{self.op}{self.expr.format(fmt)}"
@dataclass(frozen=True, eq=False)
class ExprCondition(Condition):
expr: Expression
type: Type
is_negated: bool = False
def dependencies(self) -> List[Expression]:
return [self.expr]
def negated(self) -> "Condition":
return ExprCondition(self.expr, self.type, not self.is_negated)
def format(self, fmt: Formatter) -> str:
neg = "!" if self.is_negated else ""
return f"{neg}{self.expr.format(fmt)}"
@dataclass(frozen=True, eq=False)
class CommaConditionExpr(Condition):
statements: List["Statement"]
condition: "Condition"
type: Type = Type.bool()
def dependencies(self) -> List[Expression]:
assert False, "CommaConditionExpr should not be used within translate.py"
return []
def negated(self) -> "Condition":
return CommaConditionExpr(self.statements, self.condition.negated())
def format(self, fmt: Formatter) -> str:
comma_joined = ", ".join(
stmt.format(fmt).rstrip(";") for stmt in self.statements
)
return f"({comma_joined}, {self.condition.format(fmt)})"
@dataclass(frozen=True, eq=False)
class Cast(Expression):
expr: Expression
type: Type
reinterpret: bool = False
silent: bool = True
def dependencies(self) -> List[Expression]:
return [self.expr]
def use(self) -> None:
# Try to unify, to make stringification output better.
self.expr.type.unify(self.type)
super().use()
def needed_for_store(self) -> bool:
if not self.reinterpret:
# int <-> float casts should be emitted even for stores.
return True
if not self.expr.type.unify(self.type):
# Emit casts when types fail to unify.
return True
return False
def is_trivial(self) -> bool:
return (
self.reinterpret
and self.expr.type.is_float() == self.type.is_float()
and is_trivial_expression(self.expr)
)
def format(self, fmt: Formatter) -> str:
if self.reinterpret and self.expr.type.is_float() != self.type.is_float():
# This shouldn't happen, but mark it in the output if it does.
if fmt.valid_syntax:
return (
f"MIPS2C_BITWISE({self.type.format(fmt)}, {self.expr.format(fmt)})"
)
return f"(bitwise {self.type.format(fmt)}) {self.expr.format(fmt)}"
if self.reinterpret and (
self.silent
or (is_type_obvious(self.expr) and self.expr.type.unify(self.type))
):
return self.expr.format(fmt)
if fmt.skip_casts:
return self.expr.format(fmt)
# Function casts require special logic because function calls have
# higher precedence than casts
fn_sig = self.type.get_function_pointer_signature()
if fn_sig:
prototype_sig = self.expr.type.get_function_pointer_signature()
if not prototype_sig or not prototype_sig.unify_with_args(fn_sig):
# A function pointer cast is required if the inner expr is not
# a function pointer, or has incompatible argument types
return f"(({self.type.format(fmt)}) {self.expr.format(fmt)})"
if not prototype_sig.return_type.unify(fn_sig.return_type):
# Only cast the return value of the call
return f"({fn_sig.return_type.format(fmt)}) {self.expr.format(fmt)}"
# No cast needed
return self.expr.format(fmt)
return f"({self.type.format(fmt)}) {self.expr.format(fmt)}"
@dataclass(frozen=True, eq=False)
class FuncCall(Expression):
function: Expression
args: List[Expression]
type: Type
def dependencies(self) -> List[Expression]:
return self.args + [self.function]
def format(self, fmt: Formatter) -> str:
# TODO: The function type may have a different number of params than it had
# when the FuncCall was created. Should we warn that there may be the wrong
# number of arguments at this callsite?
args = ", ".join(format_expr(arg, fmt) for arg in self.args)
return f"{self.function.format(fmt)}({args})"
@dataclass(frozen=True, eq=True)
class LocalVar(Expression):
value: int
type: Type = field(compare=False)
path: Optional[AccessPath] = field(compare=False)
def dependencies(self) -> List[Expression]:
return []
def format(self, fmt: Formatter) -> str:
fallback_name = f"unksp{format_hex(self.value)}"
if self.path is None:
return fallback_name
name = StructAccess.access_path_to_field_name(self.path, fmt)
if name.startswith("->"):
return name[2:]
return fallback_name
def toplevel_decl(self, fmt: Formatter) -> Optional[str]:
"""Return a declaration for this LocalVar, if required."""
# If len(self.path) > 2, then this local is an inner field of another
# local, so it doesn't need to be declared.
if (
self.path is None
or len(self.path) != 2
or not isinstance(self.path[1], str)
):
return None
return self.type.to_decl(self.path[1], fmt)
@dataclass(frozen=True, eq=False)
class RegisterVar(Expression):
reg: Register
name: str
type: Type
def dependencies(self) -> List[Expression]:
return []
def format(self, fmt: Formatter) -> str:
return self.name
@dataclass(frozen=True, eq=True)
class PassedInArg(Expression):
value: int
copied: bool = field(compare=False)
stack_info: StackInfo = field(compare=False, repr=False)
type: Type = field(compare=False)
def dependencies(self) -> List[Expression]:
return []
def format(self, fmt: Formatter) -> str:
assert self.value % 4 == 0
name = self.stack_info.get_param_name(self.value)
return name or f"arg{format_hex(self.value // 4)}"
@dataclass(frozen=True, eq=True)
class SubroutineArg(Expression):
value: int
type: Type = field(compare=False)
def dependencies(self) -> List[Expression]:
return []
def format(self, fmt: Formatter) -> str:
return f"subroutine_arg{format_hex(self.value // 4)}"
@dataclass(eq=True, unsafe_hash=True)
class StructAccess(Expression):
# Represents struct_var->offset.
# This has eq=True since it represents a live expression and not an access
# at a certain point in time -- this sometimes helps get rid of phi nodes.
# prevent_later_uses makes sure it's not used after writes/function calls
# that may invalidate it.
struct_var: Expression
offset: int
target_size: Optional[int]
field_path: Optional[AccessPath] = field(compare=False)
stack_info: Optional[StackInfo] = field(compare=False, repr=False)
type: Type = field(compare=False)
checked_late_field_path: bool = field(default=False, compare=False)
def __post_init__(self) -> None:
# stack_info is used to resolve field_path late
assert (
self.stack_info is not None or self.field_path is not None
), "Must provide at least one of (stack_info, field_path)"
self.assert_valid_field_path(self.field_path)
@staticmethod
def assert_valid_field_path(path: Optional[AccessPath]) -> None:
assert path is None or (
path and isinstance(path[0], int)
), "The first element of the field path, if present, must be an int"
@classmethod
def access_path_to_field_name(cls, path: AccessPath, fmt: Formatter) -> str:
"""
Convert an access path into a dereferencing field name, like the following examples:
- `[0, "foo", 3, "bar"]` into `"->foo[3].bar"`
- `[0, 3, "bar"]` into `"[0][3].bar"`
            - `[0, 1, 2]` into `"[0][1][2]"`
- `[0]` into `"[0]"`
The path must have at least one element, and the first element must be an int.
"""
cls.assert_valid_field_path(path)
output = ""
# Replace an initial "[0]." with "->"
if len(path) >= 2 and path[0] == 0 and isinstance(path[1], str):
output += f"->{path[1]}"
path = path[2:]
for p in path:
if isinstance(p, str):
output += f".{p}"
elif isinstance(p, int):
output += f"[{fmt.format_int(p)}]"
else:
static_assert_unreachable(p)
return output
def dependencies(self) -> List[Expression]:
return [self.struct_var]
def make_reference(self) -> Optional["StructAccess"]:
field_path = self.late_field_path()
if field_path and len(field_path) >= 2 and field_path[-1] == 0:
return replace(self, field_path=field_path[:-1])
return None
def late_field_path(self) -> Optional[AccessPath]:
# If we didn't have a type at the time when the struct access was
# constructed, but now we do, compute field name.
if self.field_path is None and not self.checked_late_field_path:
var = late_unwrap(self.struct_var)
# Format var to recursively resolve any late_field_path it has to
# potentially improve var.type before we look up our field name
var.format(Formatter())
field_path, field_type, _ = var.type.get_deref_field(
self.offset, target_size=self.target_size
)
if field_path is not None:
self.assert_valid_field_path(field_path)
self.field_path = field_path
self.type.unify(field_type)
self.checked_late_field_path = True
return self.field_path
def late_has_known_type(self) -> bool:
if self.late_field_path() is not None:
return True
assert (
self.stack_info is not None
), "StructAccess must have stack_info if field_path isn't set"
if self.offset == 0:
var = late_unwrap(self.struct_var)
if (
not self.stack_info.has_nonzero_access(var)
and isinstance(var, AddressOf)
and isinstance(var.expr, GlobalSymbol)
and var.expr.type_provided
):
return True
return False
def format(self, fmt: Formatter) -> str:
var = late_unwrap(self.struct_var)
has_nonzero_access = False
if self.stack_info is not None:
has_nonzero_access = self.stack_info.has_nonzero_access(var)
field_path = self.late_field_path()
if field_path is not None and field_path != [0]:
has_nonzero_access = True
elif fmt.valid_syntax and (self.offset != 0 or has_nonzero_access):
offset_str = fmt.format_int(self.offset)
return f"MIPS2C_FIELD({var.format(fmt)}, {Type.ptr(self.type).format(fmt)}, {offset_str})"
else:
prefix = "unk" + ("_" if fmt.coding_style.unknown_underscore else "")
field_path = [0, prefix + format_hex(self.offset)]
field_name = self.access_path_to_field_name(field_path, fmt)
# Rewrite `(&x)->y` to `x.y` by stripping `AddressOf` & setting deref=False
deref = True
if (
isinstance(var, AddressOf)
and not var.expr.type.is_array()
and field_name.startswith("->")
):
var = var.expr
field_name = field_name.replace("->", ".", 1)
deref = False
# Rewrite `x->unk0` to `*x` and `x.unk0` to `x`, unless has_nonzero_access
if self.offset == 0 and not has_nonzero_access:
return f"{'*' if deref else ''}{var.format(fmt)}"
return f"{parenthesize_for_struct_access(var, fmt)}{field_name}"
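# Editor's illustrative sketch (not part of the original module): a tiny helper showing
# how StructAccess.access_path_to_field_name() renders an access path; the expected
# result mirrors the first example in its docstring.
def _access_path_example() -> str:
    # Yields "->foo[3].bar" with a default Formatter.
    return StructAccess.access_path_to_field_name([0, "foo", 3, "bar"], Formatter())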
@dataclass(frozen=True, eq=True)
class ArrayAccess(Expression):
# Represents ptr[index]. eq=True for symmetry with StructAccess.
ptr: Expression
index: Expression
type: Type = field(compare=False)
def dependencies(self) -> List[Expression]:
return [self.ptr, self.index]
def format(self, fmt: Formatter) -> str:
base = parenthesize_for_struct_access(self.ptr, fmt)
index = format_expr(self.index, fmt)
return f"{base}[{index}]"
@dataclass(eq=False)
class GlobalSymbol(Expression):
symbol_name: str
type: Type
asm_data_entry: Optional[AsmDataEntry] = None
symbol_in_context: bool = False
type_provided: bool = False
initializer_in_typemap: bool = False
demangled_str: Optional[str] = None
def dependencies(self) -> List[Expression]:
return []
def is_string_constant(self) -> bool:
ent = self.asm_data_entry
if not ent or not ent.is_string:
return False
return len(ent.data) == 1 and isinstance(ent.data[0], bytes)
def format_string_constant(self, fmt: Formatter) -> str:
assert self.is_string_constant(), "checked by caller"
assert self.asm_data_entry and isinstance(self.asm_data_entry.data[0], bytes)
has_trailing_null = False
data = self.asm_data_entry.data[0]
while data and data[-1] == 0:
data = data[:-1]
has_trailing_null = True
data = b"".join(map(escape_byte, data))
strdata = data.decode("utf-8", "backslashreplace")
ret = f'"{strdata}"'
if not has_trailing_null:
ret += " /* not null-terminated */"
return ret
def format(self, fmt: Formatter) -> str:
return self.symbol_name
def potential_array_dim(self, element_size: int) -> Tuple[int, int]:
"""
Using the size of the symbol's `asm_data_entry` and a potential array element
size, return the corresponding array dimension and number of "extra" bytes left
at the end of the symbol's data.
If the extra bytes are nonzero, then it's likely that `element_size` is incorrect.
"""
# If we don't have the .data/.rodata entry for this symbol, we can't guess
# its array dimension. Jump tables are ignored and not treated as arrays.
if self.asm_data_entry is None or self.asm_data_entry.is_jtbl:
return 0, element_size
min_data_size, max_data_size = self.asm_data_entry.size_range_bytes()
if element_size > max_data_size:
# The type is too big for the data (not an array)
return 0, max_data_size
# Check if it's possible that this symbol is not an array, and is just 1 element
if min_data_size <= element_size <= max_data_size and not self.type.is_array():
return 1, 0
array_dim, extra_bytes = divmod(min_data_size, element_size)
if extra_bytes != 0:
# If it's not possible to make an exact multiple of element_size by incorporating
# bytes from the padding, then indicate that in the return value.
padding_bytes = element_size - extra_bytes
if min_data_size + padding_bytes > max_data_size:
return array_dim, extra_bytes
# Include potential padding in the array. Although this is unlikely to match the original C,
# it's much easier to manually remove all or some of these elements than to add them back in.
return max_data_size // element_size, 0
@dataclass(frozen=True, eq=True)
class Literal(Expression):
value: int
type: Type = field(compare=False, default_factory=Type.any)
elide_cast: bool = field(compare=False, default=False)
def dependencies(self) -> List[Expression]:
return []
def format(self, fmt: Formatter, force_dec: bool = False) -> str:
enum_name = self.type.get_enum_name(self.value)
if enum_name is not None:
return enum_name
if self.type.is_likely_float():
if self.type.get_size_bits() == 64:
return format_f64_imm(self.value)
else:
return format_f32_imm(self.value) + "f"
if self.type.is_pointer() and self.value == 0:
return "NULL"
prefix = ""
suffix = ""
if not fmt.skip_casts and not self.elide_cast:
if self.type.is_pointer():
prefix = f"({self.type.format(fmt)})"
if self.type.is_unsigned():
suffix = "U"
if force_dec:
value = str(self.value)
else:
size_bits = self.type.get_size_bits()
v = self.value
# The top 2 bits are tested rather than just the sign bit
# to help prevent N64 VRAM pointers (0x80000000+) turning negative
if (
self.type.is_signed()
and size_bits
and v & (1 << (size_bits - 1))
and v > (3 << (size_bits - 2))
and v < 2 ** size_bits
):
v -= 1 << size_bits
value = fmt.format_int(v, size_bits=size_bits)
return prefix + value + suffix
def likely_partial_offset(self) -> bool:
return self.value % 2 ** 15 in (0, 2 ** 15 - 1) and self.value < 0x1000000
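# Editor's illustrative sketch (not part of the original module): likely_partial_offset()
# is a heuristic for immediates that look like one half of a split 32-bit constant
# (e.g. from a %hi/%lo pair): multiples of 0x8000, or one less, below 0x1000000.
def _likely_partial_offset_examples() -> Tuple[bool, bool, bool]:
    # (True, True, False) respectively.
    return (
        Literal(0x18000).likely_partial_offset(),
        Literal(0x7FFF).likely_partial_offset(),
        Literal(0x1234).likely_partial_offset(),
    )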
@dataclass(frozen=True, eq=True)
class AddressOf(Expression):
expr: Expression
type: Type = field(compare=False, default_factory=Type.ptr)
def dependencies(self) -> List[Expression]:
return [self.expr]
def format(self, fmt: Formatter) -> str:
if isinstance(self.expr, GlobalSymbol):
if self.expr.is_string_constant():
return self.expr.format_string_constant(fmt)
if self.expr.type.is_array():
return f"{self.expr.format(fmt)}"
if self.expr.type.is_function():
# Functions are automatically converted to function pointers
# without an explicit `&` by the compiler
return f"{self.expr.format(fmt)}"
if isinstance(self.expr, StructAccess):
# Simplify `&x[0]` into `x`
ref = self.expr.make_reference()
if ref:
return f"{ref.format(fmt)}"
return f"&{self.expr.format(fmt)}"
@dataclass(frozen=True)
class Lwl(Expression):
load_expr: Expression
key: Tuple[int, object]
type: Type = field(compare=False, default_factory=Type.any_reg)
def dependencies(self) -> List[Expression]:
return [self.load_expr]
def format(self, fmt: Formatter) -> str:
return f"MIPS2C_LWL({self.load_expr.format(fmt)})"
@dataclass(frozen=True)
class Load3Bytes(Expression):
load_expr: Expression
type: Type = field(compare=False, default_factory=Type.any_reg)
def dependencies(self) -> List[Expression]:
return [self.load_expr]
def format(self, fmt: Formatter) -> str:
if fmt.valid_syntax:
return f"MIPS2C_FIRST3BYTES({self.load_expr.format(fmt)})"
return f"(first 3 bytes) {self.load_expr.format(fmt)}"
@dataclass(frozen=True)
class UnalignedLoad(Expression):
load_expr: Expression
type: Type = field(compare=False, default_factory=Type.any_reg)
def dependencies(self) -> List[Expression]:
return [self.load_expr]
def format(self, fmt: Formatter) -> str:
if fmt.valid_syntax:
return f"MIPS2C_UNALIGNED32({self.load_expr.format(fmt)})"
return f"(unaligned s32) {self.load_expr.format(fmt)}"
@dataclass(frozen=False, eq=False)
class EvalOnceExpr(Expression):
wrapped_expr: Expression
var: Var
type: Type
# True for function calls/errors
emit_exactly_once: bool
# Mutable state:
    # True if this EvalOnceExpr should be totally transparent and not emit a variable.
    # It may dynamically change from true to false due to forced emissions.
# Initially, it is based on is_trivial_expression.
trivial: bool
# True if this EvalOnceExpr must emit a variable (see RegMeta.force)
forced_emit: bool = False
# The number of expressions that depend on this EvalOnceExpr; we emit a variable
# if this is > 1.
num_usages: int = 0
def dependencies(self) -> List[Expression]:
# (this is a bit iffy since state can change over time, but improves uses_expr)
if self.need_decl():
return []
return [self.wrapped_expr]
def use(self) -> None:
self.num_usages += 1
if self.trivial or (self.num_usages == 1 and not self.emit_exactly_once):
self.wrapped_expr.use()
def force(self) -> None:
# Transition to non-trivial, and mark as used multiple times to force a var.
# TODO: If it was originally trivial, we may previously have marked its
# wrappee used multiple times, even though we now know that it should
# have been marked just once... We could fix that by moving marking of
# trivial EvalOnceExpr's to the very end. At least the consequences of
# getting this wrong are pretty mild -- it just causes extraneous var
# emission in rare cases.
self.trivial = False
self.forced_emit = True
self.use()
self.use()
def need_decl(self) -> bool:
return self.num_usages > 1 and not self.trivial
def format(self, fmt: Formatter) -> str:
if not self.need_decl():
return self.wrapped_expr.format(fmt)
else:
return self.var.format(fmt)
@dataclass(frozen=False, eq=False)
class PhiExpr(Expression):
reg: Register
node: Node
type: Type
used_phis: List["PhiExpr"]
name: Optional[str] = None
num_usages: int = 0
replacement_expr: Optional[Expression] = None
used_by: Optional["PhiExpr"] = None
def dependencies(self) -> List[Expression]:
return []
def get_var_name(self) -> str:
return self.name or f"unnamed-phi({self.reg.register_name})"
def use(self, from_phi: Optional["PhiExpr"] = None) -> None:
if self.num_usages == 0:
self.used_phis.append(self)
self.used_by = from_phi
self.num_usages += 1
if self.used_by != from_phi:
self.used_by = None
if self.replacement_expr is not None:
self.replacement_expr.use()
def propagates_to(self) -> "PhiExpr":
"""Compute the phi that stores to this phi should propagate to. This is
        usually the phi itself, but if the phi is used only once, for the purpose of
computing another phi, we forward the store there directly. This is
admittedly a bit sketchy, in case the phi is in scope here and used
later on... but we have that problem with regular phi assignments as
well."""
if self.used_by is None or self.replacement_expr is not None:
return self
return self.used_by.propagates_to()
def format(self, fmt: Formatter) -> str:
if self.replacement_expr:
return self.replacement_expr.format(fmt)
return self.get_var_name()
@dataclass
class SwitchControl:
control_expr: Expression
jump_table: Optional[GlobalSymbol] = None
offset: int = 0
is_irregular: bool = False
def matches_guard_condition(self, cond: Condition) -> bool:
"""
Return True if `cond` is one of:
- `((control_expr + (-offset)) >= len(jump_table))`, if `offset != 0`
- `(control_expr >= len(jump_table))`, if `offset == 0`
These are the appropriate bounds checks before using `jump_table`.
"""
cmp_expr = simplify_condition(cond)
if not isinstance(cmp_expr, BinaryOp) or cmp_expr.op not in (">=", ">"):
return False
cmp_exclusive = cmp_expr.op == ">"
# The LHS may have been wrapped in a u32 cast
left_expr = late_unwrap(cmp_expr.left)
if isinstance(left_expr, Cast):
left_expr = late_unwrap(left_expr.expr)
if self.offset != 0:
if (
not isinstance(left_expr, BinaryOp)
or late_unwrap(left_expr.left) != late_unwrap(self.control_expr)
or left_expr.op != "+"
or late_unwrap(left_expr.right) != Literal(-self.offset)
):
return False
elif left_expr != late_unwrap(self.control_expr):
return False
right_expr = late_unwrap(cmp_expr.right)
if (
self.jump_table is None
or self.jump_table.asm_data_entry is None
or not self.jump_table.asm_data_entry.is_jtbl
or not isinstance(right_expr, Literal)
):
return False
# Count the number of labels (exclude padding bytes)
jump_table_len = sum(
isinstance(e, str) for e in self.jump_table.asm_data_entry.data
)
return right_expr.value + int(cmp_exclusive) == jump_table_len
@staticmethod
def irregular_from_expr(control_expr: Expression) -> "SwitchControl":
"""
        Return a SwitchControl representing an "irregular" switch statement.
The switch does not have a single jump table; instead it is a series of
if statements & other switches.
"""
return SwitchControl(
control_expr=control_expr,
jump_table=None,
offset=0,
is_irregular=True,
)
@staticmethod
def from_expr(expr: Expression) -> "SwitchControl":
"""
Try to convert `expr` into a SwitchControl from one of the following forms:
- `*(&jump_table + (control_expr * 4))`
- `*(&jump_table + ((control_expr + (-offset)) * 4))`
If `offset` is not present, it defaults to 0.
If `expr` does not match, return a thin wrapper around the input expression,
with `jump_table` set to `None`.
"""
# The "error" expression we use if we aren't able to parse `expr`
error_expr = SwitchControl(expr)
# Match `*(&jump_table + (control_expr * 4))`
struct_expr = early_unwrap(expr)
if not isinstance(struct_expr, StructAccess) or struct_expr.offset != 0:
return error_expr
add_expr = early_unwrap(struct_expr.struct_var)
if not isinstance(add_expr, BinaryOp) or add_expr.op != "+":
return error_expr
        # Check for either `*(&jump_table + (control_expr * 4))` or `*((control_expr * 4) + &jump_table)`
left_expr, right_expr = early_unwrap(add_expr.left), early_unwrap(
add_expr.right
)
if isinstance(left_expr, AddressOf) and isinstance(
left_expr.expr, GlobalSymbol
):
jtbl_addr_expr, mul_expr = left_expr, right_expr
elif isinstance(right_expr, AddressOf) and isinstance(
right_expr.expr, GlobalSymbol
):
mul_expr, jtbl_addr_expr = left_expr, right_expr
else:
return error_expr
jump_table = jtbl_addr_expr.expr
assert isinstance(jump_table, GlobalSymbol)
if (
not isinstance(mul_expr, BinaryOp)
or mul_expr.op != "*"
or early_unwrap(mul_expr.right) != Literal(4)
):
return error_expr
control_expr = mul_expr.left
# Optionally match `control_expr + (-offset)`
offset = 0
uw_control_expr = early_unwrap(control_expr)
if isinstance(uw_control_expr, BinaryOp) and uw_control_expr.op == "+":
offset_lit = early_unwrap(uw_control_expr.right)
if isinstance(offset_lit, Literal):
control_expr = uw_control_expr.left
offset = -offset_lit.value
# Check that it is really a jump table
if jump_table.asm_data_entry is None or not jump_table.asm_data_entry.is_jtbl:
return error_expr
return SwitchControl(control_expr, jump_table, offset)
@dataclass
class EvalOnceStmt(Statement):
expr: EvalOnceExpr
def need_decl(self) -> bool:
return self.expr.need_decl()
def should_write(self) -> bool:
if self.expr.emit_exactly_once:
return self.expr.num_usages != 1
else:
return self.need_decl()
def format(self, fmt: Formatter) -> str:
val_str = format_expr(elide_casts_for_store(self.expr.wrapped_expr), fmt)
if self.expr.emit_exactly_once and self.expr.num_usages == 0:
return f"{val_str};"
return f"{self.expr.var.format(fmt)} = {val_str};"
@dataclass
class SetPhiStmt(Statement):
phi: PhiExpr
expr: Expression
def should_write(self) -> bool:
expr = self.expr
if isinstance(expr, PhiExpr) and expr.propagates_to() != expr:
# When we have phi1 = phi2, and phi2 is only used in this place,
# the SetPhiStmt for phi2 will store directly to phi1 and we can
# skip this store.
assert expr.propagates_to() == self.phi.propagates_to()
return False
if late_unwrap(expr) == self.phi.propagates_to():
# Elide "phi = phi".
return False
return True
def format(self, fmt: Formatter) -> str:
return format_assignment(self.phi.propagates_to(), self.expr, fmt)
@dataclass
class ExprStmt(Statement):
expr: Expression
def should_write(self) -> bool:
return True
def format(self, fmt: Formatter) -> str:
return f"{format_expr(self.expr, fmt)};"
@dataclass
class StoreStmt(Statement):
source: Expression
dest: Expression
def should_write(self) -> bool:
return True
def format(self, fmt: Formatter) -> str:
dest = self.dest
source = self.source
if (
isinstance(dest, StructAccess) and dest.late_has_known_type()
) or isinstance(dest, (ArrayAccess, LocalVar, RegisterVar, SubroutineArg)):
# Known destination; fine to elide some casts.
source = elide_casts_for_store(source)
return format_assignment(dest, source, fmt)
@dataclass
class CommentStmt(Statement):
contents: str
def should_write(self) -> bool:
return True
def format(self, fmt: Formatter) -> str:
return f"// {self.contents}"
def error_stmt(msg: str) -> ExprStmt:
return ExprStmt(ErrorExpr(msg))
@dataclass(frozen=True)
class AddressMode:
offset: int
rhs: Register
def __str__(self) -> str:
if self.offset:
return f"{self.offset}({self.rhs})"
else:
return f"({self.rhs})"
@dataclass(frozen=True)
class RawSymbolRef:
offset: int
sym: AsmGlobalSymbol
def __str__(self) -> str:
if self.offset:
return f"{self.sym.symbol_name} + {self.offset}"
else:
return self.sym.symbol_name
@dataclass
class RegMeta:
# True if this regdata is unchanged from the start of the block
inherited: bool = False
# True if this regdata is read by some later node
is_read: bool = False
# True if the value derives solely from function call return values
function_return: bool = False
    # True if the value derives solely from regdatas with is_read = True or
    # function_return = True, or from passed-in arguments
uninteresting: bool = False
# True if the regdata must be replaced by variable if it is ever read
force: bool = False
# True if the regdata was assigned by an Instruction marked as in_pattern;
# it was part of a matched IR pattern but couldn't be elided at the time
in_pattern: bool = False
@dataclass
class RegData:
value: Expression
meta: RegMeta
@dataclass
class RegInfo:
stack_info: StackInfo = field(repr=False)
contents: Dict[Register, RegData] = field(default_factory=dict)
read_inherited: Set[Register] = field(default_factory=set)
_active_instr: Optional[Instruction] = None
def __getitem__(self, key: Register) -> Expression:
if self._active_instr is not None and key not in self._active_instr.inputs:
lineno = self._active_instr.meta.lineno
return ErrorExpr(f"Read from unset register {key} on line {lineno}")
if key == Register("zero"):
return Literal(0)
data = self.contents.get(key)
if data is None:
return ErrorExpr(f"Read from unset register {key}")
ret = data.value
data.meta.is_read = True
if data.meta.inherited:
self.read_inherited.add(key)
if isinstance(ret, PassedInArg) and not ret.copied:
# Create a new argument object to better distinguish arguments we
# are called with from arguments passed to subroutines. Also, unify
# the argument's type with what we can guess from the register used.
val, arg = self.stack_info.get_argument(ret.value)
self.stack_info.add_argument(arg)
val.type.unify(ret.type)
return val
if data.meta.force:
assert isinstance(ret, EvalOnceExpr)
ret.force()
return ret
def __contains__(self, key: Register) -> bool:
return key in self.contents
def __setitem__(self, key: Register, value: Expression) -> None:
self.set_with_meta(key, value, RegMeta())
def set_with_meta(self, key: Register, value: Expression, meta: RegMeta) -> None:
if self._active_instr is not None and key not in self._active_instr.outputs:
raise DecompFailure(f"Undeclared write to {key} in {self._active_instr}")
self.unchecked_set_with_meta(key, value, meta)
def unchecked_set_with_meta(
self, key: Register, value: Expression, meta: RegMeta
) -> None:
assert key != Register("zero")
self.contents[key] = RegData(value, meta)
def __delitem__(self, key: Register) -> None:
assert key != Register("zero")
del self.contents[key]
def get_raw(self, key: Register) -> Optional[Expression]:
data = self.contents.get(key)
return data.value if data is not None else None
def get_meta(self, key: Register) -> Optional[RegMeta]:
data = self.contents.get(key)
return data.meta if data is not None else None
@contextmanager
def current_instr(self, instr: Instruction) -> Iterator[None]:
self._active_instr = instr
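        # Note: `current_instr` in the `with` below resolves to the module-level
        # helper of the same name, not to this method (a method's own name is not
        # in scope inside its body), so there is no recursion here.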
try:
with current_instr(instr):
yield
finally:
self._active_instr = None
def __str__(self) -> str:
return ", ".join(
f"{k}: {v.value}"
for k, v in sorted(self.contents.items(), key=lambda x: x[0].register_name)
if not self.stack_info.should_save(v.value, None)
)
@dataclass
class BlockInfo:
"""
Contains translated assembly code (to_write), the block's branch condition,
and block's final register states.
"""
to_write: List[Statement]
return_value: Optional[Expression]
switch_control: Optional[SwitchControl]
branch_condition: Optional[Condition]
final_register_states: RegInfo
has_function_call: bool
def __str__(self) -> str:
newline = "\n\t"
return "\n".join(
[
f"Statements: {newline.join(str(w) for w in self.statements_to_write())}",
f"Branch condition: {self.branch_condition}",
f"Final register states: {self.final_register_states}",
]
)
def statements_to_write(self) -> List[Statement]:
return [st for st in self.to_write if st.should_write()]
def get_block_info(node: Node) -> BlockInfo:
ret = node.block.block_info
assert isinstance(ret, BlockInfo)
return ret
@dataclass
class InstrArgs:
raw_args: List[Argument]
regs: RegInfo = field(repr=False)
stack_info: StackInfo = field(repr=False)
def raw_arg(self, index: int) -> Argument:
assert index >= 0
if index >= len(self.raw_args):
raise DecompFailure(
f"Too few arguments for instruction, expected at least {index + 1}"
)
return self.raw_args[index]
def reg_ref(self, index: int) -> Register:
ret = self.raw_arg(index)
if not isinstance(ret, Register):
raise DecompFailure(
f"Expected instruction argument to be a register, but found {ret}"
)
return ret
def imm_value(self, index: int) -> int:
arg = self.full_imm(index)
assert isinstance(arg, Literal)
return arg.value
def reg(self, index: int) -> Expression:
return self.regs[self.reg_ref(index)]
def dreg(self, index: int) -> Expression:
"""Extract a double from a register. This may involve reading both the
mentioned register and the next."""
reg = self.reg_ref(index)
if not reg.is_float():
raise DecompFailure(
f"Expected instruction argument {reg} to be a float register"
)
ret = self.regs[reg]
# PPC: FPR's hold doubles (64 bits), so we don't need to do anything special
if self.stack_info.global_info.arch.arch == Target.ArchEnum.PPC:
return ret
# MIPS: Look at the paired FPR to get the full 64-bit value
if not isinstance(ret, Literal) or ret.type.get_size_bits() == 64:
return ret
reg_num = int(reg.register_name[1:])
if reg_num % 2 != 0:
raise DecompFailure(
"Tried to use a double-precision instruction with odd-numbered float "
f"register {reg}"
)
other = self.regs[Register(f"f{reg_num+1}")]
if not isinstance(other, Literal) or other.type.get_size_bits() == 64:
raise DecompFailure(
f"Unable to determine a value for double-precision register {reg} "
"whose second half is non-static. This is a mips_to_c restriction "
"which may be lifted in the future."
)
value = ret.value | (other.value << 32)
return Literal(value, type=Type.f64())
def cmp_reg(self, key: str) -> Condition:
cond = self.regs[Register(key)]
if not isinstance(cond, Condition):
cond = BinaryOp.icmp(cond, "!=", Literal(0))
return cond
def full_imm(self, index: int) -> Expression:
arg = strip_macros(self.raw_arg(index))
ret = literal_expr(arg, self.stack_info)
return ret
def imm(self, index: int) -> Expression:
ret = self.full_imm(index)
if isinstance(ret, Literal):
return Literal(((ret.value + 0x8000) & 0xFFFF) - 0x8000)
return ret
def unsigned_imm(self, index: int) -> Expression:
ret = self.full_imm(index)
if isinstance(ret, Literal):
return Literal(ret.value & 0xFFFF)
return ret
def hi_imm(self, index: int) -> Argument:
arg = self.raw_arg(index)
if not isinstance(arg, Macro) or arg.macro_name not in ("hi", "ha", "h"):
raise DecompFailure(
f"Got lui/lis instruction with macro other than %hi/@ha/@h: {arg}"
)
return arg.argument
def shifted_imm(self, index: int) -> Expression:
# TODO: Should this be part of hi_imm? Do we need to handle @ha?
raw_imm = self.unsigned_imm(index)
assert isinstance(raw_imm, Literal)
return Literal(raw_imm.value << 16)
def memory_ref(self, index: int) -> Union[AddressMode, RawSymbolRef]:
ret = strip_macros(self.raw_arg(index))
# In MIPS, we want to allow "lw $v0, symbol + 4", which is outputted by
# some disassemblers (like IDA) even though it isn't valid assembly.
# For PPC, we want to allow "lwz $r1, symbol@sda21($r13)" where $r13 is
# assumed to point to the start of a small data area (SDA).
if isinstance(ret, AsmGlobalSymbol):
return RawSymbolRef(offset=0, sym=ret)
if (
isinstance(ret, BinOp)
and ret.op in "+-"
and isinstance(ret.lhs, AsmGlobalSymbol)
and isinstance(ret.rhs, AsmLiteral)
):
sign = 1 if ret.op == "+" else -1
return RawSymbolRef(offset=(ret.rhs.value * sign), sym=ret.lhs)
if not isinstance(ret, AsmAddressMode):
raise DecompFailure(
"Expected instruction argument to be of the form offset($register), "
f"but found {ret}"
)
if not isinstance(ret.lhs, AsmLiteral):
raise DecompFailure(
f"Unable to parse offset for instruction argument {ret}. "
"Expected a constant or a %lo macro."
)
return AddressMode(offset=ret.lhs.signed_value(), rhs=ret.rhs)
def count(self) -> int:
return len(self.raw_args)
def deref(
arg: Union[AddressMode, RawSymbolRef, Expression],
regs: RegInfo,
stack_info: StackInfo,
*,
size: int,
store: bool = False,
) -> Expression:
if isinstance(arg, Expression):
offset = 0
var = arg
elif isinstance(arg, AddressMode):
offset = arg.offset
if stack_info.is_stack_reg(arg.rhs):
return stack_info.get_stack_var(offset, store=store)
var = regs[arg.rhs]
else:
offset = arg.offset
var = stack_info.global_info.address_of_gsym(arg.sym.symbol_name)
# Struct member is being dereferenced.
# Cope slightly better with raw pointers.
if isinstance(var, Literal) and var.value % (2 ** 16) == 0:
var = Literal(var.value + offset, type=var.type)
offset = 0
# Handle large struct offsets.
uw_var = early_unwrap(var)
if isinstance(uw_var, BinaryOp) and uw_var.op == "+":
for base, addend in [(uw_var.left, uw_var.right), (uw_var.right, uw_var.left)]:
if isinstance(addend, Literal) and addend.likely_partial_offset():
offset += addend.value
var = base
uw_var = early_unwrap(var)
break
var.type.unify(Type.ptr())
stack_info.record_struct_access(var, offset)
field_name: Optional[str] = None
type: Type = stack_info.unique_type_for("struct", (uw_var, offset), Type.any())
# Struct access with type information.
array_expr = array_access_from_add(
var, offset, stack_info, target_size=size, ptr=False
)
if array_expr is not None:
return array_expr
field_path, field_type, _ = var.type.get_deref_field(offset, target_size=size)
if field_path is not None:
field_type.unify(type)
type = field_type
else:
field_path = None
return StructAccess(
struct_var=var,
offset=offset,
target_size=size,
field_path=field_path,
stack_info=stack_info,
type=type,
)
def is_trivial_expression(expr: Expression) -> bool:
# Determine whether an expression should be evaluated only once or not.
if isinstance(
expr,
(
EvalOnceExpr,
Literal,
GlobalSymbol,
LocalVar,
PassedInArg,
PhiExpr,
RegisterVar,
SubroutineArg,
),
):
return True
if isinstance(expr, AddressOf):
return all(is_trivial_expression(e) for e in expr.dependencies())
if isinstance(expr, Cast):
return expr.is_trivial()
return False
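# Editor's illustrative sketch (not part of the original module): literals and other
# leaf expressions count as trivial (cheap and safe to repeat), while a computed
# BinaryOp does not. The helper is hypothetical and only demonstrates the predicate.
def _is_trivial_expression_examples(x: Expression) -> Tuple[bool, bool]:
    # (True, False) respectively.
    return (
        is_trivial_expression(Literal(4)),
        is_trivial_expression(
            BinaryOp(left=x, op="+", right=Literal(1), type=Type.s32())
        ),
    )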
def is_type_obvious(expr: Expression) -> bool:
"""
Determine whether an expression's type is "obvious", e.g. because the
expression refers to a variable which has a declaration. With perfect type
    information this function would not be needed.
This function may produce wrong results while code is being generated,
since at that point we don't know the final status of EvalOnceExpr's.
"""
if isinstance(
expr,
(
Cast,
Literal,
AddressOf,
LocalVar,
PhiExpr,
PassedInArg,
RegisterVar,
FuncCall,
),
):
return True
if isinstance(expr, EvalOnceExpr):
if expr.need_decl():
return True
return is_type_obvious(expr.wrapped_expr)
return False
def simplify_condition(expr: Expression) -> Expression:
"""
Simplify a boolean expression.
This function may produce wrong results while code is being generated,
since at that point we don't know the final status of EvalOnceExpr's.
"""
if isinstance(expr, EvalOnceExpr) and not expr.need_decl():
return simplify_condition(expr.wrapped_expr)
if isinstance(expr, UnaryOp):
inner = simplify_condition(expr.expr)
if expr.op == "!" and isinstance(inner, Condition):
return inner.negated()
return UnaryOp(expr=inner, op=expr.op, type=expr.type)
if isinstance(expr, BinaryOp):
left = simplify_condition(expr.left)
right = simplify_condition(expr.right)
if isinstance(left, BinaryOp) and left.is_comparison() and right == Literal(0):
if expr.op == "==":
return simplify_condition(left.negated())
if expr.op == "!=":
return left
if (
expr.is_comparison()
and isinstance(left, Literal)
and not isinstance(right, Literal)
):
return BinaryOp(
left=right,
op=expr.op.translate(str.maketrans("<>", "><")),
right=left,
type=expr.type,
)
return BinaryOp(left=left, op=expr.op, right=right, type=expr.type)
return expr
def balanced_parentheses(string: str) -> bool:
"""
Check if parentheses in a string are balanced, ignoring any non-parenthesis
characters. E.g. true for "(x())yz", false for ")(" or "(".
"""
bal = 0
for c in string:
if c == "(":
bal += 1
elif c == ")":
if bal == 0:
return False
bal -= 1
return bal == 0
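# Editor's illustrative sketch (not part of the original module): quick checks for
# balanced_parentheses(), mirroring the examples in its docstring.
def _balanced_parentheses_examples() -> Tuple[bool, bool, bool]:
    # (True, False, False) respectively.
    return (
        balanced_parentheses("(x())yz"),
        balanced_parentheses(")("),
        balanced_parentheses("("),
    )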
def format_expr(expr: Expression, fmt: Formatter) -> str:
"""Stringify an expression, stripping unnecessary parentheses around it."""
ret = expr.format(fmt)
if ret.startswith("(") and balanced_parentheses(ret[1:-1]):
return ret[1:-1]
return ret
def format_assignment(dest: Expression, source: Expression, fmt: Formatter) -> str:
"""Stringify `dest = source;`."""
dest = late_unwrap(dest)
source = late_unwrap(source)
if isinstance(source, BinaryOp) and source.op in COMPOUND_ASSIGNMENT_OPS:
rhs = None
if late_unwrap(source.left) == dest:
rhs = source.right
elif late_unwrap(source.right) == dest and source.op in ASSOCIATIVE_OPS:
rhs = source.left
if rhs is not None:
return f"{dest.format(fmt)} {source.op}= {format_expr(rhs, fmt)};"
return f"{dest.format(fmt)} = {format_expr(source, fmt)};"
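# Editor's illustrative sketch (not part of the original module): format_assignment()
# recognizes `dest = dest <op> rhs` and emits a compound assignment, assuming "+" is in
# COMPOUND_ASSIGNMENT_OPS. The helper is hypothetical; it reuses the same RegisterVar
# object on both sides so the (identity-based, since eq=False) comparison succeeds.
def _format_assignment_example(var: RegisterVar, fmt: Formatter) -> str:
    incremented = BinaryOp(left=var, op="+", right=Literal(1), type=var.type)
    return format_assignment(var, incremented, fmt)  # e.g. "v0 += 1;"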
def parenthesize_for_struct_access(expr: Expression, fmt: Formatter) -> str:
# Nested dereferences may need to be parenthesized. All other
# expressions will already have adequate parentheses added to them.
s = expr.format(fmt)
if (
s.startswith("*")
or s.startswith("&")
or (isinstance(expr, Cast) and expr.needed_for_store())
):
return f"({s})"
return s
def elide_casts_for_store(expr: Expression) -> Expression:
uw_expr = late_unwrap(expr)
if isinstance(uw_expr, Cast) and not uw_expr.needed_for_store():
return elide_casts_for_store(uw_expr.expr)
if isinstance(uw_expr, Literal) and uw_expr.type.is_int():
# Avoid suffixes for unsigned ints
return replace(uw_expr, elide_cast=True)
return uw_expr
def uses_expr(expr: Expression, expr_filter: Callable[[Expression], bool]) -> bool:
if expr_filter(expr):
return True
for e in expr.dependencies():
if uses_expr(e, expr_filter):
return True
return False
def late_unwrap(expr: Expression) -> Expression:
"""
Unwrap EvalOnceExpr's, stopping at variable boundaries.
This function may produce wrong results while code is being generated,
since at that point we don't know the final status of EvalOnceExpr's.
"""
if isinstance(expr, EvalOnceExpr) and not expr.need_decl():
return late_unwrap(expr.wrapped_expr)
if isinstance(expr, PhiExpr) and expr.replacement_expr is not None:
return late_unwrap(expr.replacement_expr)
return expr
def early_unwrap(expr: Expression) -> Expression:
"""
Unwrap EvalOnceExpr's, even past variable boundaries.
This is fine to use even while code is being generated, but disrespects decisions
to use a temp for a value, so use with care.
"""
if (
isinstance(expr, EvalOnceExpr)
and not expr.forced_emit
and not expr.emit_exactly_once
):
return early_unwrap(expr.wrapped_expr)
return expr
def early_unwrap_ints(expr: Expression) -> Expression:
"""
Unwrap EvalOnceExpr's, even past variable boundaries or through int Cast's
This is a bit sketchier than early_unwrap(), but can be used for pattern matching.
"""
uw_expr = early_unwrap(expr)
if isinstance(uw_expr, Cast) and uw_expr.reinterpret and uw_expr.type.is_int():
return early_unwrap_ints(uw_expr.expr)
return uw_expr
def unwrap_deep(expr: Expression) -> Expression:
"""
Unwrap EvalOnceExpr's, even past variable boundaries.
This is generally a sketchy thing to do, try to avoid it. In particular:
- the returned expression is not usable for emission, because it may contain
accesses at an earlier point in time or an expression that should not be repeated.
- just because unwrap_deep(a) == unwrap_deep(b) doesn't mean a and b are
      interchangeable, because they may be computed in different places.
"""
if isinstance(expr, EvalOnceExpr):
return unwrap_deep(expr.wrapped_expr)
return expr
def literal_expr(arg: Argument, stack_info: StackInfo) -> Expression:
if isinstance(arg, AsmGlobalSymbol):
return stack_info.global_info.address_of_gsym(arg.symbol_name)
if isinstance(arg, AsmLiteral):
return Literal(arg.value)
if isinstance(arg, BinOp):
lhs = literal_expr(arg.lhs, stack_info)
rhs = literal_expr(arg.rhs, stack_info)
return BinaryOp.int(left=lhs, op=arg.op, right=rhs)
raise DecompFailure(f"Instruction argument {arg} must be a literal")
def imm_add_32(expr: Expression) -> Expression:
if isinstance(expr, Literal):
return as_intish(Literal(expr.value + 32))
else:
return BinaryOp.int(expr, "+", Literal(32))
def fn_op(fn_name: str, args: List[Expression], type: Type) -> FuncCall:
fn_sig = FunctionSignature(
return_type=type,
params=[FunctionParam(type=arg.type) for arg in args],
params_known=True,
is_variadic=False,
)
return FuncCall(
function=GlobalSymbol(symbol_name=fn_name, type=Type.function(fn_sig)),
args=args,
type=type,
)
def void_fn_op(fn_name: str, args: List[Expression]) -> ExprStmt:
fn_call = fn_op(fn_name, args, Type.any_reg())
fn_call.use()
return ExprStmt(fn_call)
def load_upper(args: InstrArgs) -> Expression:
arg = args.raw_arg(1)
if not isinstance(arg, Macro):
assert not isinstance(
arg, Literal
), "normalize_instruction should convert lui/lis <literal> to li"
raise DecompFailure(
f"lui/lis argument must be a literal or %hi/@ha macro, found {arg}"
)
hi_arg = args.hi_imm(1)
if (
isinstance(hi_arg, BinOp)
and hi_arg.op in "+-"
and isinstance(hi_arg.lhs, AsmGlobalSymbol)
and isinstance(hi_arg.rhs, AsmLiteral)
):
sym = hi_arg.lhs
offset = hi_arg.rhs.value * (-1 if hi_arg.op == "-" else 1)
elif isinstance(hi_arg, AsmGlobalSymbol):
sym = hi_arg
offset = 0
else:
raise DecompFailure(f"Invalid %hi/@ha argument {hi_arg}")
stack_info = args.stack_info
source = stack_info.global_info.address_of_gsym(sym.symbol_name)
imm = Literal(offset)
return handle_addi_real(args.reg_ref(0), None, source, imm, stack_info)
def handle_convert(expr: Expression, dest_type: Type, source_type: Type) -> Cast:
# int <-> float casts should be explicit
    silent = dest_type.data().kind == source_type.data().kind
expr.type.unify(source_type)
return Cast(expr=expr, type=dest_type, silent=silent, reinterpret=False)
def handle_la(args: InstrArgs) -> Expression:
target = args.memory_ref(1)
stack_info = args.stack_info
if isinstance(target, AddressMode):
return handle_addi(
InstrArgs(
raw_args=[args.reg_ref(0), target.rhs, AsmLiteral(target.offset)],
regs=args.regs,
stack_info=args.stack_info,
)
)
var = stack_info.global_info.address_of_gsym(target.sym.symbol_name)
return add_imm(var, Literal(target.offset), stack_info)
def handle_or(left: Expression, right: Expression) -> Expression:
if left == right:
# `or $rD, $rS, $rS` can be used to move $rS into $rD
return left
if isinstance(left, Literal) and isinstance(right, Literal):
if (((left.value & 0xFFFF) == 0 and (right.value & 0xFFFF0000) == 0)) or (
(right.value & 0xFFFF) == 0 and (left.value & 0xFFFF0000) == 0
):
return Literal(value=(left.value | right.value))
# Regular bitwise OR.
return BinaryOp.int(left=left, op="|", right=right)
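# Editor's illustrative sketch (not part of the original module): when both operands are
# literals occupying disjoint 16-bit halves (e.g. from a lui/ori pair), handle_or()
# folds them back into a single constant.
def _handle_or_example() -> Expression:
    # Folds to Literal(0x12345678).
    return handle_or(Literal(0x12340000), Literal(0x5678))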
def handle_sltu(args: InstrArgs) -> Expression:
right = args.reg(2)
if args.reg_ref(1) == Register("zero"):
# (0U < x) is equivalent to (x != 0)
uw_right = early_unwrap(right)
if isinstance(uw_right, BinaryOp) and uw_right.op == "^":
# ((a ^ b) != 0) is equivalent to (a != b)
return BinaryOp.icmp(uw_right.left, "!=", uw_right.right)
return BinaryOp.icmp(right, "!=", Literal(0))
else:
left = args.reg(1)
return BinaryOp.ucmp(left, "<", right)
def handle_sltiu(args: InstrArgs) -> Expression:
left = args.reg(1)
right = args.imm(2)
if isinstance(right, Literal):
value = right.value & 0xFFFFFFFF
if value == 1:
# (x < 1U) is equivalent to (x == 0)
uw_left = early_unwrap(left)
if isinstance(uw_left, BinaryOp) and uw_left.op == "^":
# ((a ^ b) == 0) is equivalent to (a == b)
return BinaryOp.icmp(uw_left.left, "==", uw_left.right)
return BinaryOp.icmp(left, "==", Literal(0))
else:
right = Literal(value)
return BinaryOp.ucmp(left, "<", right)
def handle_addi(args: InstrArgs) -> Expression:
stack_info = args.stack_info
source_reg = args.reg_ref(1)
source = args.reg(1)
imm = args.imm(2)
# `(x + 0xEDCC)` is emitted as `((x + 0x10000) - 0x1234)`,
# i.e. as an `addis` followed by an `addi`
uw_source = early_unwrap(source)
if (
isinstance(uw_source, BinaryOp)
and uw_source.op == "+"
and isinstance(uw_source.right, Literal)
and uw_source.right.value % 0x10000 == 0
and isinstance(imm, Literal)
):
return add_imm(
uw_source.left, Literal(imm.value + uw_source.right.value), stack_info
)
return handle_addi_real(args.reg_ref(0), source_reg, source, imm, stack_info)
def handle_addis(args: InstrArgs) -> Expression:
stack_info = args.stack_info
source_reg = args.reg_ref(1)
source = args.reg(1)
imm = args.shifted_imm(2)
return handle_addi_real(args.reg_ref(0), source_reg, source, imm, stack_info)
def handle_addi_real(
output_reg: Register,
source_reg: Optional[Register],
source: Expression,
imm: Expression,
stack_info: StackInfo,
) -> Expression:
if source_reg is not None and stack_info.is_stack_reg(source_reg):
# Adding to sp, i.e. passing an address.
assert isinstance(imm, Literal)
if stack_info.is_stack_reg(output_reg):
# Changing sp. Just ignore that.
return source
# Keep track of all local variables that we take addresses of.
var = stack_info.get_stack_var(imm.value, store=False)
if isinstance(var, LocalVar):
stack_info.add_local_var(var)
return AddressOf(var, type=var.type.reference())
else:
return add_imm(source, imm, stack_info)
def add_imm(source: Expression, imm: Expression, stack_info: StackInfo) -> Expression:
if imm == Literal(0):
# addiu $reg1, $reg2, 0 is a move
# (this happens when replacing %lo(...) by 0)
return source
elif source.type.is_pointer_or_array():
# Pointer addition (this may miss some pointers that get detected later;
# unfortunately that's hard to do anything about with mips_to_c's single-pass
# architecture).
if isinstance(imm, Literal) and not imm.likely_partial_offset():
array_access = array_access_from_add(
source, imm.value, stack_info, target_size=None, ptr=True
)
if array_access is not None:
return array_access
field_path, field_type, _ = source.type.get_deref_field(
imm.value, target_size=None
)
if field_path is not None:
return AddressOf(
StructAccess(
struct_var=source,
offset=imm.value,
target_size=None,
field_path=field_path,
stack_info=stack_info,
type=field_type,
),
type=field_type.reference(),
)
if isinstance(imm, Literal):
target = source.type.get_pointer_target()
if target:
target_size = target.get_size_bytes()
if target_size and imm.value % target_size == 0:
# Pointer addition.
return BinaryOp(
left=source, op="+", right=as_intish(imm), type=source.type
)
return BinaryOp(left=source, op="+", right=as_intish(imm), type=Type.ptr())
elif isinstance(source, Literal) and isinstance(imm, Literal):
return Literal(source.value + imm.value)
else:
# Regular binary addition.
return BinaryOp.intptr(left=source, op="+", right=imm)
def handle_load(args: InstrArgs, type: Type) -> Expression:
# For now, make the cast silent so that output doesn't become cluttered.
# Though really, it would be great to expose the load types somehow...
size = type.get_size_bytes()
assert size is not None
expr = deref(args.memory_ref(1), args.regs, args.stack_info, size=size)
# Detect rodata constants
if isinstance(expr, StructAccess) and expr.offset == 0:
target = early_unwrap(expr.struct_var)
if (
isinstance(target, AddressOf)
and isinstance(target.expr, GlobalSymbol)
and type.is_likely_float()
):
sym_name = target.expr.symbol_name
ent = args.stack_info.global_info.asm_data_value(sym_name)
if (
ent
and ent.data
and isinstance(ent.data[0], bytes)
and len(ent.data[0]) >= size
and ent.is_readonly
and type.unify(target.expr.type)
):
data = ent.data[0][:size]
val: int
if size == 4:
(val,) = struct.unpack(">I", data)
else:
(val,) = struct.unpack(">Q", data)
return Literal(value=val, type=type)
return as_type(expr, type, silent=True)
def deref_unaligned(
arg: Union[AddressMode, RawSymbolRef],
regs: RegInfo,
stack_info: StackInfo,
*,
store: bool = False,
) -> Expression:
    # We don't know the correct size to pass to deref. Passing None would signal that we
    # are taking an address, causing us to prefer entire substructs as referenced fields,
# which would be confusing. Instead, we lie and pass 1. Hopefully nothing bad will
# happen...
return deref(arg, regs, stack_info, size=1, store=store)
def handle_lwl(args: InstrArgs) -> Expression:
# Unaligned load for the left part of a register (lwl can technically merge with
# a pre-existing lwr, but doesn't in practice, so we treat this as a standard
# destination-first operation)
ref = args.memory_ref(1)
expr = deref_unaligned(ref, args.regs, args.stack_info)
key: Tuple[int, object]
if isinstance(ref, AddressMode):
key = (ref.offset, args.regs[ref.rhs])
else:
key = (ref.offset, ref.sym)
return Lwl(expr, key)
def handle_lwr(args: InstrArgs) -> Expression:
# Unaligned load for the right part of a register. This lwr may merge with an
# existing lwl, if it loads from the same target but with an offset that's +3.
uw_old_value = early_unwrap(args.reg(0))
ref = args.memory_ref(1)
lwl_key: Tuple[int, object]
if isinstance(ref, AddressMode):
lwl_key = (ref.offset - 3, args.regs[ref.rhs])
else:
lwl_key = (ref.offset - 3, ref.sym)
if isinstance(uw_old_value, Lwl) and uw_old_value.key[0] == lwl_key[0]:
return UnalignedLoad(uw_old_value.load_expr)
if ref.offset % 4 == 2:
left_mem_ref = replace(ref, offset=ref.offset - 2)
load_expr = deref_unaligned(left_mem_ref, args.regs, args.stack_info)
return Load3Bytes(load_expr)
return ErrorExpr("Unable to handle lwr; missing a corresponding lwl")
def make_store(args: InstrArgs, type: Type) -> Optional[StoreStmt]:
size = type.get_size_bytes()
assert size is not None
stack_info = args.stack_info
source_reg = args.reg_ref(0)
source_raw = args.regs.get_raw(source_reg)
if type.is_likely_float() and size == 8:
source_val = args.dreg(0)
else:
source_val = args.reg(0)
target = args.memory_ref(1)
is_stack = isinstance(target, AddressMode) and stack_info.is_stack_reg(target.rhs)
if (
is_stack
and source_raw is not None
and stack_info.should_save(source_raw, target.offset)
):
        # Elide register preservation.
return None
dest = deref(target, args.regs, stack_info, size=size, store=True)
dest.type.unify(type)
return StoreStmt(source=as_type(source_val, type, silent=is_stack), dest=dest)
def make_storex(args: InstrArgs, type: Type) -> Optional[StoreStmt]:
# "indexed stores" like `stwx rS, rA, rB` write `rS` into `(rA + rB)`
size = type.get_size_bytes()
assert size is not None
source = args.reg(0)
ptr = BinaryOp.intptr(left=args.reg(1), op="+", right=args.reg(2))
# TODO: Can we assume storex's are never used to save registers to the stack?
dest = deref(ptr, args.regs, args.stack_info, size=size, store=True)
dest.type.unify(type)
return StoreStmt(source=as_type(source, type, silent=False), dest=dest)
def handle_swl(args: InstrArgs) -> Optional[StoreStmt]:
# swl in practice only occurs together with swr, so we can treat it as a regular
# store, with the expression wrapped in UnalignedLoad if needed.
source = args.reg(0)
target = args.memory_ref(1)
if not isinstance(early_unwrap(source), UnalignedLoad):
source = UnalignedLoad(source)
dest = deref_unaligned(target, args.regs, args.stack_info, store=True)
return StoreStmt(source=source, dest=dest)
def handle_swr(args: InstrArgs) -> Optional[StoreStmt]:
expr = early_unwrap(args.reg(0))
target = args.memory_ref(1)
if not isinstance(expr, Load3Bytes):
# Elide swr's that don't come from 3-byte-loading lwr's; they probably
# come with a corresponding swl which has already been emitted.
return None
real_target = replace(target, offset=target.offset - 2)
dest = deref_unaligned(real_target, args.regs, args.stack_info, store=True)
return StoreStmt(source=expr, dest=dest)
def handle_sra(args: InstrArgs) -> Expression:
lhs = args.reg(1)
shift = args.imm(2)
if isinstance(shift, Literal) and shift.value in [16, 24]:
expr = early_unwrap(lhs)
pow2 = 1 << shift.value
if isinstance(expr, BinaryOp) and isinstance(expr.right, Literal):
tp = Type.s16() if shift.value == 16 else Type.s8()
rhs = expr.right.value
if expr.op == "<<" and rhs == shift.value:
return as_type(expr.left, tp, silent=False)
elif expr.op == "<<" and rhs > shift.value:
new_shift = fold_mul_chains(
BinaryOp.int(expr.left, "<<", Literal(rhs - shift.value))
)
return as_type(new_shift, tp, silent=False)
elif expr.op == "*" and rhs % pow2 == 0 and rhs != pow2:
mul = BinaryOp.int(expr.left, "*", Literal(value=rhs // pow2))
return as_type(mul, tp, silent=False)
return fold_divmod(
BinaryOp(as_sintish(lhs), ">>", as_intish(shift), type=Type.s32())
)
def handle_conditional_move(args: InstrArgs, nonzero: bool) -> Expression:
op = "!=" if nonzero else "=="
type = Type.any_reg()
return TernaryOp(
BinaryOp.scmp(args.reg(2), op, Literal(0)),
as_type(args.reg(1), type, silent=True),
as_type(args.reg(0), type, silent=True),
type,
)
def format_f32_imm(num: int) -> str:
packed = struct.pack(">I", num & (2 ** 32 - 1))
value = struct.unpack(">f", packed)[0]
if not value or value == 4294967296.0:
# Zero, negative zero, nan, or INT_MAX.
return str(value)
# Write values smaller than 1e-7 / greater than 1e7 using scientific notation,
# and values in between using fixed point.
if abs(math.log10(abs(value))) > 6.9:
fmt_char = "e"
elif abs(value) < 1:
fmt_char = "f"
else:
fmt_char = "g"
def fmt(prec: int) -> str:
"""Format 'value' with 'prec' significant digits/decimals, in either scientific
or regular notation depending on 'fmt_char'."""
ret = ("{:." + str(prec) + fmt_char + "}").format(value)
if fmt_char == "e":
return ret.replace("e+", "e").replace("e0", "e").replace("e-0", "e-")
if "e" in ret:
# The "g" format character can sometimes introduce scientific notation if
# formatting with too few decimals. If this happens, return an incorrect
# value to prevent the result from being used.
#
# Since the value we are formatting is within (1e-7, 1e7) in absolute
# value, it will at least be possible to format with 7 decimals, which is
# less than float precision. Thus, this annoying Python limitation won't
# lead to us outputting numbers with more precision than we really have.
return "0"
return ret
# 20 decimals is more than enough for a float. Start there, then try to shrink it.
prec = 20
while prec > 0:
prec -= 1
value2 = float(fmt(prec))
if struct.pack(">f", value2) != packed:
prec += 1
break
if prec == 20:
# Uh oh, even the original value didn't format correctly. Fall back to str(),
# which ought to work.
return str(value)
ret = fmt(prec)
if "." not in ret:
ret += ".0"
return ret
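# Editor's illustrative sketch (not part of the original module): format_f32_imm() takes
# the raw 32-bit pattern of an IEEE-754 float and renders a short decimal string that
# round-trips to the same bits; 0x3F800000 (1.0f) should come back as "1.0".
def _format_f32_imm_example() -> str:
    return format_f32_imm(0x3F800000)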
def format_f64_imm(num: int) -> str:
(value,) = struct.unpack(">d", struct.pack(">Q", num & (2 ** 64 - 1)))
return str(value)
def fold_divmod(original_expr: BinaryOp) -> BinaryOp:
"""
Return a new BinaryOp instance if this one can be simplified to a single / or % op.
This involves simplifying expressions using MULT_HI, MULTU_HI, +, -, <<, >>, and /.
In GCC 2.7.2, the code that generates these instructions is in expmed.c.
See also https://ridiculousfish.com/blog/posts/labor-of-division-episode-i.html
for a modern writeup of a similar algorithm.
This optimization is also used by MWCC and modern compilers (but not IDO).
"""
mult_high_ops = ("MULT_HI", "MULTU_HI")
possible_match_ops = mult_high_ops + ("-", "+", ">>")
# Only operate on integer expressions of certain operations
if original_expr.is_floating() or original_expr.op not in possible_match_ops:
return original_expr
# Use `early_unwrap_ints` instead of `early_unwrap` to ignore Casts to integer types
# Although this discards some extra type information, this function largely ignores
# sign/size information to stay simpler. The result will be made with BinaryOp.int()
# regardless of input types.
expr = original_expr
left_expr = early_unwrap_ints(expr.left)
right_expr = early_unwrap_ints(expr.right)
divisor_shift = 0
# Detect signed power-of-two division: (x >> N) + MIPS2C_CARRY --> x / (1 << N)
if (
isinstance(left_expr, BinaryOp)
and left_expr.op == ">>"
and isinstance(left_expr.right, Literal)
and expr.op == "+"
and isinstance(right_expr, CarryBit)
):
new_denom = 1 << left_expr.right.value
return BinaryOp.sint(
left=left_expr.left,
op="/",
right=Literal(new_denom),
silent=True,
)
# Fold `/` with `>>`: ((x / N) >> M) --> x / (N << M)
# NB: If x is signed, this is only correct if there is a sign-correcting subtraction term
if (
isinstance(left_expr, BinaryOp)
and left_expr.op == "/"
and isinstance(left_expr.right, Literal)
and expr.op == ">>"
and isinstance(right_expr, Literal)
):
new_denom = left_expr.right.value << right_expr.value
if new_denom < (1 << 32):
return BinaryOp.int(
left=left_expr.left,
op="/",
right=Literal(new_denom),
)
# Detect `%`: (x - ((x / y) * y)) --> x % y
if expr.op == "-" and isinstance(right_expr, BinaryOp) and right_expr.op == "*":
div_expr = early_unwrap_ints(right_expr.left)
mod_base = early_unwrap_ints(right_expr.right)
if (
isinstance(div_expr, BinaryOp)
and early_unwrap_ints(div_expr.left) == left_expr
):
# Accept either `(x / y) * y` or `(x >> N) * M` (where `1 << N == M`)
divisor = early_unwrap_ints(div_expr.right)
if (div_expr.op == "/" and divisor == mod_base) or (
div_expr.op == ">>"
and isinstance(divisor, Literal)
and isinstance(mod_base, Literal)
and (1 << divisor.value) == mod_base.value
):
return BinaryOp.int(left=left_expr, op="%", right=right_expr.right)
# Detect dividing by a negative: ((x >> 31) - (x / N)) --> x / -N
if (
expr.op == "-"
and isinstance(left_expr, BinaryOp)
and left_expr.op == ">>"
and early_unwrap_ints(left_expr.right) == Literal(31)
and isinstance(right_expr, BinaryOp)
and right_expr.op == "/"
and isinstance(right_expr.right, Literal)
):
# Swap left_expr & right_expr, but replace the N in right_expr with -N
left_expr, right_expr = (
replace(right_expr, right=Literal(-right_expr.right.value)),
left_expr,
)
# Remove outer error term: ((x / N) + ((x / N) >> 31)) --> x / N
# As N gets close to (1 << 30), this is no longer a negligible error term
if (
expr.op == "+"
and isinstance(left_expr, BinaryOp)
and left_expr.op == "/"
and isinstance(left_expr.right, Literal)
and left_expr.right.value <= (1 << 29)
and isinstance(right_expr, BinaryOp)
and early_unwrap_ints(right_expr.left) == left_expr
and right_expr.op == ">>"
and early_unwrap_ints(right_expr.right) == Literal(31)
):
return left_expr
# Remove outer error term: ((x / N) - (x >> 31)) --> x / N
if (
expr.op == "-"
and isinstance(left_expr, BinaryOp)
and left_expr.op == "/"
and isinstance(left_expr.right, Literal)
and isinstance(right_expr, BinaryOp)
and right_expr.op == ">>"
and early_unwrap_ints(right_expr.right) == Literal(31)
):
div_expr = left_expr
shift_var_expr = early_unwrap_ints(right_expr.left)
div_var_expr = early_unwrap_ints(div_expr.left)
# Check if the LHS of the shift is the same var that we're dividing by
if div_var_expr == shift_var_expr:
if isinstance(div_expr.right, Literal) and div_expr.right.value >= (
1 << 30
):
return BinaryOp.int(
left=div_expr.left,
op=div_expr.op,
right=div_expr.right,
)
return div_expr
# If the var is under 32 bits, the error term may look like `(x << K) >> 31` instead
if (
isinstance(shift_var_expr, BinaryOp)
and early_unwrap_ints(div_expr.left)
== early_unwrap_ints(shift_var_expr.left)
and shift_var_expr.op == "<<"
and isinstance(shift_var_expr.right, Literal)
):
return div_expr
# Shift on the result of the mul: MULT_HI(x, N) >> M, shift the divisor by M
if (
isinstance(left_expr, BinaryOp)
and expr.op == ">>"
and isinstance(right_expr, Literal)
):
divisor_shift += right_expr.value
expr = left_expr
left_expr = early_unwrap_ints(expr.left)
right_expr = early_unwrap_ints(expr.right)
# Normalize MULT_HI(N, x) to MULT_HI(x, N)
if isinstance(left_expr, Literal) and not isinstance(right_expr, Literal):
left_expr, right_expr = right_expr, left_expr
# Remove inner addition: (MULT_HI(x, N) + x) >> M --> MULT_HI(x, N) >> M
# MULT_HI performs signed multiplication, so the `+ x` acts as setting the 32nd bit
# while having a result with the same sign as x.
# We can ignore it because `round_div` can work with arbitrarily large constants
if (
isinstance(left_expr, BinaryOp)
and left_expr.op == "MULT_HI"
and expr.op == "+"
and early_unwrap_ints(left_expr.left) == right_expr
):
expr = left_expr
left_expr = early_unwrap_ints(expr.left)
right_expr = early_unwrap_ints(expr.right)
# Shift on the LHS of the mul: MULT_HI(x >> M, N) --> MULT_HI(x, N) >> M
if (
expr.op in mult_high_ops
and isinstance(left_expr, BinaryOp)
and left_expr.op == ">>"
and isinstance(left_expr.right, Literal)
):
divisor_shift += left_expr.right.value
left_expr = early_unwrap_ints(left_expr.left)
# Instead of checking for the error term precisely, just check that
# the quotient is "close enough" to the integer value
def round_div(x: int, y: int) -> Optional[int]:
if y <= 1:
return None
result = round(x / y)
if x / (y + 1) <= result <= x / (y - 1):
return result
return None
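    # e.g. round_div(1 << 34, 0x66666667) == 10 (the magic constant for / 10),
    # while round_div(1 << 32, 0x60000000) is None because 2**32 / 0x60000000
    # is not close enough to a whole number.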
if expr.op in mult_high_ops and isinstance(right_expr, Literal):
denom = round_div(1 << (32 + divisor_shift), right_expr.value)
if denom is not None:
return BinaryOp.int(
left=left_expr,
op="/",
right=Literal(denom),
)
return original_expr
def replace_clz_shift(expr: BinaryOp) -> BinaryOp:
"""
Simplify an expression matching `CLZ(x) >> 5` into `x == 0`,
and further simplify `(a - b) == 0` into `a == b`.
"""
# Check that the outer expression is `>>`
if expr.is_floating() or expr.op != ">>":
return expr
# Match `CLZ(x) >> 5`, or return the original expr
left_expr = early_unwrap_ints(expr.left)
right_expr = early_unwrap_ints(expr.right)
if not (
isinstance(left_expr, UnaryOp)
and left_expr.op == "CLZ"
and isinstance(right_expr, Literal)
and right_expr.value == 5
):
return expr
# If the inner `x` is `(a - b)`, return `a == b`
sub_expr = early_unwrap(left_expr.expr)
if (
isinstance(sub_expr, BinaryOp)
and not sub_expr.is_floating()
and sub_expr.op == "-"
):
return BinaryOp.icmp(sub_expr.left, "==", sub_expr.right)
return BinaryOp.icmp(left_expr.expr, "==", Literal(0, type=left_expr.expr.type))
def replace_bitand(expr: BinaryOp) -> Expression:
"""Detect expressions using `&` for truncating integer casts"""
if not expr.is_floating() and expr.op == "&":
if expr.right == Literal(0xFF):
return as_type(expr.left, Type.int_of_size(8), silent=False)
if expr.right == Literal(0xFFFF):
return as_type(expr.left, Type.int_of_size(16), silent=False)
return expr
def fold_mul_chains(expr: Expression) -> Expression:
"""Simplify an expression involving +, -, * and << to a single multiplication,
e.g. 4*x - x -> 3*x, or x<<2 -> x*4. This includes some logic for preventing
folds of consecutive sll, and keeping multiplications by large powers of two
as bitshifts at the top layer."""
def fold(
expr: Expression, toplevel: bool, allow_sll: bool
) -> Tuple[Expression, int]:
if isinstance(expr, BinaryOp):
lbase, lnum = fold(expr.left, False, (expr.op != "<<"))
rbase, rnum = fold(expr.right, False, (expr.op != "<<"))
if expr.op == "<<" and isinstance(expr.right, Literal) and allow_sll:
# Left-shifts by small numbers are easier to understand if
# written as multiplications (they compile to the same thing).
if toplevel and lnum == 1 and not (1 <= expr.right.value <= 4):
return (expr, 1)
return (lbase, lnum << expr.right.value)
if (
expr.op == "*"
and isinstance(expr.right, Literal)
and (allow_sll or expr.right.value % 2 != 0)
):
return (lbase, lnum * expr.right.value)
if early_unwrap(lbase) == early_unwrap(rbase):
if expr.op == "+":
return (lbase, lnum + rnum)
if expr.op == "-":
return (lbase, lnum - rnum)
if isinstance(expr, UnaryOp) and expr.op == "-" and not toplevel:
base, num = fold(expr.expr, False, True)
return (base, -num)
if (
isinstance(expr, EvalOnceExpr)
and not expr.emit_exactly_once
and not expr.forced_emit
):
base, num = fold(early_unwrap(expr), False, allow_sll)
if num != 1 and is_trivial_expression(base):
return (base, num)
return (expr, 1)
base, num = fold(expr, True, True)
if num == 1:
return expr
return BinaryOp.int(left=base, op="*", right=Literal(num))
def array_access_from_add(
expr: Expression,
offset: int,
stack_info: StackInfo,
*,
target_size: Optional[int],
ptr: bool,
) -> Optional[Expression]:
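    """
    Try to interpret `expr + offset` (where `expr` is an addition) as an array
    access, matching `base + index * scale` and `base + (index << shift)`
    patterns and producing e.g. `base[index]` or `base[index].field`
    (or their addresses, when `ptr=True`).
    Returns None if the expression doesn't look like an array access.
    """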
expr = early_unwrap(expr)
if not isinstance(expr, BinaryOp) or expr.op != "+":
return None
base = expr.left
addend = expr.right
if addend.type.is_pointer_or_array() and not base.type.is_pointer_or_array():
base, addend = addend, base
index: Expression
scale: int
uw_addend = early_unwrap(addend)
if (
isinstance(uw_addend, BinaryOp)
and uw_addend.op == "*"
and isinstance(uw_addend.right, Literal)
):
index = uw_addend.left
scale = uw_addend.right.value
elif (
isinstance(uw_addend, BinaryOp)
and uw_addend.op == "<<"
and isinstance(uw_addend.right, Literal)
):
index = uw_addend.left
scale = 1 << uw_addend.right.value
else:
index = addend
scale = 1
if scale < 0:
scale = -scale
index = UnaryOp.sint("-", index)
target_type = base.type.get_pointer_target()
if target_type is None:
return None
uw_base = early_unwrap(base)
typepool = stack_info.global_info.typepool
# In `&x + index * scale`, if the type of `x` is not known, try to mark it as an array.
# Skip the `scale = 1` case because this often indicates a complex `index` expression,
# and is not actually a 1-byte array lookup.
if (
scale > 1
and offset == 0
and isinstance(uw_base, AddressOf)
and target_type.get_size_bytes() is None
):
inner_type: Optional[Type] = None
if (
isinstance(uw_base.expr, GlobalSymbol)
and uw_base.expr.potential_array_dim(scale)[1] != 0
):
# For GlobalSymbols, use the size of the asm data to check the feasibility of being
            # an array with `scale`. This helps us be more conservative around fake symbols.
pass
elif scale == 2:
# This *could* be a struct, but is much more likely to be an int
inner_type = Type.int_of_size(16)
elif scale == 4:
inner_type = Type.reg32(likely_float=False)
elif typepool.unk_inference and isinstance(uw_base.expr, GlobalSymbol):
# Make up a struct with a tag name based on the symbol & struct size.
# Although `scale = 8` could indicate an array of longs/doubles, it seems more
# common to be an array of structs.
struct_name = f"_struct_{uw_base.expr.symbol_name}_0x{scale:X}"
struct = typepool.get_struct_by_tag_name(
struct_name, stack_info.global_info.typemap
)
if struct is None:
struct = StructDeclaration.unknown(
typepool, size=scale, tag_name=struct_name
)
elif struct.size != scale:
# This should only happen if there was already a struct with this name in the context
raise DecompFailure(f"sizeof(struct {struct_name}) != {scale:#x}")
inner_type = Type.struct(struct)
if inner_type is not None:
# This might fail, if `uw_base.expr.type` can't be changed to an array
uw_base.expr.type.unify(Type.array(inner_type, dim=None))
# This acts as a backup, and will usually succeed
target_type.unify(inner_type)
if target_type.get_size_bytes() == scale:
# base[index]
pass
else:
# base->subarray[index]
sub_path, sub_type, remaining_offset = base.type.get_deref_field(
offset, target_size=scale, exact=False
)
# Check if the last item in the path is `0`, which indicates the start of an array
# If it is, remove it: it will be replaced by `[index]`
if sub_path is None or len(sub_path) < 2 or sub_path[-1] != 0:
return None
sub_path.pop()
base = StructAccess(
struct_var=base,
offset=offset - remaining_offset,
target_size=None,
field_path=sub_path,
stack_info=stack_info,
type=sub_type,
)
offset = remaining_offset
target_type = sub_type
ret: Expression = ArrayAccess(base, index, type=target_type)
# Add .field if necessary by wrapping ret in StructAccess(AddressOf(...))
ret_ref = AddressOf(ret, type=ret.type.reference())
field_path, field_type, _ = ret_ref.type.get_deref_field(
offset, target_size=target_size
)
if offset != 0 or (target_size is not None and target_size != scale):
ret = StructAccess(
struct_var=ret_ref,
offset=offset,
target_size=target_size,
field_path=field_path,
stack_info=stack_info,
type=field_type,
)
if ptr:
ret = AddressOf(ret, type=ret.type.reference())
return ret
def handle_add(args: InstrArgs) -> Expression:
lhs = args.reg(1)
rhs = args.reg(2)
stack_info = args.stack_info
type = Type.intptr()
# Because lhs & rhs are in registers, it shouldn't be possible for them to be arrays.
# If they are, treat them the same as pointers anyways.
if lhs.type.is_pointer_or_array():
type = Type.ptr()
elif rhs.type.is_pointer_or_array():
type = Type.ptr()
# addiu instructions can sometimes be emitted as addu instead, when the
# offset is too large.
if isinstance(rhs, Literal):
return handle_addi_real(args.reg_ref(0), args.reg_ref(1), lhs, rhs, stack_info)
if isinstance(lhs, Literal):
return handle_addi_real(args.reg_ref(0), args.reg_ref(2), rhs, lhs, stack_info)
expr = BinaryOp(left=as_intptr(lhs), op="+", right=as_intptr(rhs), type=type)
folded_expr = fold_mul_chains(expr)
if isinstance(folded_expr, BinaryOp):
folded_expr = fold_divmod(folded_expr)
if folded_expr is not expr:
return folded_expr
array_expr = array_access_from_add(expr, 0, stack_info, target_size=None, ptr=True)
if array_expr is not None:
return array_expr
return expr
def handle_add_float(args: InstrArgs) -> Expression:
if args.reg_ref(1) == args.reg_ref(2):
        two = Literal(1 << 30, type=Type.f32())  # 0x40000000 is the IEEE-754 bit pattern of 2.0f
return BinaryOp.f32(two, "*", args.reg(1))
return BinaryOp.f32(args.reg(1), "+", args.reg(2))
def handle_add_double(args: InstrArgs) -> Expression:
if args.reg_ref(1) == args.reg_ref(2):
        two = Literal(1 << 62, type=Type.f64())  # the IEEE-754 bit pattern of 2.0 as a double
return BinaryOp.f64(two, "*", args.dreg(1))
return BinaryOp.f64(args.dreg(1), "+", args.dreg(2))
def handle_bgez(args: InstrArgs) -> Condition:
expr = args.reg(0)
uw_expr = early_unwrap(expr)
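    # bgez on `y << s` is testing the sign bit of the shifted value, i.e. bit
    # (31 - s) of y, so emit it as a bit test instead of a >= 0 comparison.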
if (
isinstance(uw_expr, BinaryOp)
and uw_expr.op == "<<"
and isinstance(uw_expr.right, Literal)
):
shift = uw_expr.right.value
bitand = BinaryOp.int(uw_expr.left, "&", Literal(1 << (31 - shift)))
return UnaryOp("!", bitand, type=Type.bool())
return BinaryOp.scmp(expr, ">=", Literal(0))
def rlwi_mask(mask_begin: int, mask_end: int) -> int:
# Compute the mask constant used by the rlwi* family of PPC instructions,
# referred to as the `MASK(MB, ME)` function in the processor manual.
# Bit 0 is the MSB, Bit 31 is the LSB
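    # e.g. rlwi_mask(24, 31) == 0x000000FF, rlwi_mask(0, 7) == 0xFF000000, and
    # the wrap-around case rlwi_mask(28, 3) == 0xF000000F.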
bits_upto: Callable[[int], int] = lambda m: (1 << (32 - m)) - 1
all_ones = 0xFFFFFFFF
if mask_begin <= mask_end:
# Set bits inside the range, fully inclusive
mask = bits_upto(mask_begin) - bits_upto(mask_end + 1)
else:
# Set bits from [31, mask_end] and [mask_begin, 0]
mask = (bits_upto(mask_end + 1) - bits_upto(mask_begin)) ^ all_ones
return mask
def handle_rlwinm(
source: Expression,
shift: int,
mask_begin: int,
mask_end: int,
simplify: bool = True,
) -> Expression:
# TODO: Detect shift + truncate, like `(x << 2) & 0xFFF3` or `(x >> 2) & 0x3FFF`
# The output of the rlwinm instruction is `ROTL(source, shift) & mask`. We write this as
# ((source << shift) & mask) | ((source >> (32 - shift)) & mask)
# and compute both OR operands (upper_bits and lower_bits respectively).
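    # Worked examples (derived from the rules below): `rlwinm rD, rS, 0, 24, 31`
    # (clrlwi) becomes an 8-bit integer cast of rS, and `rlwinm rD, rS, 2, 0, 29`
    # (slwi rD, rS, 2) becomes `rS * 4`.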
all_ones = 0xFFFFFFFF
mask = rlwi_mask(mask_begin, mask_end)
left_shift = shift
right_shift = 32 - shift
left_mask = (all_ones << left_shift) & mask
right_mask = (all_ones >> right_shift) & mask
# We only simplify if the `simplify` argument is True, and there will be no `|` in the
# resulting expression. If there is an `|`, the expression is best left as bitwise math
simplify = simplify and not (left_mask and right_mask)
if isinstance(source, Literal):
upper_value = (source.value << left_shift) & mask
lower_value = (source.value >> right_shift) & mask
return Literal(upper_value | lower_value)
upper_bits: Optional[Expression]
if left_mask == 0:
upper_bits = None
else:
upper_bits = source
if left_shift != 0:
upper_bits = BinaryOp.int(
left=upper_bits, op="<<", right=Literal(left_shift)
)
if simplify:
upper_bits = fold_mul_chains(upper_bits)
if left_mask != (all_ones << left_shift) & all_ones:
upper_bits = BinaryOp.int(left=upper_bits, op="&", right=Literal(left_mask))
if simplify:
upper_bits = replace_bitand(upper_bits)
lower_bits: Optional[Expression]
if right_mask == 0:
lower_bits = None
else:
lower_bits = BinaryOp.uint(left=source, op=">>", right=Literal(right_shift))
if simplify:
lower_bits = replace_clz_shift(fold_divmod(lower_bits))
if right_mask != (all_ones >> right_shift) & all_ones:
lower_bits = BinaryOp.int(
left=lower_bits, op="&", right=Literal(right_mask)
)
if simplify:
lower_bits = replace_bitand(lower_bits)
if upper_bits is None and lower_bits is None:
return Literal(0)
elif upper_bits is None:
assert lower_bits is not None
return lower_bits
elif lower_bits is None:
return upper_bits
else:
return BinaryOp.int(left=upper_bits, op="|", right=lower_bits)
def handle_rlwimi(
base: Expression, source: Expression, shift: int, mask_begin: int, mask_end: int
) -> Expression:
# This instruction reads from `base`, replaces some bits with values from `source`, then
# writes the result back into the first register. This can be used to copy any contiguous
# bitfield from `source` into `base`, and is commonly used when manipulating flags, such
# as in `x |= 0x10` or `x &= ~0x10`.
# It's generally more readable to write the mask with `~` (instead of computing the inverse here)
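    # For example, `rlwimi rA, rS, 0, 27, 27` copies the 0x10 bit of rS into rA,
    # and comes out as `rA = (rA & ~0x10) | (rS & 0x10)`.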
mask_literal = Literal(rlwi_mask(mask_begin, mask_end))
mask = UnaryOp("~", mask_literal, type=Type.u32())
masked_base = BinaryOp.int(left=base, op="&", right=mask)
if source == Literal(0):
# If the source is 0, there are no bits inserted. (This may look like `x &= ~0x10`)
return masked_base
# Set `simplify=False` to keep the `inserted` expression as bitwise math instead of `*` or `/`
inserted = handle_rlwinm(source, shift, mask_begin, mask_end, simplify=False)
if inserted == mask_literal:
# If this instruction will set all the bits in the mask, we can OR the values
# together without masking the base. (`x |= 0xF0` instead of `x = (x & ~0xF0) | 0xF0`)
return BinaryOp.int(left=base, op="|", right=inserted)
return BinaryOp.int(left=masked_base, op="|", right=inserted)
def handle_loadx(args: InstrArgs, type: Type) -> Expression:
# "indexed loads" like `lwzx rD, rA, rB` read `(rA + rB)` into `rD`
size = type.get_size_bytes()
assert size is not None
ptr = BinaryOp.intptr(left=args.reg(1), op="+", right=args.reg(2))
expr = deref(ptr, args.regs, args.stack_info, size=size)
return as_type(expr, type, silent=True)
def strip_macros(arg: Argument) -> Argument:
"""Replace %lo(...) by 0, and assert that there are no %hi(...). We assume that
%hi's only ever occur in lui, where we expand them to an entire value, and not
just the upper part. This preserves semantics in most cases (though not when %hi's
are reused for different %lo's...)"""
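    # e.g. `addiu $v0, $at, %lo(sym)` becomes `addiu $v0, $at, 0`, under the
    # assumption that the matching `lui $at, %hi(sym)` was already expanded to
    # the full address of `sym`.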
if isinstance(arg, Macro):
if arg.macro_name in ["sda2", "sda21"]:
return arg.argument
if arg.macro_name == "hi":
raise DecompFailure("%hi macro outside of lui")
if arg.macro_name not in ["lo", "l"]:
raise DecompFailure(f"Unrecognized linker macro %{arg.macro_name}")
# This is sort of weird; for `symbol@l` we return 0 here and assume
# that this @l is always perfectly paired with one other @ha.
# However, with `literal@l`, we return the literal value, and assume it is
# paired with another `literal@ha`. This lets us reuse `literal@ha` values,
# but assumes that we never mix literals & symbols
if isinstance(arg.argument, AsmLiteral):
return AsmLiteral(arg.argument.value)
return AsmLiteral(0)
elif isinstance(arg, AsmAddressMode) and isinstance(arg.lhs, Macro):
if arg.lhs.macro_name in ["sda2", "sda21"]:
return arg.lhs.argument
if arg.lhs.macro_name not in ["lo", "l"]:
raise DecompFailure(
f"Bad linker macro in instruction argument {arg}, expected %lo"
)
return AsmAddressMode(lhs=AsmLiteral(0), rhs=arg.rhs)
else:
return arg
@dataclass
class AbiArgSlot:
offset: int
reg: Optional[Register]
type: Type
name: Optional[str] = None
comment: Optional[str] = None
@dataclass
class Abi:
arg_slots: List[AbiArgSlot]
possible_slots: List[AbiArgSlot]
def reg_always_set(node: Node, reg: Register, *, dom_set: bool) -> bool:
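    """
    Return True if `reg` is written on every path from `node`'s immediate
    dominator down to `node` (without being clobbered afterwards). `dom_set`
    indicates whether the register should be treated as set at the dominator
    itself.
    """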
if node.immediate_dominator is None:
return False
seen = {node.immediate_dominator}
stack = node.parents[:]
while stack:
n = stack.pop()
if n == node.immediate_dominator and not dom_set:
return False
if n in seen:
continue
seen.add(n)
clobbered: Optional[bool] = None
for instr in n.block.instructions:
with current_instr(instr):
if reg in instr.outputs:
clobbered = False
elif reg in instr.clobbers:
clobbered = True
        if clobbered is True:
return False
if clobbered is None:
stack.extend(n.parents)
return True
def pick_phi_assignment_nodes(
reg: Register, nodes: List[Node], expr: Expression
) -> List[Node]:
"""
As part of `assign_phis()`, we need to pick a set of nodes where we can emit a
`SetPhiStmt` that assigns the phi for `reg` to `expr`.
The final register state for `reg` for each node in `nodes` is `expr`,
so the best case would be finding a single dominating node for the assignment.
"""
# Find the set of nodes which dominate *all* of `nodes`, sorted by number
# of dominators. (This puts "earlier" nodes at the beginning of the list.)
dominators = sorted(
set.intersection(*(node.dominators for node in nodes)),
key=lambda n: len(n.dominators),
)
# Check the dominators for a node with the correct final state for `reg`
for node in dominators:
regs = get_block_info(node).final_register_states
raw = regs.get_raw(reg)
meta = regs.get_meta(reg)
if raw is None or meta is None or meta.force:
continue
if raw == expr:
return [node]
# We couldn't find anything, so fall back to the naive solution
# TODO: In some cases there may be a better solution (e.g. one that requires 2 nodes)
return nodes
def assign_phis(used_phis: List[PhiExpr], stack_info: StackInfo) -> None:
i = 0
# Iterate over used phis until there are no more remaining. New ones may
# appear during iteration, hence the while loop.
while i < len(used_phis):
phi = used_phis[i]
assert phi.num_usages > 0
assert len(phi.node.parents) >= 2
# Group parent nodes by the value of their phi register
equivalent_nodes: DefaultDict[Expression, List[Node]] = defaultdict(list)
for node in phi.node.parents:
expr = get_block_info(node).final_register_states[phi.reg]
expr.type.unify(phi.type)
equivalent_nodes[expr].append(node)
exprs = list(equivalent_nodes.keys())
first_uw = early_unwrap(exprs[0])
if all(early_unwrap(e) == first_uw for e in exprs[1:]):
# All the phis have the same value (e.g. because we recomputed an
# expression after a store, or restored a register after a function
# call). Just use that value instead of introducing a phi node.
# TODO: the unwrapping here is necessary, but also kinda sketchy:
# we may set as replacement_expr an expression that really shouldn't
# be repeated, e.g. a StructAccess. It would make sense to use less
# eager unwrapping, and/or to emit an EvalOnceExpr at this point
# (though it's too late for it to be able to participate in the
# prevent_later_uses machinery).
phi.replacement_expr = as_type(first_uw, phi.type, silent=True)
for _ in range(phi.num_usages):
first_uw.use()
else:
for expr, nodes in equivalent_nodes.items():
for node in pick_phi_assignment_nodes(phi.reg, nodes, expr):
block_info = get_block_info(node)
expr = block_info.final_register_states[phi.reg]
if isinstance(expr, PhiExpr):
# Explicitly mark how the expression is used if it's a phi,
# so we can propagate phi sets (to get rid of temporaries).
expr.use(from_phi=phi)
else:
expr.use()
typed_expr = as_type(expr, phi.type, silent=True)
block_info.to_write.append(SetPhiStmt(phi, typed_expr))
i += 1
name_counter: Dict[Register, int] = {}
for phi in used_phis:
if not phi.replacement_expr and phi.propagates_to() == phi:
counter = name_counter.get(phi.reg, 0) + 1
name_counter[phi.reg] = counter
output_reg_name = stack_info.function.reg_formatter.format(phi.reg)
prefix = f"phi_{output_reg_name}"
phi.name = f"{prefix}_{counter}" if counter > 1 else prefix
stack_info.phi_vars.append(phi)
def propagate_register_meta(nodes: List[Node], reg: Register) -> None:
"""Propagate RegMeta bits forwards/backwards."""
non_terminal: List[Node] = [n for n in nodes if not isinstance(n, TerminalNode)]
# Set `is_read` based on `read_inherited`.
for n in non_terminal:
if reg in get_block_info(n).final_register_states.read_inherited:
for p in n.parents:
par_meta = get_block_info(p).final_register_states.get_meta(reg)
if par_meta:
par_meta.is_read = True
# Propagate `is_read` backwards.
todo = non_terminal[:]
while todo:
n = todo.pop()
meta = get_block_info(n).final_register_states.get_meta(reg)
for p in n.parents:
par_meta = get_block_info(p).final_register_states.get_meta(reg)
if (par_meta and not par_meta.is_read) and (
meta and meta.inherited and meta.is_read
):
par_meta.is_read = True
todo.append(p)
# Set `uninteresting` and propagate it, `function_return`, and `in_pattern` forwards.
# Start by assuming inherited values are all set; they will get unset iteratively,
# but for cyclic dependency purposes we want to assume them set.
for n in non_terminal:
meta = get_block_info(n).final_register_states.get_meta(reg)
if meta:
if meta.inherited:
meta.uninteresting = True
meta.function_return = True
meta.in_pattern = True
else:
meta.uninteresting |= (
meta.is_read or meta.function_return or meta.in_pattern
)
todo = non_terminal[:]
while todo:
n = todo.pop()
if isinstance(n, TerminalNode):
continue
meta = get_block_info(n).final_register_states.get_meta(reg)
if not meta or not meta.inherited:
continue
all_uninteresting = True
all_function_return = True
all_in_pattern = True
for p in n.parents:
par_meta = get_block_info(p).final_register_states.get_meta(reg)
if par_meta:
all_uninteresting &= par_meta.uninteresting
all_function_return &= par_meta.function_return
all_in_pattern &= par_meta.in_pattern
if meta.uninteresting and not all_uninteresting and not meta.is_read:
meta.uninteresting = False
todo.extend(n.children())
if meta.function_return and not all_function_return:
meta.function_return = False
todo.extend(n.children())
if meta.in_pattern and not all_in_pattern:
meta.in_pattern = False
todo.extend(n.children())
def determine_return_register(
return_blocks: List[BlockInfo], fn_decl_provided: bool, arch: Arch
) -> Optional[Register]:
"""Determine which of the arch's base_return_regs (i.e. v0, f0) is the most
likely to contain the return value, or if the function is likely void."""
def priority(block_info: BlockInfo, reg: Register) -> int:
meta = block_info.final_register_states.get_meta(reg)
if not meta:
return 4
if meta.uninteresting:
return 2
if meta.in_pattern:
return 1
if meta.function_return:
return 0
return 3
if not return_blocks:
return None
best_reg: Optional[Register] = None
best_prio = -1
for reg in arch.base_return_regs:
prios = [priority(b, reg) for b in return_blocks]
max_prio = max(prios)
if max_prio == 4:
# Register is not always set, skip it
continue
if max_prio <= 2 and not fn_decl_provided:
# Register is always read after being written, or comes from a
# function call; seems unlikely to be an intentional return.
# Skip it, unless we have a known non-void return type.
continue
if max_prio > best_prio:
best_prio = max_prio
best_reg = reg
return best_reg
def translate_node_body(node: Node, regs: RegInfo, stack_info: StackInfo) -> BlockInfo:
"""
Given a node and current register contents, return a BlockInfo containing
the translated AST for that node.
"""
    to_write: List[Statement] = []
local_var_writes: Dict[LocalVar, Tuple[Register, Expression]] = {}
subroutine_args: Dict[int, Expression] = {}
branch_condition: Optional[Condition] = None
switch_expr: Optional[Expression] = None
has_custom_return: bool = False
has_function_call: bool = False
in_pattern: bool = False
arch = stack_info.global_info.arch
def eval_once(
expr: Expression,
*,
emit_exactly_once: bool,
trivial: bool,
prefix: str = "",
reuse_var: Optional[Var] = None,
) -> EvalOnceExpr:
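        """
        Wrap `expr` in an EvalOnceExpr tied to a fresh temp var (or `reuse_var`),
        and record an EvalOnceStmt for it so that, roughly speaking, a
        `temp = expr;` assignment can be emitted if the value ends up being
        needed more than once (or unconditionally, when `emit_exactly_once`).
        """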
if emit_exactly_once:
# (otherwise this will be marked used once num_usages reaches 1)
expr.use()
elif "_fictive_" in prefix and isinstance(expr, EvalOnceExpr):
# Avoid creating additional EvalOnceExprs for fictive Registers
# so they're less likely to appear in the output
return expr
assert reuse_var or prefix
if prefix == "condition_bit":
prefix = "cond"
var = reuse_var or Var(stack_info, "temp_" + prefix)
expr = EvalOnceExpr(
wrapped_expr=expr,
var=var,
type=expr.type,
emit_exactly_once=emit_exactly_once,
trivial=trivial,
)
var.num_usages += 1
stmt = EvalOnceStmt(expr)
to_write.append(stmt)
stack_info.temp_vars.append(stmt)
return expr
def prevent_later_uses(expr_filter: Callable[[Expression], bool]) -> None:
"""Prevent later uses of registers whose contents match a callback filter."""
for r in regs.contents.keys():
data = regs.contents.get(r)
assert data is not None
expr = data.value
if not data.meta.force and expr_filter(expr):
# Mark the register as "if used, emit the expression's once
# var". We usually always have a once var at this point,
# but if we don't, create one.
if not isinstance(expr, EvalOnceExpr):
expr = eval_once(
expr,
emit_exactly_once=False,
trivial=False,
prefix=stack_info.function.reg_formatter.format(r),
)
# This write isn't changing the value of the register; it didn't need
# to be declared as part of the current instruction's inputs/outputs.
regs.unchecked_set_with_meta(r, expr, replace(data.meta, force=True))
def prevent_later_value_uses(sub_expr: Expression) -> None:
"""Prevent later uses of registers that recursively contain a given
subexpression."""
# Unused PassedInArg are fine; they can pass the uses_expr test simply based
# on having the same variable name. If we didn't filter them out here it could
# cause them to be incorrectly passed as function arguments -- the function
# call logic sees an opaque wrapper and doesn't realize that they are unused
# arguments that should not be passed on.
prevent_later_uses(
lambda e: uses_expr(e, lambda e2: e2 == sub_expr)
and not (isinstance(e, PassedInArg) and not e.copied)
)
def prevent_later_function_calls() -> None:
"""Prevent later uses of registers that recursively contain a function call."""
prevent_later_uses(lambda e: uses_expr(e, lambda e2: isinstance(e2, FuncCall)))
def prevent_later_reads() -> None:
"""Prevent later uses of registers that recursively contain a read."""
contains_read = lambda e: isinstance(e, (StructAccess, ArrayAccess))
prevent_later_uses(lambda e: uses_expr(e, contains_read))
def set_reg_maybe_return(reg: Register, expr: Expression) -> None:
regs.set_with_meta(reg, expr, RegMeta(in_pattern=in_pattern))
def set_reg(reg: Register, expr: Optional[Expression]) -> Optional[Expression]:
if expr is None:
if reg in regs:
del regs[reg]
return None
if isinstance(expr, LocalVar):
if (
isinstance(node, ReturnNode)
and stack_info.maybe_get_register_var(reg)
and stack_info.in_callee_save_reg_region(expr.value)
and reg in stack_info.callee_save_regs
):
# Elide saved register restores with --reg-vars (it doesn't
# matter in other cases).
return None
if expr in local_var_writes:
# Elide register restores (only for the same register for now,
                # to be conservative).
orig_reg, orig_expr = local_var_writes[expr]
if orig_reg == reg:
expr = orig_expr
uw_expr = expr
if not isinstance(expr, Literal):
expr = eval_once(
expr,
emit_exactly_once=False,
trivial=is_trivial_expression(expr),
prefix=stack_info.function.reg_formatter.format(reg),
)
if reg == Register("zero"):
# Emit the expression as is. It's probably a volatile load.
expr.use()
to_write.append(ExprStmt(expr))
else:
dest = stack_info.maybe_get_register_var(reg)
if dest is not None:
stack_info.use_register_var(dest)
# Avoid emitting x = x, but still refresh EvalOnceExpr's etc.
if not (isinstance(uw_expr, RegisterVar) and uw_expr.reg == reg):
source = as_type(expr, dest.type, True)
source.use()
to_write.append(StoreStmt(source=source, dest=dest))
expr = dest
set_reg_maybe_return(reg, expr)
return expr
def clear_caller_save_regs() -> None:
for reg in arch.temp_regs:
if reg in regs:
del regs[reg]
def maybe_clear_local_var_writes(func_args: List[Expression]) -> None:
# Clear the `local_var_writes` dict if any of the `func_args` contain
# a reference to a stack var. (The called function may modify the stack,
# replacing the value we have in `local_var_writes`.)
for arg in func_args:
if uses_expr(
arg,
lambda expr: isinstance(expr, AddressOf)
and isinstance(expr.expr, LocalVar),
):
local_var_writes.clear()
return
def process_instr(instr: Instruction) -> None:
nonlocal branch_condition, switch_expr, has_function_call, in_pattern
in_pattern = instr.in_pattern
mnemonic = instr.mnemonic
arch_mnemonic = instr.arch_mnemonic(arch)
args = InstrArgs(instr.args, regs, stack_info)
expr: Expression
# Figure out what code to generate!
if mnemonic in arch.instrs_ignore:
pass
elif mnemonic in arch.instrs_store or mnemonic in arch.instrs_store_update:
# Store a value in a permanent place.
if mnemonic in arch.instrs_store:
to_store = arch.instrs_store[mnemonic](args)
else:
# PPC specific store-and-update instructions
# `stwu r3, 8(r4)` is equivalent to `$r3 = *($r4 + 8); $r4 += 8;`
to_store = arch.instrs_store_update[mnemonic](args)
# Update the register in the second argument
update = args.memory_ref(1)
if not isinstance(update, AddressMode):
raise DecompFailure(
f"Unhandled store-and-update arg in {instr}: {update!r}"
)
set_reg(
update.rhs,
add_imm(args.regs[update.rhs], Literal(update.offset), stack_info),
)
if to_store is None:
                # Elided register preservation.
pass
elif isinstance(to_store.dest, SubroutineArg):
# About to call a subroutine with this argument. Skip arguments for the
# first four stack slots; they are also passed in registers.
if to_store.dest.value >= 0x10:
subroutine_args[to_store.dest.value] = to_store.source
else:
if isinstance(to_store.dest, LocalVar):
stack_info.add_local_var(to_store.dest)
raw_value = to_store.source
if isinstance(raw_value, Cast) and raw_value.reinterpret:
# When preserving values on the stack across function calls,
# ignore the type of the stack variable. The same stack slot
# might be used to preserve values of different types.
raw_value = raw_value.expr
local_var_writes[to_store.dest] = (args.reg_ref(0), raw_value)
# Emit a write. This includes four steps:
# - mark the expression as used (since writes are always emitted)
# - mark the dest used (if it's a struct access it needs to be
# evaluated, though ideally we would not mark the top-level expression
# used; it may cause early emissions that shouldn't happen)
# - mark other usages of the dest as "emit before this point if used".
# - emit the actual write.
#
# Note that the prevent_later_value_uses step happens after use(), since
# the stored expression is allowed to reference its destination var,
# but before the write is written, since prevent_later_value_uses might
# emit writes of its own that should go before this write. In practice
# that probably never occurs -- all relevant register contents should be
# EvalOnceExpr's that can be emitted at their point of creation, but
# I'm not 100% certain that that's always the case and will remain so.
to_store.source.use()
to_store.dest.use()
prevent_later_value_uses(to_store.dest)
prevent_later_function_calls()
to_write.append(to_store)
elif mnemonic in arch.instrs_source_first:
# Just 'mtc1'. It's reversed, so we have to specially handle it.
set_reg(args.reg_ref(1), arch.instrs_source_first[mnemonic](args))
elif mnemonic in arch.instrs_branches:
assert branch_condition is None
branch_condition = arch.instrs_branches[mnemonic](args)
elif mnemonic in arch.instrs_float_branches:
assert branch_condition is None
cond_bit = regs[Register("condition_bit")]
if not isinstance(cond_bit, BinaryOp):
cond_bit = ExprCondition(cond_bit, type=cond_bit.type)
if arch_mnemonic == "mips:bc1t":
branch_condition = cond_bit
elif arch_mnemonic == "mips:bc1f":
branch_condition = cond_bit.negated()
elif mnemonic in arch.instrs_jumps:
if arch_mnemonic == "ppc:bctr":
# Switch jump
assert isinstance(node, SwitchNode)
switch_expr = args.regs[Register("ctr")]
elif arch_mnemonic == "mips:jr":
# MIPS:
if args.reg_ref(0) == arch.return_address_reg:
# Return from the function.
assert isinstance(node, ReturnNode)
else:
# Switch jump.
assert isinstance(node, SwitchNode)
switch_expr = args.reg(0)
elif arch_mnemonic == "ppc:blr":
assert isinstance(node, ReturnNode)
else:
assert False, f"Unhandled jump mnemonic {arch_mnemonic}"
elif mnemonic in arch.instrs_fn_call:
if arch_mnemonic in ["mips:jal", "ppc:bl"]:
fn_target = args.imm(0)
if not (
(
isinstance(fn_target, AddressOf)
and isinstance(fn_target.expr, GlobalSymbol)
)
or isinstance(fn_target, Literal)
):
raise DecompFailure(
f"Target of function call must be a symbol, not {fn_target}"
)
elif arch_mnemonic == "ppc:blrl":
fn_target = args.regs[Register("lr")]
elif arch_mnemonic == "ppc:bctrl":
fn_target = args.regs[Register("ctr")]
elif arch_mnemonic == "mips:jalr":
fn_target = args.reg(1)
else:
assert False, f"Unhandled fn call mnemonic {arch_mnemonic}"
fn_target = as_function_ptr(fn_target)
fn_sig = fn_target.type.get_function_pointer_signature()
assert fn_sig is not None, "known function pointers must have a signature"
likely_regs: Dict[Register, bool] = {}
for reg, data in regs.contents.items():
# We use a much stricter filter for PPC than MIPS, because the same
                # registers can be used as arguments & return values.
# The ABI can also mix & match the rN & fN registers, which makes the
# "require" heuristic less powerful.
#
# - `meta.inherited` will only be False for registers set in *this* basic block
# - `meta.function_return` will only be accurate for registers set within this
# basic block because we have not called `propagate_register_meta` yet.
# Within this block, it will be True for registers that were return values.
if arch.arch == Target.ArchEnum.PPC and (
data.meta.inherited or data.meta.function_return
):
likely_regs[reg] = False
elif data.meta.in_pattern:
# Like `meta.function_return` mentioned above, `meta.in_pattern` will only be
# accurate for registers set within this basic block.
likely_regs[reg] = False
elif isinstance(data.value, PassedInArg) and not data.value.copied:
likely_regs[reg] = False
else:
likely_regs[reg] = True
abi = arch.function_abi(fn_sig, likely_regs, for_call=True)
func_args: List[Expression] = []
for slot in abi.arg_slots:
if slot.reg:
expr = regs[slot.reg]
elif slot.offset in subroutine_args:
expr = subroutine_args.pop(slot.offset)
else:
expr = ErrorExpr(
f"Unable to find stack arg {slot.offset:#x} in block"
)
func_args.append(
CommentExpr.wrap(
as_type(expr, slot.type, True), prefix=slot.comment
)
)
for slot in abi.possible_slots:
assert slot.reg is not None
func_args.append(regs[slot.reg])
# Add the arguments after a3.
# TODO: limit this based on abi.arg_slots. If the function type is known
# and not variadic, this list should be empty.
for _, arg in sorted(subroutine_args.items()):
if fn_sig.params_known and not fn_sig.is_variadic:
func_args.append(CommentExpr.wrap(arg, prefix="extra?"))
else:
func_args.append(arg)
if not fn_sig.params_known:
while len(func_args) > len(fn_sig.params):
fn_sig.params.append(FunctionParam())
                # When the function signature isn't provided, we only assume that each
# parameter is "simple" (<=4 bytes, no return struct, etc.). This may not
# match the actual function signature, but it's the best we can do.
# Without that assumption, the logic from `function_abi` would be needed here.
for i, (arg_expr, param) in enumerate(zip(func_args, fn_sig.params)):
func_args[i] = as_type(arg_expr, param.type.decay(), True)
# Reset subroutine_args, for the next potential function call.
subroutine_args.clear()
call: Expression = FuncCall(
fn_target, func_args, fn_sig.return_type.weaken_void_ptr()
)
call = eval_once(call, emit_exactly_once=True, trivial=False, prefix="ret")
# Clear out caller-save registers, for clarity and to ensure that
# argument regs don't get passed into the next function.
clear_caller_save_regs()
# Clear out local var write tracking if any argument contains a stack
# reference. That dict is used to track register saves/restores, which
# are unreliable if we call a function with a stack reference.
maybe_clear_local_var_writes(func_args)
# Prevent reads and function calls from moving across this call.
# This isn't really right, because this call might be moved later,
# and then this prevention should also be... but it's the best we
# can do with the current code architecture.
prevent_later_function_calls()
prevent_later_reads()
return_reg_vals = arch.function_return(call)
for out in instr.outputs:
if not isinstance(out, Register):
continue
val = return_reg_vals[out]
if not isinstance(val, SecondF64Half):
val = eval_once(
val,
emit_exactly_once=False,
trivial=False,
prefix=stack_info.function.reg_formatter.format(out),
)
regs.set_with_meta(out, val, RegMeta(function_return=True))
has_function_call = True
elif mnemonic in arch.instrs_float_comp:
expr = arch.instrs_float_comp[mnemonic](args)
regs[Register("condition_bit")] = expr
elif mnemonic in arch.instrs_hi_lo:
hi, lo = arch.instrs_hi_lo[mnemonic](args)
set_reg(Register("hi"), hi)
set_reg(Register("lo"), lo)
elif mnemonic in arch.instrs_implicit_destination:
reg, expr_fn = arch.instrs_implicit_destination[mnemonic]
set_reg(reg, expr_fn(args))
elif mnemonic in arch.instrs_ppc_compare:
if instr.args[0] != Register("cr0"):
raise DecompFailure(
f"Instruction {instr} not supported (first arg is not $cr0)"
)
set_reg(Register("cr0_eq"), arch.instrs_ppc_compare[mnemonic](args, "=="))
set_reg(Register("cr0_gt"), arch.instrs_ppc_compare[mnemonic](args, ">"))
set_reg(Register("cr0_lt"), arch.instrs_ppc_compare[mnemonic](args, "<"))
set_reg(Register("cr0_so"), Literal(0))
elif mnemonic in arch.instrs_no_dest:
stmt = arch.instrs_no_dest[mnemonic](args)
to_write.append(stmt)
elif mnemonic.rstrip(".") in arch.instrs_destination_first:
target = args.reg_ref(0)
val = arch.instrs_destination_first[mnemonic.rstrip(".")](args)
# TODO: IDO tends to keep variables within single registers. Thus,
# if source = target, maybe we could overwrite that variable instead
# of creating a new one?
target_val = set_reg(target, val)
mn_parts = arch_mnemonic.split(".")
if arch_mnemonic.startswith("ppc:") and arch_mnemonic.endswith("."):
# PPC instructions suffixed with . set condition bits (CR0) based on the result value
if target_val is None:
target_val = val
set_reg(
Register("cr0_eq"),
BinaryOp.icmp(target_val, "==", Literal(0, type=target_val.type)),
)
# Use manual casts for cr0_gt/cr0_lt so that the type of target_val is not modified
# until the resulting bit is .use()'d.
target_s32 = Cast(
target_val, reinterpret=True, silent=True, type=Type.s32()
)
set_reg(
Register("cr0_gt"),
BinaryOp(target_s32, ">", Literal(0), type=Type.s32()),
)
set_reg(
Register("cr0_lt"),
BinaryOp(target_s32, "<", Literal(0), type=Type.s32()),
)
set_reg(
Register("cr0_so"),
fn_op("MIPS2C_OVERFLOW", [target_val], type=Type.s32()),
)
elif (
len(mn_parts) >= 2
and mn_parts[0].startswith("mips:")
and mn_parts[1] == "d"
) or arch_mnemonic == "mips:ldc1":
set_reg(target.other_f64_reg(), SecondF64Half())
elif mnemonic in arch.instrs_load_update:
target = args.reg_ref(0)
val = arch.instrs_load_update[mnemonic](args)
set_reg(target, val)
if arch_mnemonic in ["ppc:lwzux", "ppc:lhzux", "ppc:lbzux"]:
# In `rD, rA, rB`, update `rA = rA + rB`
update_reg = args.reg_ref(1)
offset = args.reg(2)
else:
# In `rD, rA(N)`, update `rA = rA + N`
update = args.memory_ref(1)
if not isinstance(update, AddressMode):
raise DecompFailure(
f"Unhandled store-and-update arg in {instr}: {update!r}"
)
update_reg = update.rhs
offset = Literal(update.offset)
if update_reg == target:
raise DecompFailure(
f"Invalid instruction, rA and rD must be different in {instr}"
)
set_reg(update_reg, add_imm(args.regs[update_reg], offset, stack_info))
else:
expr = ErrorExpr(f"unknown instruction: {instr}")
if arch_mnemonic.startswith("ppc:") and arch_mnemonic.endswith("."):
# Unimplemented PPC instructions that modify CR0
set_reg(Register("cr0_eq"), expr)
set_reg(Register("cr0_gt"), expr)
set_reg(Register("cr0_lt"), expr)
set_reg(Register("cr0_so"), expr)
if args.count() >= 1 and isinstance(args.raw_arg(0), Register):
reg = args.reg_ref(0)
expr = eval_once(
expr,
emit_exactly_once=True,
trivial=False,
prefix=stack_info.function.reg_formatter.format(reg),
)
if reg != Register("zero"):
set_reg_maybe_return(reg, expr)
else:
to_write.append(ExprStmt(expr))
for instr in node.block.instructions:
with regs.current_instr(instr):
process_instr(instr)
if branch_condition is not None:
branch_condition.use()
switch_control: Optional[SwitchControl] = None
if switch_expr is not None:
switch_control = SwitchControl.from_expr(switch_expr)
switch_control.control_expr.use()
return BlockInfo(
to_write=to_write,
return_value=None,
switch_control=switch_control,
branch_condition=branch_condition,
final_register_states=regs,
has_function_call=has_function_call,
)
def translate_graph_from_block(
node: Node,
regs: RegInfo,
stack_info: StackInfo,
used_phis: List[PhiExpr],
return_blocks: List[BlockInfo],
options: Options,
) -> None:
"""
Given a FlowGraph node and a dictionary of register contents, give that node
its appropriate BlockInfo (which contains the AST of its code).
"""
if options.debug:
print(f"\nNode in question: {node}")
# Translate the given node and discover final register states.
try:
block_info = translate_node_body(node, regs, stack_info)
if options.debug:
print(block_info)
except Exception as e: # TODO: handle issues better
if options.stop_on_error:
raise
instr: Optional[Instruction] = None
if isinstance(e, InstrProcessingFailure) and isinstance(e.__cause__, Exception):
instr = e.instr
e = e.__cause__
if isinstance(e, DecompFailure):
emsg = str(e)
print(emsg)
else:
tb = e.__traceback__
traceback.print_exception(None, e, tb)
emsg = str(e) or traceback.format_tb(tb)[-1]
emsg = emsg.strip().split("\n")[-1].strip()
error_stmts: List[Statement] = [CommentStmt(f"Error: {emsg}")]
if instr is not None:
print(
f"Error occurred while processing instruction: {instr}", file=sys.stderr
)
error_stmts.append(CommentStmt(f"At instruction: {instr}"))
print(file=sys.stderr)
block_info = BlockInfo(
to_write=error_stmts,
return_value=None,
switch_control=None,
branch_condition=ErrorExpr(),
final_register_states=regs,
has_function_call=False,
)
node.block.add_block_info(block_info)
if isinstance(node, ReturnNode):
return_blocks.append(block_info)
# Translate everything dominated by this node, now that we know our own
# final register state. This will eventually reach every node.
for child in node.immediately_dominates:
if isinstance(child, TerminalNode):
continue
new_regs = RegInfo(stack_info=stack_info)
for reg, data in regs.contents.items():
new_regs.set_with_meta(
reg, data.value, RegMeta(inherited=True, force=data.meta.force)
)
phi_regs = (
r for r in locs_clobbered_until_dominator(child) if isinstance(r, Register)
)
for reg in phi_regs:
if reg_always_set(child, reg, dom_set=(reg in regs)):
expr: Optional[Expression] = stack_info.maybe_get_register_var(reg)
if expr is None:
expr = PhiExpr(
reg=reg, node=child, used_phis=used_phis, type=Type.any_reg()
)
new_regs.set_with_meta(reg, expr, RegMeta(inherited=True))
elif reg in new_regs:
del new_regs[reg]
translate_graph_from_block(
child, new_regs, stack_info, used_phis, return_blocks, options
)
def resolve_types_late(stack_info: StackInfo) -> None:
"""
After translating a function, perform a final type-resolution pass.
"""
# Final check over stack var types. Because of delayed type unification, some
# locations should now be marked as "weak".
for location in stack_info.weak_stack_var_types.keys():
stack_info.get_stack_var(location, store=False)
# Use dereferences to determine pointer types
struct_type_map = stack_info.get_struct_type_map()
for var, offset_type_map in struct_type_map.items():
if len(offset_type_map) == 1 and 0 in offset_type_map:
# var was probably a plain pointer, not a struct
# Try to unify it with the appropriate pointer type,
# to fill in the type if it does not already have one
type = offset_type_map[0]
var.type.unify(Type.ptr(type))
@dataclass
class FunctionInfo:
stack_info: StackInfo
flow_graph: FlowGraph
return_type: Type
symbol: GlobalSymbol
@dataclass
class GlobalInfo:
asm_data: AsmData
arch: Arch
target: Target
local_functions: Set[str]
typemap: TypeMap
typepool: TypePool
global_symbol_map: Dict[str, GlobalSymbol] = field(default_factory=dict)
def asm_data_value(self, sym_name: str) -> Optional[AsmDataEntry]:
return self.asm_data.values.get(sym_name)
def address_of_gsym(self, sym_name: str) -> AddressOf:
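        """
        Return an AddressOf expression for the global symbol `sym_name`,
        creating the GlobalSymbol (and unifying its type with any information
        from the context/typemap, demangled C++ name, or vtable data) the first
        time it is referenced.
        """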
if sym_name in self.global_symbol_map:
sym = self.global_symbol_map[sym_name]
else:
demangled_symbol: Optional[CxxSymbol] = None
demangled_str: Optional[str] = None
if self.target.language == Target.LanguageEnum.CXX:
try:
demangled_symbol = demangle_codewarrior_parse(sym_name)
except ValueError:
pass
else:
demangled_str = str(demangled_symbol)
sym = self.global_symbol_map[sym_name] = GlobalSymbol(
symbol_name=sym_name,
type=Type.any(),
asm_data_entry=self.asm_data_value(sym_name),
demangled_str=demangled_str,
)
# If the symbol is a C++ vtable, try to build a custom type for it by parsing it
if (
self.target.language == Target.LanguageEnum.CXX
and sym_name.startswith("__vt__")
and sym.asm_data_entry is not None
):
sym.type.unify(self.vtable_type(sym_name, sym.asm_data_entry))
fn = self.typemap.functions.get(sym_name)
ctype: Optional[CType]
if fn is not None:
ctype = fn.type
else:
ctype = self.typemap.var_types.get(sym_name)
if ctype is not None:
sym.symbol_in_context = True
sym.initializer_in_typemap = (
sym_name in self.typemap.vars_with_initializers
)
sym.type.unify(Type.ctype(ctype, self.typemap, self.typepool))
if sym_name not in self.typepool.unknown_decls:
sym.type_provided = True
elif sym_name in self.local_functions:
sym.type.unify(Type.function())
# Do this after unifying the type in the typemap, so that it has lower precedence
if demangled_symbol is not None:
sym.type.unify(
Type.demangled_symbol(self.typemap, self.typepool, demangled_symbol)
)
return AddressOf(sym, type=sym.type.reference())
def vtable_type(self, sym_name: str, asm_data_entry: AsmDataEntry) -> Type:
"""
Parse MWCC vtable data to create a custom struct to represent it.
This format is not well documented, but is briefly explored in this series of posts:
https://web.archive.org/web/20220413174849/http://hacksoflife.blogspot.com/2007/02/c-objects-part-2-single-inheritance.html
"""
size = asm_data_entry.size_range_bytes()[1]
struct = StructDeclaration.unknown(
self.typepool, size=size, align=4, tag_name=sym_name
)
offset = 0
for entry in asm_data_entry.data:
if isinstance(entry, bytes):
# MWCC vtables start with a pointer to a typeid struct (or NULL) and an offset
if len(entry) % 4 != 0:
raise DecompFailure(
f"Unable to parse misaligned vtable data in {sym_name}"
)
for i in range(len(entry) // 4):
field_name = f"{struct.new_field_prefix}{offset:X}"
struct.try_add_field(
Type.reg32(likely_float=False), offset, field_name, size=4
)
offset += 4
else:
entry_name = entry
try:
demangled_field_sym = demangle_codewarrior_parse(entry)
if demangled_field_sym.name.qualified_name is not None:
entry_name = str(demangled_field_sym.name.qualified_name[-1])
except ValueError:
pass
field = struct.try_add_field(
self.address_of_gsym(entry).type,
offset,
name=entry_name,
size=4,
)
assert field is not None
field.known = True
offset += 4
return Type.struct(struct)
def is_function_known_void(self, sym_name: str) -> bool:
"""Return True if the function exists in the context, and has no return value"""
fn = self.typemap.functions.get(sym_name)
if fn is None:
return False
return fn.ret_type is None
def initializer_for_symbol(
self, sym: GlobalSymbol, fmt: Formatter
) -> Optional[str]:
assert sym.asm_data_entry is not None
data = sym.asm_data_entry.data[:]
def read_uint(n: int) -> Optional[int]:
"""Read the next `n` bytes from `data` as an (long) integer"""
assert 0 < n <= 8
if not data or not isinstance(data[0], bytes):
return None
if len(data[0]) < n:
return None
bs = data[0][:n]
data[0] = data[0][n:]
if not data[0]:
del data[0]
value = 0
for b in bs:
value = (value << 8) | b
return value
def read_pointer() -> Optional[Expression]:
"""Read the next label from `data`"""
if not data or not isinstance(data[0], str):
return None
label = data[0]
data.pop(0)
return self.address_of_gsym(label)
def for_type(type: Type) -> Optional[str]:
"""Return the initializer for a single element of type `type`"""
if type.is_struct() or type.is_array():
struct_fields = type.get_initializer_fields()
if not struct_fields:
return None
members = []
for field in struct_fields:
if isinstance(field, int):
# Check that all padding bytes are 0
for i in range(field):
padding = read_uint(1)
if padding != 0:
return None
else:
m = for_type(field)
if m is None:
return None
members.append(m)
return fmt.format_array(members)
if type.is_reg():
size = type.get_size_bytes()
if not size:
return None
if size == 4:
ptr = read_pointer()
if ptr is not None:
return as_type(ptr, type, silent=True).format(fmt)
value = read_uint(size)
if value is not None:
enum_name = type.get_enum_name(value)
if enum_name is not None:
return enum_name
expr = as_type(Literal(value), type, True)
return elide_casts_for_store(expr).format(fmt)
# Type kinds K_FN and K_VOID do not have initializers
return None
return for_type(sym.type)
def find_forward_declares_needed(self, functions: List[FunctionInfo]) -> Set[str]:
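        """
        Return the names of local functions that need a forward declaration:
        functions referenced before their definition in `functions`, plus any
        labels already mentioned in the asm data.
        """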
funcs_seen = set()
forward_declares_needed = self.asm_data.mentioned_labels
for func in functions:
funcs_seen.add(func.stack_info.function.name)
for instr in func.stack_info.function.body:
if not isinstance(instr, Instruction):
continue
for arg in instr.args:
if isinstance(arg, AsmGlobalSymbol):
func_name = arg.symbol_name
elif isinstance(arg, Macro) and isinstance(
arg.argument, AsmGlobalSymbol
):
func_name = arg.argument.symbol_name
else:
continue
if func_name in self.local_functions:
if func_name not in funcs_seen:
forward_declares_needed.add(func_name)
return forward_declares_needed
def global_decls(
self,
fmt: Formatter,
decls: Options.GlobalDeclsEnum,
functions: List[FunctionInfo],
) -> str:
# Format labels from symbol_type_map into global declarations.
# As the initializers are formatted, this may cause more symbols
# to be added to the global_symbol_map.
forward_declares_needed = self.find_forward_declares_needed(functions)
lines = []
processed_names: Set[str] = set()
while True:
names: AbstractSet[str] = self.global_symbol_map.keys()
if decls == Options.GlobalDeclsEnum.ALL:
names |= self.asm_data.values.keys()
names -= processed_names
if not names:
break
for name in sorted(names):
processed_names.add(name)
sym = self.address_of_gsym(name).expr
assert isinstance(sym, GlobalSymbol)
data_entry = sym.asm_data_entry
# Is the label defined in this unit (in the active AsmData file(s))
is_in_file = data_entry is not None or name in self.local_functions
# Is the label externally visible (mentioned in the context file)
is_global = sym.symbol_in_context
# Is the label a symbol in .rodata?
is_const = data_entry is not None and data_entry.is_readonly
if data_entry and data_entry.is_jtbl:
# Skip jump tables
continue
if is_in_file and is_global and sym.type.is_function():
# Skip externally-declared functions that are defined here
continue
if self.local_functions == {name}:
# Skip the function being decompiled if just a single one
continue
if not is_in_file and sym.type_provided:
# Skip externally-declared symbols that are defined in other files
continue
# TODO: Use original MIPSFile ordering for variables
sort_order = (
not sym.type.is_function(),
is_global,
is_in_file,
is_const,
name,
)
qualifier = ""
value: Optional[str] = None
comments = []
# Determine type qualifier: static, extern, or neither
if is_in_file and is_global:
qualifier = ""
elif is_in_file:
qualifier = "static"
else:
qualifier = "extern"
if sym.type.is_function():
comments.append(qualifier)
qualifier = ""
# Try to guess if the symbol is an array (and if it is, its dimension) if
# we have a data entry for it, and the symbol is either not in the typemap
# or was a variable-length array there ("VLA", e.g. `int []`)
# (Otherwise, if the dim is provided by the typemap, we trust it.)
element_type, array_dim = sym.type.get_array()
is_vla = element_type is not None and (
array_dim is None or array_dim <= 0
)
if data_entry and (not sym.type_provided or is_vla):
# The size of the data entry is uncertain, because of padding
                    # between sections. Generally `(max_data_size - min_data_size) < 16`.
min_data_size, max_data_size = data_entry.size_range_bytes()
# The size of the element type (not the size of the array type)
if element_type is None:
element_type = sym.type
# If we don't know the type, we can't guess the array_dim
type_size = element_type.get_size_bytes()
if type_size:
potential_dim, extra_bytes = sym.potential_array_dim(type_size)
if potential_dim == 0 and extra_bytes > 0:
# The type is too big for our data. (not an array)
comments.append(
f"type too large by {fmt.format_int(type_size - extra_bytes)}"
)
elif potential_dim > 1 or is_vla:
# NB: In general, replacing the types of Expressions can be sketchy.
# However, the GlobalSymbol here came from address_of_gsym(), which
# always returns a reference to the element_type.
array_dim = potential_dim
sym.type = Type.array(element_type, array_dim)
if potential_dim != 0 and extra_bytes > 0:
comments.append(
f"extra bytes: {fmt.format_int(extra_bytes)}"
)
# Try to convert the data from .data/.rodata into an initializer
if data_entry and not data_entry.is_bss:
value = self.initializer_for_symbol(sym, fmt)
if value is None:
# This warning helps distinguish .bss symbols from .data/.rodata,
# IDO only puts symbols in .bss if they don't have any initializer
comments.append("unable to generate initializer")
if is_const:
comments.append("const")
# Float & string constants are almost always inlined and can be omitted
if sym.is_string_constant():
continue
if array_dim is None and sym.type.is_likely_float():
continue
# In "none" mode, do not emit any decls
if decls == Options.GlobalDeclsEnum.NONE:
continue
# In modes except "all", skip the decl if the context file already had an initializer
if decls != Options.GlobalDeclsEnum.ALL and sym.initializer_in_typemap:
continue
# In modes except "all", skip vtable decls when compiling C++
if (
decls != Options.GlobalDeclsEnum.ALL
and self.target.language == Target.LanguageEnum.CXX
and name.startswith("__vt__")
):
continue
if (
sym.type.is_function()
and decls != Options.GlobalDeclsEnum.ALL
and name in self.local_functions
and name not in forward_declares_needed
):
continue
qualifier = f"{qualifier} " if qualifier else ""
value = f" = {value}" if value else ""
lines.append(
(
sort_order,
fmt.with_comments(
f"{qualifier}{sym.type.to_decl(name, fmt)}{value};",
comments,
)
+ "\n",
)
)
lines.sort()
return "".join(line for _, line in lines)
def narrow_func_call_outputs(
function: Function,
global_info: GlobalInfo,
) -> None:
"""
Modify the `outputs` list of function call Instructions using the context file.
For now, this only handles known-void functions, but in the future it could
be extended to select a specific register subset based on type.
"""
for instr in function.body:
if (
isinstance(instr, Instruction)
and isinstance(instr.function_target, AsmGlobalSymbol)
and global_info.is_function_known_void(instr.function_target.symbol_name)
):
instr.outputs.clear()
def translate_to_ast(
function: Function,
flow_graph: FlowGraph,
options: Options,
global_info: GlobalInfo,
) -> FunctionInfo:
"""
Given a function, produce a FlowGraph that both contains control-flow
information and has AST transformations for each block of code and
branch condition.
"""
# Initialize info about the function.
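    # (Overall flow: set up stack/register state, seed arguments from the ABI, translate
    # each basic block, infer the return register unless the function is void, then unify
    # parameter types and assign phi nodes.)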
stack_info = get_stack_info(function, global_info, flow_graph)
start_regs: RegInfo = RegInfo(stack_info=stack_info)
arch = global_info.arch
start_regs[arch.stack_pointer_reg] = GlobalSymbol("sp", type=Type.ptr())
for reg in arch.saved_regs:
start_regs[reg] = stack_info.saved_reg_symbol(reg.register_name)
fn_sym = global_info.address_of_gsym(function.name).expr
assert isinstance(fn_sym, GlobalSymbol)
fn_type = fn_sym.type
fn_type.unify(Type.function())
fn_sig = Type.ptr(fn_type).get_function_pointer_signature()
assert fn_sig is not None, "fn_type is known to be a function"
return_type = fn_sig.return_type
stack_info.is_variadic = fn_sig.is_variadic
def make_arg(offset: int, type: Type) -> PassedInArg:
assert offset % 4 == 0
return PassedInArg(offset, copied=False, stack_info=stack_info, type=type)
abi = arch.function_abi(
fn_sig,
likely_regs={reg: True for reg in arch.argument_regs},
for_call=False,
)
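    # Slots in abi.arg_slots are treated as known parameters (and seeded into the entry
    # register state); abi.possible_slots are only seeded, not registered as parameters.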
for slot in abi.arg_slots:
stack_info.add_known_param(slot.offset, slot.name, slot.type)
if slot.reg is not None:
start_regs.set_with_meta(
slot.reg, make_arg(slot.offset, slot.type), RegMeta(uninteresting=True)
)
for slot in abi.possible_slots:
if slot.reg is not None:
start_regs.set_with_meta(
slot.reg, make_arg(slot.offset, slot.type), RegMeta(uninteresting=True)
)
if options.reg_vars == ["saved"]:
reg_vars = arch.saved_regs
elif options.reg_vars == ["most"]:
reg_vars = arch.saved_regs + arch.simple_temp_regs
elif options.reg_vars == ["all"]:
reg_vars = arch.saved_regs + arch.simple_temp_regs + arch.argument_regs
else:
reg_vars = [
stack_info.function.reg_formatter.parse(x, arch) for x in options.reg_vars
]
for reg in reg_vars:
reg_name = stack_info.function.reg_formatter.format(reg)
stack_info.add_register_var(reg, reg_name)
if options.debug:
print(stack_info)
print("\nNow, we attempt to translate:")
used_phis: List[PhiExpr] = []
return_blocks: List[BlockInfo] = []
translate_graph_from_block(
flow_graph.entry_node(),
start_regs,
stack_info,
used_phis,
return_blocks,
options,
)
for reg in arch.base_return_regs:
propagate_register_meta(flow_graph.nodes, reg)
return_reg: Optional[Register] = None
if not options.void and not return_type.is_void():
return_reg = determine_return_register(
return_blocks, fn_sym.type_provided, arch
)
if return_reg is not None:
for b in return_blocks:
if return_reg in b.final_register_states:
ret_val = b.final_register_states[return_reg]
ret_val = as_type(ret_val, return_type, True)
ret_val.use()
b.return_value = ret_val
else:
return_type.unify(Type.void())
if not fn_sig.params_known:
while len(fn_sig.params) < len(stack_info.arguments):
fn_sig.params.append(FunctionParam())
for param, arg in zip(fn_sig.params, stack_info.arguments):
param.type.unify(arg.type)
if not param.name:
param.name = arg.format(Formatter())
assign_phis(used_phis, stack_info)
resolve_types_late(stack_info)
if options.pdb_translate:
import pdb
v: Dict[str, object] = {}
fmt = Formatter()
for local in stack_info.local_vars:
var_name = local.format(fmt)
v[var_name] = local
for temp in stack_info.temp_vars:
if temp.need_decl():
var_name = temp.expr.var.format(fmt)
v[var_name] = temp.expr
for phi in stack_info.phi_vars:
assert phi.name is not None
v[phi.name] = phi
pdb.set_trace()
return FunctionInfo(stack_info, flow_graph, return_type, fn_sym)
|
from PySock import client
def abc(data,con):
    print(f"Message from {data['sender_name']} : {data['data']}")
con.SEND("test","Hurrah! it's working.")
def client_msg(data):
    print(f"Message from : {data['sender_name']} => {data['data']}")
c = client(client_name = "swat", debug = True)
c.CLIENT("localhost",8888)
c.CREATE_CHANNEL("test")
while True:
c.LISTEN( channel = "test", function = abc, args = (c,) )
c.LISTEN( channel = "DSP_MSG", function = client_msg)
|
from PySock import client
def abc(data,con):
print(f"Message from {data['sender_name']} : {data['data']}")
con.SEND("test","Hurrah! it's working.")
def client_msg(data):
print(f"Message from : {data['sender_name']} => {data['data']}")
c = client(client_name = "swat", debug = True)
c.CLIENT("localhost",8888)
c.CREATE_CHANNEL("test")
while True:
c.LISTEN( channel = "test", function = abc, args = (c,) )
c.LISTEN( channel = "DSP_MSG", function = client_msg)
|
import pandas as pd
from sklearn.model_selection import train_test_split
random_state = 100
data = pd.read_csv("~/headlinegen/data/nytime_front_page.csv")
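# The next line drops the last five whitespace-separated tokens from each title
# (presumably trimming a trailing source/byline suffix).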
data['title'] = data['title'].apply(lambda x: ' '.join(x.split(' ')[:-5]))
lens = data["content"].apply(lambda x: len(x.split(" "))).nlargest(10)
print(
    f'max_input_len = {data["content"].apply(lambda x: len(x.split(" "))).min()}')
print(
    f'max_output_len = {data["title"].apply(lambda x: len(x.split(" "))).max()}')
print(lens)
# train, valid_test = train_test_split(data,
# test_size=0.2,
# random_state=random_state,
# shuffle=True)
# valid, test = train_test_split(valid_test,
# test_size=0.5,
# random_state=random_state,
# shuffle=True)
# print(train.shape, valid.shape, test.shape)
# for dataset, prefix in zip([train, valid, test], ['train', 'val', 'test']):
# for columnname, suffix in zip(['content', 'title'], ['source', 'target']):
# filename = "/Users/satyasiddharthdash/headlinegen/data/nytimes/" + prefix + '.' + suffix
# with open(filename, 'w') as outfile:
# outfile.write(dataset[columnname].str.cat(sep='\n'))
|
import pandas as pd
from sklearn.model_selection import train_test_split
random_state = 100
data = pd.read_csv("~/headlinegen/data/nytime_front_page.csv")
data['title'] = data['title'].apply(lambda x: ' '.join(x.split(' ')[:-5]))
lens = data["content"].apply(lambda x: len(x.split(" "))).nlargest(10)
print(
f'max_input_len = {data["content"].apply(lambda x: len(x.split(" "))).min()}')
print(
f'max_output_len = {data["title"].apply(lambda x: len(x.split(" "))).max()}')
print(lens)
# train, valid_test = train_test_split(data,
# test_size=0.2,
# random_state=random_state,
# shuffle=True)
# valid, test = train_test_split(valid_test,
# test_size=0.5,
# random_state=random_state,
# shuffle=True)
# print(train.shape, valid.shape, test.shape)
# for dataset, prefix in zip([train, valid, test], ['train', 'val', 'test']):
# for columnname, suffix in zip(['content', 'title'], ['source', 'target']):
# filename = "/Users/satyasiddharthdash/headlinegen/data/nytimes/" + prefix + '.' + suffix
# with open(filename, 'w') as outfile:
# outfile.write(dataset[columnname].str.cat(sep='\n'))
|
import os
from functools import wraps
from flask import flash, redirect, render_template, url_for, current_app, Markup, request
from flask_login import login_user, login_required, logout_user, current_user
from app.auth import bp
from app.auth.forms import SignUpForm, RegistrationForm, LoginForm, ResetPasswordForm, NewPasswordForm, UserForm
from app.auth.email import send_email
from itsdangerous import URLSafeTimedSerializer
from app.models import User
from app import db
def offer_to_log_in(email: str):
    href = f"""<a href="{url_for('auth.login', email=email)}" class="danger-link">Log In</a>"""
    message = f"The email {email} is already in use. Please {href}."
flash(Markup(message), 'danger')
def get_email_from_token(token):
serializer = URLSafeTimedSerializer(current_app.config['SECRET_KEY'])
email = serializer.loads(token, salt=current_app.config['SECURITY_PASSWORD_SALT'])
return email
def redirect_authenticated(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if current_user.is_authenticated and current_user.email == get_email_from_token(kwargs.get('token')):
return redirect(url_for('main.index'))
return f(*args, **kwargs)
return decorated_function
@bp.route('/signup', methods=['GET', 'POST'])
async def signup():
form = SignUpForm()
is_busy = bool(User.query.filter_by(email=form.email.data).first())
if form.validate_on_submit() and not is_busy:
res = await send_email(form.email.data, goal='registration')
print(res)
        flash('To continue registration, follow the link in the email.', 'info')
return redirect(url_for('main.index'))
elif is_busy:
offer_to_log_in(form.email.data)
return render_template('auth/signup.html', form=form)
@bp.route('/register/<token>', methods=['GET', 'POST'])
@redirect_authenticated
def register(token):
form = RegistrationForm()
email = get_email_from_token(token)
if bool(User.query.filter_by(email=email).first()):
offer_to_log_in(email)
return redirect(url_for('main.index'))
form.email.data = email
if form.validate_on_submit():
new_user = User(
email=email, # noqa
first_name=form.first_name.data, # noqa
last_name=form.last_name.data, # noqa
is_admin=True if email == current_app.config['ADMIN_EMAIL'] else False # noqa
)
new_user.set_password(form.password.data)
        print(f'{new_user.is_admin=}')
db.session.add(new_user)
db.session.commit()
if not os.path.isdir(os.path.join(current_app.config['UPLOAD_PATH'], str(new_user.id))):
os.mkdir(os.path.join(current_app.config['UPLOAD_PATH'], str(new_user.id)))
flash('You can log in', 'success')
return redirect(url_for('main.index'))
return render_template('auth/register.html', form=form)
@bp.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if email := request.args.get('email'):
form.email.data = email
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if not user:
flash(f'User with email {form.email.data} not registered', 'danger')
return redirect(url_for('auth.signup'))
elif not user.check_password(form.password.data):
flash('Wrong password', 'danger')
return redirect(url_for('main.index'))
else:
login_user(user, remember=form.remember_me.data)
flash(f'Hi, {user.first_name}!', 'success')
return redirect(url_for('main.index'))
return render_template('auth/login.html', form=form)
@bp.route('/log_out', methods=['GET', 'POST'])
@login_required
def log_out():
logout_user()
flash('You are logged out', 'info')
return redirect(url_for('main.index'))
@bp.route('/reset_password', methods=['GET', 'POST'])
async def reset_password():
form = ResetPasswordForm()
if current_user.is_authenticated:
form.email.data = current_user.email
form.email.render_kw = {'disabled': True}
is_present = bool(User.query.filter_by(email=form.email.data).first())
if form.validate_on_submit():
if is_present:
            await send_email(form.email.data, goal='reset')
            flash('To reset your password, follow the link in the email.', 'info')
return redirect(url_for('main.index'))
else:
            href = f"""<a href="{url_for('auth.signup', email=form.email.data)}" class="danger-link">Sign up</a>"""
            message = f"The email {form.email.data} was not found. Please {href} or use a correct email."
flash(Markup(message), 'danger')
return render_template('auth/signup.html', form=form)
@bp.route('/new_password/<token>', methods=['GET', 'POST'])
def new_password(token):
form = NewPasswordForm()
serializer = URLSafeTimedSerializer(current_app.config['SECRET_KEY'])
email = serializer.loads(token, salt=current_app.config['SECURITY_PASSWORD_SALT'])
form.email.data = email
user = User.query.filter_by(email=email).first()
if form.validate_on_submit():
user.set_password(form.password.data)
db.session.commit()
flash('Password was changed. You can log in', 'success')
return redirect(url_for('main.index'))
elif form.is_submitted():
return render_template('auth/new_password.html', form=form), 422
return render_template('auth/new_password.html', form=form)
@bp.route('/user_page', methods=['GET', 'POST'])
@login_required
def user_page():
form = UserForm(obj=current_user)
if form.validate_on_submit():
is_changed = False
for field in 'email', 'first_name', 'last_name':
            if getattr(form, field).data != getattr(current_user, field):
setattr(current_user, field, getattr(form, field).data)
is_changed = True
if is_changed:
db.session.commit()
return render_template('auth/user_page.html', form=form)
|
import os
from functools import wraps
from flask import flash, redirect, render_template, url_for, current_app, Markup, request
from flask_login import login_user, login_required, logout_user, current_user
from app.auth import bp
from app.auth.forms import SignUpForm, RegistrationForm, LoginForm, ResetPasswordForm, NewPasswordForm, UserForm
from app.auth.email import send_email
from itsdangerous import URLSafeTimedSerializer
from app.models import User
from app import db
def offer_to_log_in(email: str):
href = f"""<a href="{url_for('auth.login', email=email)}" class="danger-link">Log In</a>"""
    message = f"The email {email} is already in use. Please {href}."
flash(Markup(message), 'danger')
def get_email_from_token(token):
serializer = URLSafeTimedSerializer(current_app.config['SECRET_KEY'])
email = serializer.loads(token, salt=current_app.config['SECURITY_PASSWORD_SALT'])
return email
def redirect_authenticated(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if current_user.is_authenticated and current_user.email == get_email_from_token(kwargs.get('token')):
return redirect(url_for('main.index'))
return f(*args, **kwargs)
return decorated_function
@bp.route('/signup', methods=['GET', 'POST'])
async def signup():
form = SignUpForm()
is_busy = bool(User.query.filter_by(email=form.email.data).first())
if form.validate_on_submit() and not is_busy:
res = await send_email(form.email.data, goal='registration')
print(res)
        flash('To continue registration, follow the link in the email.', 'info')
return redirect(url_for('main.index'))
elif is_busy:
offer_to_log_in(form.email.data)
return render_template('auth/signup.html', form=form)
@bp.route('/register/<token>', methods=['GET', 'POST'])
@redirect_authenticated
def register(token):
form = RegistrationForm()
email = get_email_from_token(token)
if bool(User.query.filter_by(email=email).first()):
offer_to_log_in(email)
return redirect(url_for('main.index'))
form.email.data = email
if form.validate_on_submit():
new_user = User(
email=email, # noqa
first_name=form.first_name.data, # noqa
last_name=form.last_name.data, # noqa
is_admin=True if email == current_app.config['ADMIN_EMAIL'] else False # noqa
)
new_user.set_password(form.password.data)
        print(f'{new_user.is_admin=}')
db.session.add(new_user)
db.session.commit()
if not os.path.isdir(os.path.join(current_app.config['UPLOAD_PATH'], str(new_user.id))):
os.mkdir(os.path.join(current_app.config['UPLOAD_PATH'], str(new_user.id)))
flash('You can log in', 'success')
return redirect(url_for('main.index'))
return render_template('auth/register.html', form=form)
@bp.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if email := request.args.get('email'):
form.email.data = email
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if not user:
flash(f'User with email {form.email.data} not registered', 'danger')
return redirect(url_for('auth.signup'))
elif not user.check_password(form.password.data):
flash('Wrong password', 'danger')
return redirect(url_for('main.index'))
else:
login_user(user, remember=form.remember_me.data)
flash(f'Hi, {user.first_name}!', 'success')
return redirect(url_for('main.index'))
return render_template('auth/login.html', form=form)
@bp.route('/log_out', methods=['GET', 'POST'])
@login_required
def log_out():
logout_user()
flash('You are logged out', 'info')
return redirect(url_for('main.index'))
@bp.route('/reset_password', methods=['GET', 'POST'])
async def reset_password():
form = ResetPasswordForm()
if current_user.is_authenticated:
form.email.data = current_user.email
form.email.render_kw = {'disabled': True}
is_present = bool(User.query.filter_by(email=form.email.data).first())
if form.validate_on_submit():
if is_present:
            await send_email(form.email.data, goal='reset')
            flash('To reset your password, follow the link in the email.', 'info')
return redirect(url_for('main.index'))
else:
href = f"""<a href="{url_for('auth.signup', email=form.email.data)}" class="danger-link">Sign up</a>"""
            message = f"The email: {form.email.data} was not found. Please {href} or use a correct email."
flash(Markup(message), 'danger')
return render_template('auth/signup.html', form=form)
@bp.route('/new_password/<token>', methods=['GET', 'POST'])
def new_password(token):
form = NewPasswordForm()
serializer = URLSafeTimedSerializer(current_app.config['SECRET_KEY'])
email = serializer.loads(token, salt=current_app.config['SECURITY_PASSWORD_SALT'])
form.email.data = email
user = User.query.filter_by(email=email).first()
if form.validate_on_submit():
user.set_password(form.password.data)
db.session.commit()
flash('Password was changed. You can log in', 'success')
return redirect(url_for('main.index'))
elif form.is_submitted():
return render_template('auth/new_password.html', form=form), 422
return render_template('auth/new_password.html', form=form)
@bp.route('/user_page', methods=['GET', 'POST'])
@login_required
def user_page():
form = UserForm(obj=current_user)
if form.validate_on_submit():
is_changed = False
for field in 'email', 'first_name', 'last_name':
            if getattr(form, field).data != getattr(current_user, field):
setattr(current_user, field, getattr(form, field).data)
is_changed = True
if is_changed:
db.session.commit()
return render_template('auth/user_page.html', form=form)
|
import ads
ads.config.token = 'my token'
import numpy as np
# Filenames
## Enter the filename for first-author publications here:
first_author = "first_author.bib"
## Enter the filename for co-authored publications here:
co_author = "co_author.bib"
# Function Declarations
def extract_bibcodes(filename):
"""Takes a .bib filename, looks for bibcodes on the first line of each entry, and parses into a list."""
f = open(filename)
full_list = f.readlines()
bibcodes = []
# drop yCat, arxiv, PhDT, and other non-refereed entries
# Workaround, since I couldn't get the API to accept property:refereed or property=refereed to work when searching
exclude = ['arXiv','tmp','yCat','PhDT','AAS','ASPC','BSRSL','conf','EPJWC','IAUFM','IAUGA','IAUS','hst','iue','jwst','spzr','prop']
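    # Each entry header is expected to look like "@ARTICLE{2020ApJ...900...10S," (hypothetical
    # bibcode): the text between "{" and the trailing comma is taken as the bibcode.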
for line in full_list:
if line[0] == "@":
if not any(x in line for x in exclude):
bibcodes.append(line.split("{")[1].replace(",\n",""))
return bibcodes
def author_format(authors):
if len(authors) == 1:
a = authors[0]
elif len(authors) == 2:
        a = authors[0] + r" \& " + authors[1]
else:
a = authors[0] + ' et al.'
return a
def journal_name(bibcode):
    return bibcode.split(".")[0][4:].replace("&", r"\&")
def adsurl(bibcode):
return 'https://ui.adsabs.harvard.edu/abs/' + bibcode
def latex_title_greek(title):
greek_dict = {"α":r"$\alpha$", "β":r"$\beta$", "γ":r"$\gamma$", "δ":r"$\delta$", "ε":r"$\epsilon$", "ζ":r"$\zeta$", "η":r"$\eta$", "ι":r"$\iota$", "θ":r"$\theta$", "κ":r"$\kappa$", "λ":r"$\lambda$", "μ":r"$\mu$", "ν":r"$\nu$", "ξ":r"$\xi$", "π":r"$\pi$", "ρ":r"$\rho$", "σ":r"$\sigma$", "τ":r"$\tau$", "φ":r"$\phi$", "χ":r"$\chi$", "ψ":r"$\psi$", "ω":r"$\omega$"}
for key in greek_dict.keys():
title = title.replace(key, greek_dict[key])
return title
def citation_formatter(cites):
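    # Returns an empty string for missing or zero counts; otherwise "Cited: N", where
    # \phantom{1} pads single-digit counts so they align with two-digit ones.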
if cites is None:
return ""
elif cites == 0:
return ""
else:
if cites < 10:
return f"Cited: \\phantom" + "{1}" + f"{cites}"
else:
return f"Cited: {cites}"
def latex_strings(paper_list):
output = []
n = len(paper_list)
for i,p in enumerate(paper_list):
title = p.title[0]
entry = "\\textbf{" + f"{n-i}" + "}. " + "\\" + "href{" + adsurl(p.bibcode) + "}{" + f"{latex_title_greek(title)}" + "}" + "\\\\"
entry += author_format(p.author)
entry += f" ({p.year}) "
entry += journal_name(p.bibcode)
entry += f" {p.volume},"
entry += f" {p.page[0]}."
entry += ' \\hspace*{\\fill}' + citation_formatter(p.citation_count) + "\\vspace{1mm}" + "\\\\"
output.append(entry)
output[0] = "\\noindent " + output[0]
return output
def export_latex(filename,latex_string_list):
f = open(filename,"w")
for line in latex_string_list:
f.write(line+"\n")
f.close()
return "Saved."
# Parse bibcodes
print("Parsing bibcodes...")
bibcodes = extract_bibcodes(first_author)
co_bibcodes = extract_bibcodes(co_author)
# Search for papers and their attributes from bibcodes
print("Querying the ADS API for paper metadata... This may take a while if there are many entries...")
papers = [list(ads.SearchQuery(bibcode=bibcode, fl=['bibcode', 'title', 'author', 'year', 'volume', 'page', 'citation_count']))[0] for bibcode in bibcodes]
co_papers = [list(ads.SearchQuery(bibcode=bibcode, fl=['bibcode', 'title', 'author', 'year', 'volume', 'page', 'citation_count']))[0] for bibcode in co_bibcodes]
# Remove Errata
## Because Ew. And if anyone cares about the paper content they'll discover errata when they visit the ADS pages.
print("Dropping Errata, Corrigenda...")
papers = [p for p in papers if "Erratum" not in p.title[0] and "Corrigendum" not in p.title[0]]
co_papers = [p for p in co_papers if "Erratum" not in p.title[0] and "Corrigendum" not in p.title[0]]
# Sum citations
first_author_cites = 0
co_author_cites = 0
for p in papers:
if p.citation_count is not None:
first_author_cites += p.citation_count
for p in co_papers:
if p.citation_count is not None:
co_author_cites += p.citation_count
# Compile LaTeX string
print("Compiling LaTeX strings...")
output = latex_strings(papers)
co_output = latex_strings(co_papers)
# Export to LaTeX
print("Exporting to LaTeX...")
export_latex("first_author.tex",output)
export_latex("co_author.tex",co_output)
print(f"\nThere are {len(papers)} first-author papers, and {len(co_papers)} co-authored papers.")
print(f"They have a total of {first_author_cites} and {co_author_cites} citations, respectively.")
print("\n\n.tex files prepared. Now run:\n")
print("\t pdflatex publications.tex\n\n\n")
|
import ads
ads.config.token = 'my token'
import numpy as np
# Filenames
## Enter the filename for first-author publications here:
first_author = "first_author.bib"
## Enter the filename for co-authored publications here:
co_author = "co_author.bib"
# Function Declarations
def extract_bibcodes(filename):
"""Takes a .bib filename, looks for bibcodes on the first line of each entry, and parses into a list."""
f = open(filename)
full_list = f.readlines()
bibcodes = []
# drop yCat, arxiv, PhDT, and other non-refereed entries
# Workaround, since I couldn't get the API to accept property:refereed or property=refereed to work when searching
exclude = ['arXiv','tmp','yCat','PhDT','AAS','ASPC','BSRSL','conf','EPJWC','IAUFM','IAUGA','IAUS','hst','iue','jwst','spzr','prop']
for line in full_list:
if line[0] == "@":
if not any(x in line for x in exclude):
bibcodes.append(line.split("{")[1].replace(",\n",""))
return bibcodes
def author_format(authors):
if len(authors) == 1:
a = authors[0]
elif len(authors) == 2:
        a = authors[0] + r" \& " + authors[1]
else:
a = authors[0] + ' et al.'
return a
def journal_name(bibcode):
    return bibcode.split(".")[0][4:].replace("&", r"\&")
def adsurl(bibcode):
return 'https://ui.adsabs.harvard.edu/abs/' + bibcode
def latex_title_greek(title):
greek_dict = {"α":r"$\alpha$", "β":r"$\beta$", "γ":r"$\gamma$", "δ":r"$\delta$", "ε":r"$\epsilon$", "ζ":r"$\zeta$", "η":r"$\eta$", "ι":r"$\iota$", "θ":r"$\theta$", "κ":r"$\kappa$", "λ":r"$\lambda$", "μ":r"$\mu$", "ν":r"$\nu$", "ξ":r"$\xi$", "π":r"$\pi$", "ρ":r"$\rho$", "σ":r"$\sigma$", "τ":r"$\tau$", "φ":r"$\phi$", "χ":r"$\chi$", "ψ":r"$\psi$", "ω":r"$\omega$"}
for key in greek_dict.keys():
title = title.replace(key, greek_dict[key])
return title
def citation_formatter(cites):
if cites is None:
return ""
elif cites == 0:
return ""
else:
if cites < 10:
return f"Cited: \\phantom" + "{1}" + f"{cites}"
else:
return f"Cited: {cites}"
def latex_strings(paper_list):
output = []
n = len(paper_list)
for i,p in enumerate(paper_list):
title = p.title[0]
entry = "\\textbf{" + f"{n-i}" + "}. " + '\\' + 'href{' + adsurl(p.bibcode) + "}{" + f"{latex_title_greek(title)}" + "}" + "\\\\"
entry += author_format(p.author)
entry += f" ({p.year}) "
entry += journal_name(p.bibcode)
entry += f" {p.volume},"
entry += f" {p.page[0]}."
entry += ' \\hspace*{\\fill}' + citation_formatter(p.citation_count) + "\\vspace{1mm}" + "\\\\"
output.append(entry)
output[0] = "\\noindent " + output[0]
return output
def export_latex(filename,latex_string_list):
f = open(filename,"w")
for line in latex_string_list:
f.write(line+"\n")
f.close()
return "Saved."
# Parse bibcodes
print("Parsing bibcodes...")
bibcodes = extract_bibcodes(first_author)
co_bibcodes = extract_bibcodes(co_author)
# Search for papers and their attributes from bibcodes
print("Querying the ADS API for paper metadata... This may take a while if there are many entries...")
papers = [list(ads.SearchQuery(bibcode=bibcode, fl=['bibcode', 'title', 'author', 'year', 'volume', 'page', 'citation_count']))[0] for bibcode in bibcodes]
co_papers = [list(ads.SearchQuery(bibcode=bibcode, fl=['bibcode', 'title', 'author', 'year', 'volume', 'page', 'citation_count']))[0] for bibcode in co_bibcodes]
# Remove Errata
## Because Ew. And if anyone cares about the paper content they'll discover errata when they visit the ADS pages.
print("Dropping Errata, Corrigenda...")
papers = [p for p in papers if "Erratum" not in p.title[0] and "Corrigendum" not in p.title[0]]
co_papers = [p for p in co_papers if "Erratum" not in p.title[0] and "Corrigendum" not in p.title[0]]
# Sum citations
first_author_cites = 0
co_author_cites = 0
for p in papers:
if p.citation_count is not None:
first_author_cites += p.citation_count
for p in co_papers:
if p.citation_count is not None:
co_author_cites += p.citation_count
# Compile LaTeX string
print("Compiling LaTeX strings...")
output = latex_strings(papers)
co_output = latex_strings(co_papers)
# Export to LaTeX
print("Exporting to LaTeX...")
export_latex("first_author.tex",output)
export_latex("co_author.tex",co_output)
print(f"\nThere are {len(papers)} first-author papers, and {len(co_papers)} co-authored papers.")
print(f"They have a total of {first_author_cites} and {co_author_cites} citations, respectively.")
print("\n\n.tex files prepared. Now run:\n")
print("\t pdflatex publications.tex\n\n\n")
|
import traceback
import argparse
import numpy as np
from src import NeuralNetwork, generateExample, getTensorExample
from typing import *
def get_args() -> argparse.Namespace:
"""Set-up the argument parser
Returns:
argparse.Namespace:
"""
parser = argparse.ArgumentParser(
description='Project 2 for the Deep Learning class (COSC 525). '
'Involves the development of a Convolutional Neural Network.',
add_help=False)
# Required Args
required_args = parser.add_argument_group('Required Arguments')
required_args.add_argument('-d', '--dataset', required=True,
help="The datasets to train the network on. "
"Options: [example1, example2, example3]")
# Optional args
optional_args = parser.add_argument_group('Optional Arguments')
optional_args.add_argument("-h", "--help", action="help", help="Show this help message and exit")
return parser.parse_args()
def main():
"""This is the main function of main.py
Example:
python main.py --dataset example1
"""
# Initializing
args = get_args()
# Load the configurations
dataset_type = args.dataset
if dataset_type in ('example1', 'example2', 'example3'):
example_num = int(dataset_type[-1])
inputs, targets, layers = generateExample(example_num)
getTensorExample(example_num)
else:
raise ValueError('Invalid dataset type')
# ------- Start of Code ------- #
# # Initialize the network # #
netWork = NeuralNetwork(input_size=inputs.shape, loss_function="square_error",
learning_rate=100, input_channels=1)
# Add layers
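    # Weight layout assumed below: for a Conv layer, each kernel's per-channel weights are
    # flattened and concatenated, with the bias appended as the final element; Dense layers
    # likewise append the bias after the flattened weight vector.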
for layer in layers:
if layer['type'] == 'Conv':
weights = []
for k_ind in range(layer['num_kernels']):
kernels = [k_w.flatten() for k_w in layer['weights'][k_ind]]
kernel_weights = np.concatenate((*kernels,
layer['biases'][k_ind]))
weights.append(kernel_weights)
weights = np.array(weights)
netWork.addConvLayer(num_kernels=layer['num_kernels'],
kernel_size=layer['kernel_size'],
activation=layer['activation'],
weights=weights)
elif layer['type'] == 'Flat':
netWork.addFlattenLayer()
elif layer['type'] == 'MaxPool':
netWork.addMaxPoolLayer(kernel_size=layer['kernel_size'])
elif layer['type'] == 'Dense':
weights = np.array([np.concatenate((layer['weights'].flatten(), layer['bias']))])
netWork.addFCLayer(num_neurons=targets.shape[0],
activation=layer['activation'],
weights=weights)
else:
            raise ValueError(f'Invalid layer type: {layer["type"]}')
# # Train the network # #
# First Feed forward
outputs = netWork.calculate(inputs=inputs)
print("----------- Custom Model -----------")
print(f"model output before:\n{outputs}")
# Calculate Loss derivative
loss_der = netWork.loss_derivative(outputs, targets)
loss = netWork.calculate_loss(np.array([inputs]), targets)
netWork.train(np.array([inputs]), targets) # Train the network
outputs = netWork.calculate(inputs=inputs)
print(f"model output after: \n{outputs}")
if example_num == 1:
print('1st convolutional layer, kernel weights:')
print(netWork.layers[0].kernels[0][0][0].weights[:-1].reshape((3, 3)))
print('1st convolutional layer, kernel bias:')
print(np.array([netWork.layers[0].kernels[0][0][0].weights[-1]]))
print('fully connected layer weights:')
print(netWork.layers[2].neurons[0].weights[:-1])
print('fully connected layer bias:')
print(np.array([netWork.layers[2].neurons[0].weights[-1]]))
elif example_num == 2:
print('1st convolutional layer, 1st kernel weights:')
print(netWork.layers[0].kernels[0][0][0].weights[:-1].reshape((3, 3)))
print('1st convolutional layer, 1st kernel bias:')
print(np.array([netWork.layers[0].kernels[0][0][0].weights[-1]]))
        print('1st convolutional layer, 2nd kernel weights:')
        print(netWork.layers[0].kernels[1][0][0].weights[:-1].reshape((3, 3)))
        print('1st convolutional layer, 2nd kernel bias:')
print(np.array([netWork.layers[0].kernels[1][0][0].weights[-1]]))
print('2nd convolutional layer, 1st kernel weights:')
print(netWork.layers[1].kernels[0][0][0].weights[:-1].reshape((2, 3, 3)))
print('2nd convolutional layer, 1st kernel bias:')
print(np.array([netWork.layers[1].kernels[0][0][0].weights[-1]]))
print('fully connected layer weights:')
print(netWork.layers[3].neurons[0].weights[:-1])
print('fully connected layer bias:')
print(np.array([netWork.layers[3].neurons[0].weights[-1]]))
elif example_num == 3:
print('1st convolutional layer, 1st kernel weights:')
print(netWork.layers[0].kernels[0][0][0].weights[:-1].reshape((3, 3)))
print('1st convolutional layer, 1st kernel bias:')
print(np.array([netWork.layers[0].kernels[0][0][0].weights[-1]]))
        print('1st convolutional layer, 2nd kernel weights:')
        print(netWork.layers[0].kernels[1][0][0].weights[:-1].reshape((3, 3)))
        print('1st convolutional layer, 2nd kernel bias:')
print(np.array([netWork.layers[0].kernels[1][0][0].weights[-1]]))
print('fully connected layer weights:')
print(netWork.layers[3].neurons[0].weights[:-1])
print('fully connected layer bias:')
print(np.array([netWork.layers[3].neurons[0].weights[-1]]))
else:
raise ValueError(f'Invalid example number: {example_num}')
if __name__ == '__main__':
try:
main()
except Exception as e:
print(str(e) + '\n' + str(traceback.format_exc()))
raise e
# # First Layer (Convolutional)
# weights_L1 = np.array(
# [np.concatenate((l1k1.flatten(), l1b1)), np.concatenate((l1k2.flatten(), l1b2))])
# netWork.addConvLayer(num_kernels=2, kernel_size=3, activation="logistic", weights=weights_L1)
# # Second Layer (Convolutional)
# weights_L2 = np.array([np.concatenate((l2c1.flatten(), l2c2.flatten(), l2b))])
# netWork.addConvLayer(num_kernels=1, kernel_size=3, activation="logistic", weights=weights_L2)
# # Third Layer (Fully Connected)
# netWork.addFlattenLayer()
# weights_L3 = np.array([np.concatenate((l3.flatten(), l3b))])
# netWork.addFCLayer(num_neurons=1, activation="logistic", weights=weights_L3)
|
import traceback
import argparse
import numpy as np
from src import NeuralNetwork, generateExample, getTensorExample
from typing import *
def get_args() -> argparse.Namespace:
"""Set-up the argument parser
Returns:
argparse.Namespace:
"""
parser = argparse.ArgumentParser(
description='Project 2 for the Deep Learning class (COSC 525). '
'Involves the development of a Convolutional Neural Network.',
add_help=False)
# Required Args
required_args = parser.add_argument_group('Required Arguments')
required_args.add_argument('-d', '--dataset', required=True,
help="The datasets to train the network on. "
"Options: [example1, example2, example3]")
# Optional args
optional_args = parser.add_argument_group('Optional Arguments')
optional_args.add_argument("-h", "--help", action="help", help="Show this help message and exit")
return parser.parse_args()
def main():
"""This is the main function of main.py
Example:
python main.py --dataset example1
"""
# Initializing
args = get_args()
# Load the configurations
dataset_type = args.dataset
if dataset_type in ('example1', 'example2', 'example3'):
example_num = int(dataset_type[-1])
inputs, targets, layers = generateExample(example_num)
getTensorExample(example_num)
else:
raise ValueError('Invalid dataset type')
# ------- Start of Code ------- #
# # Initialize the network # #
netWork = NeuralNetwork(input_size=inputs.shape, loss_function="square_error",
learning_rate=100, input_channels=1)
# Add layers
for layer in layers:
if layer['type'] == 'Conv':
weights = []
for k_ind in range(layer['num_kernels']):
kernels = [k_w.flatten() for k_w in layer['weights'][k_ind]]
kernel_weights = np.concatenate((*kernels,
layer['biases'][k_ind]))
weights.append(kernel_weights)
weights = np.array(weights)
netWork.addConvLayer(num_kernels=layer['num_kernels'],
kernel_size=layer['kernel_size'],
activation=layer['activation'],
weights=weights)
elif layer['type'] == 'Flat':
netWork.addFlattenLayer()
elif layer['type'] == 'MaxPool':
netWork.addMaxPoolLayer(kernel_size=layer['kernel_size'])
elif layer['type'] == 'Dense':
weights = np.array([np.concatenate((layer['weights'].flatten(), layer['bias']))])
netWork.addFCLayer(num_neurons=targets.shape[0],
activation=layer['activation'],
weights=weights)
else:
raise ValueError(f'Invalid layer type: {layer["type"]}')
# # Train the network # #
# First Feed forward
outputs = netWork.calculate(inputs=inputs)
print("----------- Custom Model -----------")
print(f"model output before:\n{outputs}")
# Calculate Loss derivative
loss_der = netWork.loss_derivative(outputs, targets)
loss = netWork.calculate_loss(np.array([inputs]), targets)
netWork.train(np.array([inputs]), targets) # Train the network
outputs = netWork.calculate(inputs=inputs)
print(f"model output after: \n{outputs}")
if example_num == 1:
print('1st convolutional layer, kernel weights:')
print(netWork.layers[0].kernels[0][0][0].weights[:-1].reshape((3, 3)))
print('1st convolutional layer, kernel bias:')
print(np.array([netWork.layers[0].kernels[0][0][0].weights[-1]]))
print('fully connected layer weights:')
print(netWork.layers[2].neurons[0].weights[:-1])
print('fully connected layer bias:')
print(np.array([netWork.layers[2].neurons[0].weights[-1]]))
elif example_num == 2:
print('1st convolutional layer, 1st kernel weights:')
print(netWork.layers[0].kernels[0][0][0].weights[:-1].reshape((3, 3)))
print('1st convolutional layer, 1st kernel bias:')
print(np.array([netWork.layers[0].kernels[0][0][0].weights[-1]]))
        print('1st convolutional layer, 2nd kernel weights:')
        print(netWork.layers[0].kernels[1][0][0].weights[:-1].reshape((3, 3)))
        print('1st convolutional layer, 2nd kernel bias:')
print(np.array([netWork.layers[0].kernels[1][0][0].weights[-1]]))
print('2nd convolutional layer, 1st kernel weights:')
print(netWork.layers[1].kernels[0][0][0].weights[:-1].reshape((2, 3, 3)))
print('2nd convolutional layer, 1st kernel bias:')
print(np.array([netWork.layers[1].kernels[0][0][0].weights[-1]]))
print('fully connected layer weights:')
print(netWork.layers[3].neurons[0].weights[:-1])
print('fully connected layer bias:')
print(np.array([netWork.layers[3].neurons[0].weights[-1]]))
elif example_num == 3:
print('1st convolutional layer, 1st kernel weights:')
print(netWork.layers[0].kernels[0][0][0].weights[:-1].reshape((3, 3)))
print('1st convolutional layer, 1st kernel bias:')
print(np.array([netWork.layers[0].kernels[0][0][0].weights[-1]]))
        print('1st convolutional layer, 2nd kernel weights:')
        print(netWork.layers[0].kernels[1][0][0].weights[:-1].reshape((3, 3)))
        print('1st convolutional layer, 2nd kernel bias:')
print(np.array([netWork.layers[0].kernels[1][0][0].weights[-1]]))
print('fully connected layer weights:')
print(netWork.layers[3].neurons[0].weights[:-1])
print('fully connected layer bias:')
print(np.array([netWork.layers[3].neurons[0].weights[-1]]))
else:
raise ValueError(f'Invalid example number: {example_num}')
if __name__ == '__main__':
try:
main()
except Exception as e:
print(str(e) + '\n' + str(traceback.format_exc()))
raise e
# # First Layer (Convolutional)
# weights_L1 = np.array(
# [np.concatenate((l1k1.flatten(), l1b1)), np.concatenate((l1k2.flatten(), l1b2))])
# netWork.addConvLayer(num_kernels=2, kernel_size=3, activation="logistic", weights=weights_L1)
# # Second Layer (Convolutional)
# weights_L2 = np.array([np.concatenate((l2c1.flatten(), l2c2.flatten(), l2b))])
# netWork.addConvLayer(num_kernels=1, kernel_size=3, activation="logistic", weights=weights_L2)
# # Third Layer (Fully Connected)
# netWork.addFlattenLayer()
# weights_L3 = np.array([np.concatenate((l3.flatten(), l3b))])
# netWork.addFCLayer(num_neurons=1, activation="logistic", weights=weights_L3)
|
import os
from unittest import TestCase
import pytest
from dotenv import load_dotenv
from pytest import skip
from osbot_utils.utils.Files import file_not_exists
from osbot_k8s.kubernetes.Ssh import Ssh
@pytest.mark.skip('needs live server') # todo add to test setup the creation of pods and nodes we can SSH into
class test_Ssh(TestCase):
def setUp(self) -> None:
load_dotenv()
self.ssh_config = {
"user" : os.environ.get('TEST_SSH_USER' ),
"server" : os.environ.get('TEST_SSH_SERVER'),
"ssh_key" : os.environ.get('TEST_SSH_KEY' )
}
if file_not_exists(self.ssh_config.get('ssh_key')):
skip('no ssh key in current test environment')
self.ssh = Ssh(ssh_config=self.ssh_config)
print()
# base methods
def test_server_in_known_hosts(self):
        result = self.ssh.server_in_known_hosts()    # todo: add method to programmatically add the server to the known_hosts file
assert type(result) is bool
def test_exec_ssh_command(self):
assert self.ssh.exec_ssh_command('uname') == {'error': '', 'output': 'Linux\n', 'status': True}
assert self.ssh.exec_ssh_command('aaaa' ) == {'error': 'bash: aaaa: command not found\n', 'output': '', 'status': False}
def test_get_get_scp_params(self):
source_file = 'source_file'
target_file = 'target_file'
ssh_params = self.ssh.get_scp_params(source_file, target_file)
assert ssh_params == ['-i', self.ssh_config.get('ssh_key'),
                              f"{self.ssh_config.get('user')}@{self.ssh_config.get('server')}:{source_file}",
target_file]
def test_get_get_ssh_params(self):
ssh_params = self.ssh.get_ssh_params('aaa')
assert ssh_params == ['-o StrictHostKeyChecking=no',
'-t', '-i', self.ssh_config.get('ssh_key'),
self.ssh_config.get('user') + '@' + self.ssh_config.get('server'),
'aaa']
def test_exec(self):
assert 'bin' in self.ssh.exec('cd /; ls')
# helper methods
def test_uname(self):
assert self.ssh.uname() == 'Linux'
# def create_pods(self, count):
# return self.ssh.exec(f'/home/ubuntu/icap-infrastructure/scripts/create_pod.sh {count}')
#
# def test_created_pod(self):
# self.create_pods(1)
# #assert 'bin' in self.ssh.exec('ls')
# # helper methods: esxcli
|
import os
from unittest import TestCase
import pytest
from dotenv import load_dotenv
from pytest import skip
from osbot_utils.utils.Files import file_not_exists
from osbot_k8s.kubernetes.Ssh import Ssh
@pytest.mark.skip('needs live server') # todo add to test setup the creation of pods and nodes we can SSH into
class test_Ssh(TestCase):
def setUp(self) -> None:
load_dotenv()
self.ssh_config = {
"user" : os.environ.get('TEST_SSH_USER' ),
"server" : os.environ.get('TEST_SSH_SERVER'),
"ssh_key" : os.environ.get('TEST_SSH_KEY' )
}
if file_not_exists(self.ssh_config.get('ssh_key')):
skip('no ssh key in current test environment')
self.ssh = Ssh(ssh_config=self.ssh_config)
print()
# base methods
def test_server_in_known_hosts(self):
        result = self.ssh.server_in_known_hosts()    # todo: add method to programmatically add the server to the known_hosts file
assert type(result) is bool
def test_exec_ssh_command(self):
assert self.ssh.exec_ssh_command('uname') == {'error': '', 'output': 'Linux\n', 'status': True}
assert self.ssh.exec_ssh_command('aaaa' ) == {'error': 'bash: aaaa: command not found\n', 'output': '', 'status': False}
def test_get_get_scp_params(self):
source_file = 'source_file'
target_file = 'target_file'
ssh_params = self.ssh.get_scp_params(source_file, target_file)
assert ssh_params == ['-i', self.ssh_config.get('ssh_key'),
f"{self.ssh_config.get('user')}@{self.ssh_config.get('server')}:{source_file}",
target_file]
def test_get_get_ssh_params(self):
ssh_params = self.ssh.get_ssh_params('aaa')
assert ssh_params == ['-o StrictHostKeyChecking=no',
'-t', '-i', self.ssh_config.get('ssh_key'),
self.ssh_config.get('user') + '@' + self.ssh_config.get('server'),
'aaa']
def test_exec(self):
assert 'bin' in self.ssh.exec('cd /; ls')
# helper methods
def test_uname(self):
assert self.ssh.uname() == 'Linux'
# def create_pods(self, count):
# return self.ssh.exec(f'/home/ubuntu/icap-infrastructure/scripts/create_pod.sh {count}')
#
# def test_created_pod(self):
# self.create_pods(1)
# #assert 'bin' in self.ssh.exec('ls')
# # helper methods: esxcli
|
import datetime
import json
import logging
import os
import queue
import subprocess as sp
import threading
import time
from collections import defaultdict
from pathlib import Path
import psutil
import shutil
from frigate.config import FrigateConfig
from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
from frigate.models import Event
from peewee import fn
logger = logging.getLogger(__name__)
class EventProcessor(threading.Thread):
def __init__(
self, config, camera_processes, event_queue, event_processed_queue, stop_event
):
threading.Thread.__init__(self)
self.name = "event_processor"
self.config = config
self.camera_processes = camera_processes
self.cached_clips = {}
self.event_queue = event_queue
self.event_processed_queue = event_processed_queue
self.events_in_process = {}
self.stop_event = stop_event
def should_create_clip(self, camera, event_data):
if event_data["false_positive"]:
return False
# if there are required zones and there is no overlap
required_zones = self.config.cameras[camera].clips.required_zones
if len(required_zones) > 0 and not set(event_data["entered_zones"]) & set(
required_zones
):
logger.debug(
                f"Not creating clip for {event_data['id']} because it did not enter required zones"
)
return False
return True
def refresh_cache(self):
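        # Index new files in CACHE_DIR (skipping any that ffmpeg still has open), probing
        # their duration with ffprobe, then drop cached clips that end well before the
        # earliest in-progress event and proactively trim the cache when it is >90% full.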
cached_files = os.listdir(CACHE_DIR)
files_in_use = []
for process in psutil.process_iter():
try:
if process.name() != "ffmpeg":
continue
flist = process.open_files()
if flist:
for nt in flist:
if nt.path.startswith(CACHE_DIR):
files_in_use.append(nt.path.split("/")[-1])
except:
continue
for f in cached_files:
if f in files_in_use or f in self.cached_clips:
continue
basename = os.path.splitext(f)[0]
camera, date = basename.rsplit("-", maxsplit=1)
start_time = datetime.datetime.strptime(date, "%Y%m%d%H%M%S")
ffprobe_cmd = [
"ffprobe",
"-v",
"error",
"-show_entries",
"format=duration",
"-of",
"default=noprint_wrappers=1:nokey=1",
f"{os.path.join(CACHE_DIR, f)}",
]
p = sp.run(ffprobe_cmd, capture_output=True)
if p.returncode == 0:
duration = float(p.stdout.decode().strip())
else:
logger.info(f"bad file: {f}")
os.remove(os.path.join(CACHE_DIR, f))
continue
self.cached_clips[f] = {
"path": f,
"camera": camera,
"start_time": start_time.timestamp(),
"duration": duration,
}
if len(self.events_in_process) > 0:
earliest_event = min(
self.events_in_process.values(), key=lambda x: x["start_time"]
)["start_time"]
else:
earliest_event = datetime.datetime.now().timestamp()
        # if the earliest event is more than max seconds ago, cap it
max_seconds = self.config.clips.max_seconds
earliest_event = max(
earliest_event,
datetime.datetime.now().timestamp() - self.config.clips.max_seconds,
)
for f, data in list(self.cached_clips.items()):
if earliest_event - 90 > data["start_time"] + data["duration"]:
del self.cached_clips[f]
logger.debug(f"Cleaning up cached file {f}")
os.remove(os.path.join(CACHE_DIR, f))
# if we are still using more than 90% of the cache, proactively cleanup
cache_usage = shutil.disk_usage("/tmp/cache")
if (
cache_usage.used / cache_usage.total > 0.9
and cache_usage.free < 200000000
and len(self.cached_clips) > 0
):
logger.warning("More than 90% of the cache is used.")
logger.warning(
"Consider increasing space available at /tmp/cache or reducing max_seconds in your clips config."
)
logger.warning("Proactively cleaning up the cache...")
while cache_usage.used / cache_usage.total > 0.9:
oldest_clip = min(
self.cached_clips.values(), key=lambda x: x["start_time"]
)
del self.cached_clips[oldest_clip["path"]]
os.remove(os.path.join(CACHE_DIR, oldest_clip["path"]))
cache_usage = shutil.disk_usage("/tmp/cache")
def create_clip(self, camera, event_data, pre_capture, post_capture):
# get all clips from the camera with the event sorted
sorted_clips = sorted(
[c for c in self.cached_clips.values() if c["camera"] == camera],
key=lambda i: i["start_time"],
)
# if there are no clips in the cache or we are still waiting on a needed file check every 5 seconds
wait_count = 0
while (
len(sorted_clips) == 0
or sorted_clips[-1]["start_time"] + sorted_clips[-1]["duration"]
< event_data["end_time"] + post_capture
):
if wait_count > 4:
logger.warning(
                    f"Unable to create clip for {camera} and event {event_data['id']}. There were no cache files for this event."
)
return False
logger.debug(f"No cache clips for {camera}. Waiting...")
time.sleep(5)
self.refresh_cache()
# get all clips from the camera with the event sorted
sorted_clips = sorted(
[c for c in self.cached_clips.values() if c["camera"] == camera],
key=lambda i: i["start_time"],
)
wait_count += 1
playlist_start = event_data["start_time"] - pre_capture
playlist_end = event_data["end_time"] + post_capture
playlist_lines = []
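        # Build an ffmpeg concat-demuxer playlist (fed via stdin below); inpoint/outpoint
        # trim the first/last cached clip to the event window, e.g. (hypothetical camera):
        #   file '/tmp/cache/front_door-20210101120000.mp4'
        #   inpoint 3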
for clip in sorted_clips:
# clip ends before playlist start time, skip
if clip["start_time"] + clip["duration"] < playlist_start:
continue
# clip starts after playlist ends, finish
if clip["start_time"] > playlist_end:
break
            playlist_lines.append(f"file '{os.path.join(CACHE_DIR,clip['path'])}'")
# if this is the starting clip, add an inpoint
if clip["start_time"] < playlist_start:
playlist_lines.append(
                    f"inpoint {int(playlist_start-clip['start_time'])}"
)
# if this is the ending clip, add an outpoint
if clip["start_time"] + clip["duration"] > playlist_end:
playlist_lines.append(
                    f"outpoint {int(playlist_end-clip['start_time'])}"
)
        clip_name = f"{camera}-{event_data['id']}"
ffmpeg_cmd = [
"ffmpeg",
"-y",
"-protocol_whitelist",
"pipe,file",
"-f",
"concat",
"-safe",
"0",
"-i",
"-",
"-c",
"copy",
"-movflags",
"+faststart",
f"{os.path.join(CLIPS_DIR, clip_name)}.mp4",
]
p = sp.run(
ffmpeg_cmd,
input="\n".join(playlist_lines),
encoding="ascii",
capture_output=True,
)
if p.returncode != 0:
logger.error(p.stderr)
return False
return True
def run(self):
while not self.stop_event.is_set():
try:
event_type, camera, event_data = self.event_queue.get(timeout=10)
except queue.Empty:
if not self.stop_event.is_set():
self.refresh_cache()
continue
            logger.debug(f"Event received: {event_type} {camera} {event_data['id']}")
self.refresh_cache()
if event_type == "start":
self.events_in_process[event_data["id"]] = event_data
if event_type == "end":
clips_config = self.config.cameras[camera].clips
clip_created = False
if self.should_create_clip(camera, event_data):
if clips_config.enabled and (
clips_config.objects is None
or event_data["label"] in clips_config.objects
):
clip_created = self.create_clip(
camera,
event_data,
clips_config.pre_capture,
clips_config.post_capture,
)
if clip_created or event_data["has_snapshot"]:
Event.create(
id=event_data["id"],
label=event_data["label"],
camera=camera,
start_time=event_data["start_time"],
end_time=event_data["end_time"],
top_score=event_data["top_score"],
false_positive=event_data["false_positive"],
zones=list(event_data["entered_zones"]),
thumbnail=event_data["thumbnail"],
has_clip=clip_created,
has_snapshot=event_data["has_snapshot"],
)
del self.events_in_process[event_data["id"]]
self.event_processed_queue.put((event_data["id"], camera))
logger.info(f"Exiting event processor...")
class EventCleanup(threading.Thread):
def __init__(self, config: FrigateConfig, stop_event):
threading.Thread.__init__(self)
self.name = "event_cleanup"
self.config = config
self.stop_event = stop_event
self.camera_keys = list(self.config.cameras.keys())
def expire(self, media_type):
## Expire events from unlisted cameras based on the global config
if media_type == 'clips':
retain_config = self.config.clips.retain
file_extension = "mp4"
update_params = {"has_clip": False}
else:
retain_config = self.config.snapshots.retain
file_extension = "jpg"
update_params = {"has_snapshot": False}
distinct_labels = (
Event.select(Event.label)
.where(Event.camera.not_in(self.camera_keys))
.distinct()
)
# loop over object types in db
for l in distinct_labels:
# get expiration time for this label
expire_days = retain_config.objects.get(l.label, retain_config.default)
expire_after = (
datetime.datetime.now() - datetime.timedelta(days=expire_days)
).timestamp()
# grab all events after specific time
expired_events = Event.select().where(
Event.camera.not_in(self.camera_keys),
Event.start_time < expire_after,
Event.label == l.label,
)
# delete the media from disk
for event in expired_events:
media_name = f"{event.camera}-{event.id}"
media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}")
media_path.unlink(missing_ok=True)
# update the clips attribute for the db entry
update_query = Event.update(update_params).where(
Event.camera.not_in(self.camera_keys),
Event.start_time < expire_after,
Event.label == l.label,
)
update_query.execute()
## Expire events from cameras based on the camera config
for name, camera in self.config.cameras.items():
if media_type == 'clips':
retain_config = camera.clips.retain
else:
retain_config = camera.snapshots.retain
# get distinct objects in database for this camera
distinct_labels = (
Event.select(Event.label).where(Event.camera == name).distinct()
)
# loop over object types in db
for l in distinct_labels:
# get expiration time for this label
expire_days = retain_config.objects.get(l.label, retain_config.default)
expire_after = (
datetime.datetime.now() - datetime.timedelta(days=expire_days)
).timestamp()
# grab all events after specific time
expired_events = Event.select().where(
Event.camera == name,
Event.start_time < expire_after,
Event.label == l.label,
)
# delete the grabbed clips from disk
for event in expired_events:
media_name = f"{event.camera}-{event.id}"
media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}")
media_path.unlink(missing_ok=True)
# update the clips attribute for the db entry
update_query = Event.update(update_params).where(
Event.camera == name,
Event.start_time < expire_after,
Event.label == l.label,
)
update_query.execute()
def purge_duplicates(self):
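        # Events sharing label, camera, and a 5-second start-time bucket are treated as
        # duplicates; row_number() keeps the longest one (copy_number == 1) and the rest
        # are deleted below.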
duplicate_query = """with grouped_events as (
select id,
label,
camera,
has_snapshot,
has_clip,
row_number() over (
partition by label, camera, round(start_time/5,0)*5
order by end_time-start_time desc
) as copy_number
from event
)
select distinct id, camera, has_snapshot, has_clip from grouped_events
where copy_number > 1;"""
duplicate_events = Event.raw(duplicate_query)
for event in duplicate_events:
logger.debug(f"Removing duplicate: {event.id}")
media_name = f"{event.camera}-{event.id}"
if event.has_snapshot:
media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
media_path.unlink(missing_ok=True)
if event.has_clip:
media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.mp4")
media_path.unlink(missing_ok=True)
(
Event.delete()
.where(Event.id << [event.id for event in duplicate_events])
.execute()
)
def run(self):
# only expire events every 5 minutes
while not self.stop_event.wait(300):
self.expire("clips")
self.expire("snapshots")
self.purge_duplicates()
# drop events from db where has_clip and has_snapshot are false
delete_query = Event.delete().where(
Event.has_clip == False, Event.has_snapshot == False
)
delete_query.execute()
logger.info(f"Exiting event cleanup...")
|
import datetime
import json
import logging
import os
import queue
import subprocess as sp
import threading
import time
from collections import defaultdict
from pathlib import Path
import psutil
import shutil
from frigate.config import FrigateConfig
from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
from frigate.models import Event
from peewee import fn
logger = logging.getLogger(__name__)
class EventProcessor(threading.Thread):
def __init__(
self, config, camera_processes, event_queue, event_processed_queue, stop_event
):
threading.Thread.__init__(self)
self.name = "event_processor"
self.config = config
self.camera_processes = camera_processes
self.cached_clips = {}
self.event_queue = event_queue
self.event_processed_queue = event_processed_queue
self.events_in_process = {}
self.stop_event = stop_event
def should_create_clip(self, camera, event_data):
if event_data["false_positive"]:
return False
# if there are required zones and there is no overlap
required_zones = self.config.cameras[camera].clips.required_zones
if len(required_zones) > 0 and not set(event_data["entered_zones"]) & set(
required_zones
):
logger.debug(
f"Not creating clip for {event_data['id']} because it did not enter required zones"
)
return False
return True
def refresh_cache(self):
cached_files = os.listdir(CACHE_DIR)
files_in_use = []
for process in psutil.process_iter():
try:
if process.name() != "ffmpeg":
continue
flist = process.open_files()
if flist:
for nt in flist:
if nt.path.startswith(CACHE_DIR):
files_in_use.append(nt.path.split("/")[-1])
except:
continue
for f in cached_files:
if f in files_in_use or f in self.cached_clips:
continue
basename = os.path.splitext(f)[0]
camera, date = basename.rsplit("-", maxsplit=1)
start_time = datetime.datetime.strptime(date, "%Y%m%d%H%M%S")
ffprobe_cmd = [
"ffprobe",
"-v",
"error",
"-show_entries",
"format=duration",
"-of",
"default=noprint_wrappers=1:nokey=1",
f"{os.path.join(CACHE_DIR, f)}",
]
p = sp.run(ffprobe_cmd, capture_output=True)
if p.returncode == 0:
duration = float(p.stdout.decode().strip())
else:
logger.info(f"bad file: {f}")
os.remove(os.path.join(CACHE_DIR, f))
continue
self.cached_clips[f] = {
"path": f,
"camera": camera,
"start_time": start_time.timestamp(),
"duration": duration,
}
if len(self.events_in_process) > 0:
earliest_event = min(
self.events_in_process.values(), key=lambda x: x["start_time"]
)["start_time"]
else:
earliest_event = datetime.datetime.now().timestamp()
        # if the earliest event is more than max seconds ago, cap it
max_seconds = self.config.clips.max_seconds
earliest_event = max(
earliest_event,
datetime.datetime.now().timestamp() - self.config.clips.max_seconds,
)
for f, data in list(self.cached_clips.items()):
if earliest_event - 90 > data["start_time"] + data["duration"]:
del self.cached_clips[f]
logger.debug(f"Cleaning up cached file {f}")
os.remove(os.path.join(CACHE_DIR, f))
# if we are still using more than 90% of the cache, proactively cleanup
cache_usage = shutil.disk_usage("/tmp/cache")
if (
cache_usage.used / cache_usage.total > 0.9
and cache_usage.free < 200000000
and len(self.cached_clips) > 0
):
logger.warning("More than 90% of the cache is used.")
logger.warning(
"Consider increasing space available at /tmp/cache or reducing max_seconds in your clips config."
)
logger.warning("Proactively cleaning up the cache...")
while cache_usage.used / cache_usage.total > 0.9:
oldest_clip = min(
self.cached_clips.values(), key=lambda x: x["start_time"]
)
del self.cached_clips[oldest_clip["path"]]
os.remove(os.path.join(CACHE_DIR, oldest_clip["path"]))
cache_usage = shutil.disk_usage("/tmp/cache")
def create_clip(self, camera, event_data, pre_capture, post_capture):
# get all clips from the camera with the event sorted
sorted_clips = sorted(
[c for c in self.cached_clips.values() if c["camera"] == camera],
key=lambda i: i["start_time"],
)
# if there are no clips in the cache or we are still waiting on a needed file check every 5 seconds
wait_count = 0
while (
len(sorted_clips) == 0
or sorted_clips[-1]["start_time"] + sorted_clips[-1]["duration"]
< event_data["end_time"] + post_capture
):
if wait_count > 4:
logger.warning(
f"Unable to create clip for {camera} and event {event_data['id']}. There were no cache files for this event."
)
return False
logger.debug(f"No cache clips for {camera}. Waiting...")
time.sleep(5)
self.refresh_cache()
# get all clips from the camera with the event sorted
sorted_clips = sorted(
[c for c in self.cached_clips.values() if c["camera"] == camera],
key=lambda i: i["start_time"],
)
wait_count += 1
playlist_start = event_data["start_time"] - pre_capture
playlist_end = event_data["end_time"] + post_capture
playlist_lines = []
for clip in sorted_clips:
# clip ends before playlist start time, skip
if clip["start_time"] + clip["duration"] < playlist_start:
continue
# clip starts after playlist ends, finish
if clip["start_time"] > playlist_end:
break
playlist_lines.append(f"file '{os.path.join(CACHE_DIR,clip['path'])}'")
# if this is the starting clip, add an inpoint
if clip["start_time"] < playlist_start:
playlist_lines.append(
f"inpoint {int(playlist_start-clip['start_time'])}"
)
# if this is the ending clip, add an outpoint
if clip["start_time"] + clip["duration"] > playlist_end:
playlist_lines.append(
f"outpoint {int(playlist_end-clip['start_time'])}"
)
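        # Illustrative sketch (not from the original code; paths, camera name and
        # offsets are hypothetical): for an event that starts part-way through one
        # cached segment and ends part-way through a later one, the playlist piped
        # to ffmpeg's concat demuxer would look roughly like this:
        #
        #   file '/tmp/cache/front_door-20210101120000.mp4'
        #   inpoint 42
        #   file '/tmp/cache/front_door-20210101120500.mp4'
        #   file '/tmp/cache/front_door-20210101121000.mp4'
        #   outpoint 17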
clip_name = f"{camera}-{event_data['id']}"
ffmpeg_cmd = [
"ffmpeg",
"-y",
"-protocol_whitelist",
"pipe,file",
"-f",
"concat",
"-safe",
"0",
"-i",
"-",
"-c",
"copy",
"-movflags",
"+faststart",
f"{os.path.join(CLIPS_DIR, clip_name)}.mp4",
]
p = sp.run(
ffmpeg_cmd,
input="\n".join(playlist_lines),
encoding="ascii",
capture_output=True,
)
if p.returncode != 0:
logger.error(p.stderr)
return False
return True
def run(self):
while not self.stop_event.is_set():
try:
event_type, camera, event_data = self.event_queue.get(timeout=10)
except queue.Empty:
if not self.stop_event.is_set():
self.refresh_cache()
continue
logger.debug(f"Event received: {event_type} {camera} {event_data['id']}")
self.refresh_cache()
if event_type == "start":
self.events_in_process[event_data["id"]] = event_data
if event_type == "end":
clips_config = self.config.cameras[camera].clips
clip_created = False
if self.should_create_clip(camera, event_data):
if clips_config.enabled and (
clips_config.objects is None
or event_data["label"] in clips_config.objects
):
clip_created = self.create_clip(
camera,
event_data,
clips_config.pre_capture,
clips_config.post_capture,
)
if clip_created or event_data["has_snapshot"]:
Event.create(
id=event_data["id"],
label=event_data["label"],
camera=camera,
start_time=event_data["start_time"],
end_time=event_data["end_time"],
top_score=event_data["top_score"],
false_positive=event_data["false_positive"],
zones=list(event_data["entered_zones"]),
thumbnail=event_data["thumbnail"],
has_clip=clip_created,
has_snapshot=event_data["has_snapshot"],
)
del self.events_in_process[event_data["id"]]
self.event_processed_queue.put((event_data["id"], camera))
logger.info(f"Exiting event processor...")
class EventCleanup(threading.Thread):
def __init__(self, config: FrigateConfig, stop_event):
threading.Thread.__init__(self)
self.name = "event_cleanup"
self.config = config
self.stop_event = stop_event
self.camera_keys = list(self.config.cameras.keys())
def expire(self, media_type):
## Expire events from unlisted cameras based on the global config
if media_type == 'clips':
retain_config = self.config.clips.retain
file_extension = "mp4"
update_params = {"has_clip": False}
else:
retain_config = self.config.snapshots.retain
file_extension = "jpg"
update_params = {"has_snapshot": False}
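        # Illustrative example (hypothetical values, not from the original config):
        # with a retain config of {"default": 10, "objects": {"person": 30}},
        # "person" events are kept for 30 days and every other label falls back to
        # the 10 day default via retain_config.objects.get(label, retain_config.default).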
distinct_labels = (
Event.select(Event.label)
.where(Event.camera.not_in(self.camera_keys))
.distinct()
)
# loop over object types in db
for l in distinct_labels:
# get expiration time for this label
expire_days = retain_config.objects.get(l.label, retain_config.default)
expire_after = (
datetime.datetime.now() - datetime.timedelta(days=expire_days)
).timestamp()
# grab all events after specific time
expired_events = Event.select().where(
Event.camera.not_in(self.camera_keys),
Event.start_time < expire_after,
Event.label == l.label,
)
# delete the media from disk
for event in expired_events:
media_name = f"{event.camera}-{event.id}"
media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}")
media_path.unlink(missing_ok=True)
# update the clips attribute for the db entry
update_query = Event.update(update_params).where(
Event.camera.not_in(self.camera_keys),
Event.start_time < expire_after,
Event.label == l.label,
)
update_query.execute()
## Expire events from cameras based on the camera config
for name, camera in self.config.cameras.items():
if media_type == 'clips':
retain_config = camera.clips.retain
else:
retain_config = camera.snapshots.retain
# get distinct objects in database for this camera
distinct_labels = (
Event.select(Event.label).where(Event.camera == name).distinct()
)
# loop over object types in db
for l in distinct_labels:
# get expiration time for this label
expire_days = retain_config.objects.get(l.label, retain_config.default)
expire_after = (
datetime.datetime.now() - datetime.timedelta(days=expire_days)
).timestamp()
# grab all events after specific time
expired_events = Event.select().where(
Event.camera == name,
Event.start_time < expire_after,
Event.label == l.label,
)
# delete the grabbed clips from disk
for event in expired_events:
media_name = f"{event.camera}-{event.id}"
media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}")
media_path.unlink(missing_ok=True)
# update the clips attribute for the db entry
update_query = Event.update(update_params).where(
Event.camera == name,
Event.start_time < expire_after,
Event.label == l.label,
)
update_query.execute()
def purge_duplicates(self):
duplicate_query = """with grouped_events as (
select id,
label,
camera,
has_snapshot,
has_clip,
row_number() over (
partition by label, camera, round(start_time/5,0)*5
order by end_time-start_time desc
) as copy_number
from event
)
select distinct id, camera, has_snapshot, has_clip from grouped_events
where copy_number > 1;"""
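        # The window function above buckets events by label, camera and start_time
        # rounded to the nearest 5 seconds, ranks each bucket by duration
        # (end_time - start_time, longest first), and treats everything with
        # copy_number > 1 as a duplicate of the longest event in its bucket.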
duplicate_events = Event.raw(duplicate_query)
for event in duplicate_events:
logger.debug(f"Removing duplicate: {event.id}")
media_name = f"{event.camera}-{event.id}"
if event.has_snapshot:
media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
media_path.unlink(missing_ok=True)
if event.has_clip:
media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.mp4")
media_path.unlink(missing_ok=True)
(
Event.delete()
.where(Event.id << [event.id for event in duplicate_events])
.execute()
)
def run(self):
# only expire events every 5 minutes
while not self.stop_event.wait(300):
self.expire("clips")
self.expire("snapshots")
self.purge_duplicates()
# drop events from db where has_clip and has_snapshot are false
delete_query = Event.delete().where(
Event.has_clip == False, Event.has_snapshot == False
)
delete_query.execute()
logger.info(f"Exiting event cleanup...")
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import itertools
import time
from contextlib import ExitStack as contextlib_ExitStack
from typing import Any, Iterable, List, Optional, Tuple
import torch
from pytext.common.constants import BatchContext, Stage
from pytext.config import PyTextConfig
from pytext.config.component import (
Component,
ComponentType,
create_optimizer,
create_scheduler,
create_sparsifier,
)
from pytext.config.pytext_config import ConfigBase
from pytext.data.data_handler import BatchIterator
from pytext.metric_reporters import MetricReporter
from pytext.models.distributed_model import DistributedModel
from pytext.models.model import Model
from pytext.optimizer import Adam, Optimizer, learning_rates
from pytext.optimizer.scheduler import Scheduler
from pytext.optimizer.sparsifier import Sparsifier
from pytext.task.serialize import save
from pytext.trainers.training_state import TrainingState
from pytext.utils import cuda, precision, timing
class TrainerBase(Component):
__COMPONENT_TYPE__ = ComponentType.TRAINER
def cycle(iterator: Iterable[Any]) -> Iterable[Any]:
"""Like itertools.cycle, but will call iter on the original iterable instead.
This limits it to not be able to run on say raw generators, but also doesn't
store a copy of the iterable in memory for repetition."""
while True:
yield from iterator
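# Illustrative sketch (not part of the original module): unlike itertools.cycle,
# cycle() calls iter() on the underlying iterable again on each pass, so a
# re-iterable data source is replayed without buffering a copy in memory.
# Hypothetical usage:
#
#     batches = [1, 2, 3]
#     endless = cycle(batches)
#     first_five = list(itertools.islice(endless, 5))  # [1, 2, 3, 1, 2]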
def maybe_accumulate_gradients(exit_stack, model, index, sample_size):
# index == sample_size - 1 represents the last backward pass
if (
cuda.DISTRIBUTED_WORLD_SIZE > 1
and hasattr(model, "no_sync")
and index < sample_size - 1
):
"""
Whenever *samples* contains more than one mini-batch (e.g sample_size > 1),
we want to accumulate gradients locally and only call all-reduce in the
last backwards pass.
"""
exit_stack.enter_context(model.no_sync())
if precision._FP16_ENABLED and index < sample_size - 1:
"""
Whenever *samples* contains more than one mini-batch (e.g sample_size > 1),
we want to accumulate gradients in FP16 parameters (e.g delay unscale)
and only unscale to FP32 parameters after the last backward pass.
"""
exit_stack.enter_context(precision.delay_unscale())
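# Illustrative sketch (hypothetical, mirroring how run_step uses this helper):
# every micro-batch except the last enters DDP's no_sync() (and the FP16
# delay-unscale context when enabled), so gradients are only all-reduced and
# unscaled once per accumulated sample.
#
#     with contextlib_ExitStack() as exit_stack:
#         maybe_accumulate_gradients(exit_stack, model, idx, sample_size)
#         loss = ...  # forward pass + loss for micro-batch idx
#         precision.backward(optimizer, loss)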
class Trainer(TrainerBase):
"""
    Base Trainer class that provides ways to
1 Train model, compute metrics against eval set and use the metrics for
model selection.
2 Test trained model, compute and publish metrics against a blind test set.
Attributes:
epochs (int): Training epochs
early_stop_after (int): Stop after how many epochs when the eval metric
is not improving
max_clip_norm (Optional[float]): Clip gradient norm if set
report_train_metrics (bool): Whether metrics on training data should be
computed and reported.
target_time_limit_seconds (float): Target time limit for training in seconds. If
the expected time to train another epoch exceeds this limit, stop training.
"""
class Config(ConfigBase):
#: Training epochs
epochs: int = 10
#: Stop after how many epochs when the eval metric is not improving
early_stop_after: int = 0
#: Clip gradient norm if set
max_clip_norm: Optional[float] = None
#: Whether metrics on training data should be computed and reported.
report_train_metrics: bool = True
#: Target time limit for training, default (None) to no time limit.
target_time_limit_seconds: Optional[int] = None
#: Whether to do evaluation and model selection based on it.
do_eval: bool = True
#: Number of samples for logging training progress.
num_samples_to_log_progress: int = 1000
#: Number of forward & backward per batch before update gradients, the
#: actual_batch_size = batch_size x num_accumulated_batches
num_accumulated_batches: int = 1
#: Define epoch as a fixed number of batches. Subsequent epochs will continue
#: to iterate through the data, cycling through it when they reach the end.
#: If not set, use exactly one pass through the dataset as one epoch.
#: This configuration only affects the train epochs, test and eval
#: will always test their entire datasets.
num_batches_per_epoch: Optional[int] = None
#: config for optimizer, used in parameter update
optimizer: Optimizer.Config = Adam.Config()
scheduler: Optional[Scheduler.Config] = None
sparsifier: Optional[Sparsifier.Config] = None
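    # Illustrative example (hypothetical values): with a data-side batch_size of
    # 16 and num_accumulated_batches = 4, gradients are accumulated over 4
    # forward/backward passes before each optimizer step, for an effective
    # actual_batch_size of 16 x 4 = 64.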
def __init__(self, config: Config, model: torch.nn.Module):
if config.early_stop_after > 0:
            assert config.do_eval, "can't do early stopping when not running evaluation"
optimizer: torch.optim.Optimizer = create_optimizer(config.optimizer, model)
self.scheduler: torch.optim.lr_scheduler = (
create_scheduler(config.scheduler, optimizer)
if config.scheduler
else Scheduler()
)
self.sparsifier: Sparsifier = (
create_sparsifier(config.sparsifier) if config.sparsifier else Sparsifier()
)
model, self.optimizer = precision.initialize(model, optimizer)
self.config = config
@classmethod
def from_config(cls, config: Config, model: torch.nn.Module, *args, **kwargs):
return cls(config, model)
@timing.time("Trainer.test")
def test(self, test_iter, model, metric_reporter: MetricReporter):
state = TrainingState(stage=Stage.TEST, model=model, epoch=1)
if cuda.CUDA_ENABLED:
state.model.cuda()
state.model.eval()
with torch.no_grad():
return self.run_epoch(state, test_iter, metric_reporter)
@timing.time("pre-training")
def set_up_training(self, state: TrainingState, training_data: BatchIterator):
if cuda.CUDA_ENABLED:
state.model.cuda()
state.scheduler.prepare(training_data, self.config.epochs)
if cuda.DISTRIBUTED_WORLD_SIZE > 1:
device_id = torch.cuda.current_device()
state.model = DistributedModel(
module=state.model,
device_ids=[device_id],
output_device=device_id,
broadcast_buffers=False,
find_unused_parameters=state.model.find_unused_parameters,
)
state.start_time = time.time()
if self.config.num_batches_per_epoch:
# Set the training_data iterator to cycle, so it will never run out,
# but rather after reaching the end will loop back to the beginning.
training_data = cycle(training_data)
return training_data
@timing.time("zero gradients")
def zero_grads(self, state):
if state.stage != Stage.TRAIN:
return
state.optimizer.zero_grad()
@timing.time("backprop")
def backprop(self, state, loss):
if state.stage != Stage.TRAIN:
return
with timing.time("loss.backward"):
precision.backward(state.optimizer, loss)
@timing.time("optimizer")
def optimizer_step(self, state):
if state.stage != Stage.TRAIN:
return
state.scheduler.step_batch()
if self.config.max_clip_norm is not None:
grad_norm = precision.clip_grad_norm(
state.model, state.optimizer, self.config.max_clip_norm
)
else:
grad_norm = None
with timing.time("optimizer.step"):
state.optimizer.step()
state.step_counter += 1
# grad_norm could be used to check grads sync in distributed training
return grad_norm
@timing.time("sparsifier")
def sparsification_step(self, state):
        # sparsification only if a sparsifier is used
if not self.config.sparsifier:
return
if state.stage != Stage.TRAIN:
return
if state.sparsifier.sparsification_condition(state):
state.sparsifier.sparsify(state)
if state.rank == 0:
current_sparsity = state.sparsifier.get_current_sparsity(state.model)
print(f"sparsity in the model: {current_sparsity}")
def continue_training(self, state: TrainingState) -> bool:
# Are we done?
if state.epoch >= self.config.epochs:
return False
# Check whether the model has improved recently enough
# Only do this if we're bothering to evaluate the model
if self.config.do_eval and state.epochs_since_last_improvement >= (
self.config.early_stop_after or float("inf")
):
print(
f"Worker {state.rank}: Eval metric hasn't changed for "
+ f"{state.epochs_since_last_improvement} epochs. Stopping now."
)
return False
# Check whether we think the next epoch will put us over the configured
# time limit.
epochs_run = state.epoch + 1
time_elapsed = time.time() - state.start_time
mean_epoch_time = time_elapsed / epochs_run
expected_next_epoch_time = time_elapsed + mean_epoch_time
target_time_limit = (
float("inf")
if self.config.target_time_limit_seconds is None
else self.config.target_time_limit_seconds
)
if expected_next_epoch_time > target_time_limit:
print(
f"Worker {state.rank}: Stopping training after {epochs_run} epochs "
f"and {int(time_elapsed)} seconds, due to the target max training "
f"time of {self.config.target_time_limit_seconds} seconds."
)
return False
return True
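    # Worked example (hypothetical numbers) of the time-limit check above: with
    # state.epoch = 2 and time_elapsed = 300s, epochs_run = 3, so
    # mean_epoch_time = 100s and expected_next_epoch_time = 400s; training stops
    # here if target_time_limit_seconds is set below 400.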
def update_best_model(
self, state: TrainingState, train_config: PyTextConfig, eval_metric
):
# This should be updated by all workers so they agree on when to stop training
# when `early_stop_after` is specified.
state.epochs_since_last_improvement = 0
state.best_model_metric = eval_metric
print(f"Found a better model!")
# Only one worker should save checkpoints
if state.rank != 0:
return
model_state = state.model.state_dict()
# save to cpu to avoid multiple model copies in gpu memory
if cuda.CUDA_ENABLED:
for key, parameter in model_state.items():
model_state[key] = parameter.cpu()
state.best_model_state = model_state
@timing.time("save checkpoint")
def save_checkpoint(self, state: TrainingState, train_config: PyTextConfig) -> str:
# Only one worker should save checkpoints
if state.rank != 0:
return
if train_config.save_module_checkpoints or train_config.save_all_checkpoints:
# saves per-epoch sub-modules when save_all_checkpoints or
# save_module_checkpoints is enabled
state.model.save_modules(
base_path=train_config.modules_save_dir, suffix=f"-ep{state.epoch}"
)
if state.epochs_since_last_improvement == 0:
# state.epochs_since_last_improvement == 0 means found a better
# model in current epoch, thus update best model's sub-modules
state.model.save_modules(base_path=train_config.modules_save_dir)
# next to add new config and implementation of frequency on checkpointing
if train_config.save_all_checkpoints:
return save(
config=train_config,
model=state.model,
meta=None,
tensorizers=None,
training_state=state,
identifier=str(state.epoch),
)
def load_best_model(self, state: TrainingState):
if cuda.CUDA_ENABLED:
# Move current model to CPU to avoid multiple models in GPU memory
state.model.cpu()
state.model.load_state_dict(
{k: v.cuda() for k, v in state.best_model_state.items()}
)
# Move model back to GPU
state.model.cuda()
else:
state.model.load_state_dict(state.best_model_state)
def train(
self,
training_data: BatchIterator,
eval_data: BatchIterator,
model: Model,
metric_reporter: MetricReporter,
train_config: PyTextConfig,
rank: int = 0,
) -> Tuple[torch.nn.Module, Any]:
"""
Train and eval a model, the model states will be modified.
Args:
train_iter (BatchIterator): batch iterator of training data
eval_iter (BatchIterator): batch iterator of evaluation data
model (Model): model to be trained
metric_reporter (MetricReporter): compute metric based on training
output and report results to console, file.. etc
train_config (PyTextConfig): training config
training_result (Optional): only meaningful for Hogwild training. default
is None
rank (int): only used in distributed training, the rank of the current
training thread, evaluation will only be done in rank 0
Returns:
model, best_metric: the trained model together with the best metric
"""
state = TrainingState(
model=model,
optimizer=self.optimizer,
scheduler=self.scheduler,
sparsifier=self.sparsifier,
rank=rank,
)
return self.train_from_state(
state, training_data, eval_data, metric_reporter, train_config
)
@timing.time("Trainer.train_from_state")
def train_from_state(
self,
state: TrainingState,
training_data: BatchIterator,
eval_data: BatchIterator,
metric_reporter: MetricReporter,
train_config: PyTextConfig,
) -> Tuple[torch.nn.Module, Any]:
"""
        Train and eval a model from a given training state; the state will be modified.
This function iterates epochs specified in config, and for each epoch do:
1. Train model using training data, aggregate and report training results
2. Adjust learning rate if scheduler is specified
3. Evaluate model using evaluation data
4. Calculate metrics based on evaluation results and select best model
Args:
            training_state (TrainingState): contains stateful information to be
able to restore a training job
train_iter (BatchIterator): batch iterator of training data
eval_iter (BatchIterator): batch iterator of evaluation data
model (Model): model to be trained
metric_reporter (MetricReporter): compute metric based on training
output and report results to console, file.. etc
train_config (PyTextConfig): training config
Returns:
model, best_metric: the trained model together with the best metric
"""
training_data = self.set_up_training(state, training_data)
model = state.model
rank = state.rank
trainable_params = sum(
p.numel() for p in state.model.parameters() if p.requires_grad
)
print(f"Num trainable parameters: {trainable_params}")
while self.continue_training(state):
state.epoch += 1
state.epochs_since_last_improvement += 1
lrs = learning_rates(state.optimizer)
print(f"\nWorker {state.rank} starting epoch {state.epoch}")
print(f"Learning rate(s): {", ".join(map(str, lrs))}")
with timing.time("train epoch"):
state.stage = Stage.TRAIN
state.model.train()
print(f"start training epoch {state.epoch}")
epoch_data = training_data
if self.config.num_batches_per_epoch:
# We want to limit the number of batches in the epoch;
# equivalent to epoch_data[:num_batches_per_epoch] for iterators.
# In this case we set the training data iterator to cycle earlier
# in the training process, so when it reaches the end it will
# loop back to the beginning.
epoch_data = itertools.islice(
epoch_data, self.config.num_batches_per_epoch
)
self.run_epoch(state, epoch_data, metric_reporter)
if not self.config.do_eval:
continue
with timing.time("eval epoch"):
state.stage = Stage.EVAL
model.eval(Stage.EVAL)
print(f"start evaluating epoch {state.epoch}")
with torch.no_grad():
eval_metric = self.run_epoch(state, eval_data, metric_reporter)
# Step the learning rate scheduler(s)
assert eval_metric is not None
state.scheduler.step_epoch(
metrics=metric_reporter.get_model_select_metric(eval_metric),
epoch=state.epoch,
)
# Did we train a better model?
better_model = metric_reporter.compare_metric(
eval_metric, state.best_model_metric
)
if better_model:
self.update_best_model(state, train_config, eval_metric)
if better_model or train_config.save_all_checkpoints:
self.save_checkpoint(state, train_config)
if self.optimizer.finalize():
state.stage = Stage.EVAL
model.eval(Stage.EVAL)
print(f"start evaluating finalized state")
with torch.no_grad():
eval_metric = self.run_epoch(state, eval_data, metric_reporter)
better_model = metric_reporter.compare_metric(
eval_metric, state.best_model_metric
)
if better_model:
self.update_best_model(state, train_config, eval_metric)
if better_model or train_config.save_all_checkpoints:
self.save_checkpoint(state, train_config)
# Only bother loading the best model for master worker
if rank == 0 and state.best_model_state is not None:
self.load_best_model(state)
return state.model, state.best_model_metric
@timing.report_snapshot
def run_epoch(
self, state: TrainingState, data: BatchIterator, metric_reporter: MetricReporter
):
# This method is due for some refactoring, pushing it off because it interacts
# with the metric reporter too much. Much of the logic here either changes in
# the NewTaskTrainer or should change with a better metric reporter design.
report_metric = state.stage != Stage.TRAIN or self.config.report_train_metrics
model = state.model
samples = []
"""
Sometimes, a batch of inputs is too large to fit into GPU, which has to
be split into several micro-batches. However, to improve efficiency,
it would be helpful to only apply params/gradients sync at original batch
boundaries instead of micro-batch boundaries.
        num_accumulated_batches specifies how many micro-batches of gradients to
        accumulate locally before syncing gradients; the effective
        training_batch_size = train_batch_size x num_accumulated_batches, which
        improves system performance by reducing the total network transfer bytes.
"""
for sample in enumerate(data):
samples.append(sample)
if (
state.stage != Stage.TRAIN
or len(samples) == self.config.num_accumulated_batches
):
self.run_step(samples, state, metric_reporter, report_metric)
samples = []
if samples:
self.run_step(samples, state, metric_reporter, report_metric)
samples = []
metrics = None
if report_metric:
with timing.time("report metrics"):
metrics = metric_reporter.report_metric(
model, state.stage, state.epoch, print_to_channels=(state.rank == 0)
)
else:
metric_reporter._reset()
return metrics
@timing.time("run_step")
def run_step(
self,
samples: List[Any],
state: TrainingState,
metric_reporter: MetricReporter,
report_metric: bool,
):
sample_size = len(samples)
assert sample_size <= self.config.num_accumulated_batches
model = state.model
self.zero_grads(state)
for idx, (batch_id, (inputs, targets, context)) in enumerate(samples):
with contextlib_ExitStack() as exit_stack:
maybe_accumulate_gradients(exit_stack, model, idx, sample_size)
# pass context to model to use in forward call if needed
model.contextualize(context)
with timing.time("model.forward"):
logits = model(*inputs)
with timing.time("compute loss"):
loss = precision.maybe_float(
model.get_loss(logits, targets, context)
)
if BatchContext.IGNORE_LOSS in context:
loss *= 0
elif sample_size > 1:
# gradients averaged per batch and accumulated across samples.
# divide sample_size to let gradients averaged per example
loss = loss / sample_size
self.backprop(state, loss)
if report_metric:
with timing.time("get pred"):
preds, scores = model.get_pred(
logits, targets, context, state.stage, *inputs
)
with timing.time("add metrics"):
metric_reporter.add_batch_stats(
batch_id, preds, targets, scores, loss.item(), inputs, **context
)
if batch_id % self.config.num_samples_to_log_progress == 0:
print(
f"Running batch {batch_id} for epoch {state.epoch} in {state.stage} stage",
flush=True,
)
# update gradients after len(samples) forward & backward
self.optimizer_step(state)
self.sparsification_step(state)
class TaskTrainer(Trainer):
__EXPANSIBLE__ = True
class Config(Trainer.Config):
"""Make mypy happy"""
@timing.time("run_step")
def run_step(
self,
samples: List[Any],
state: TrainingState,
metric_reporter: MetricReporter,
report_metric: bool,
):
"""Our run_step is a bit different, because we're wrapping the model forward
call with model.train_batch, which arranges tensors and gets loss, etc.
Whenever "samples" contains more than one mini-batch (sample_size > 1),
we want to accumulate gradients locally and only call all-reduce in the
last backwards pass.
"""
sample_size = len(samples)
assert sample_size <= self.config.num_accumulated_batches
model = state.model
self.zero_grads(state)
for idx, (batch_id, (raw_batch, batch)) in enumerate(samples):
with contextlib_ExitStack() as exit_stack:
# enter ddp no_sync context and fp16 delay_scale context if needed
maybe_accumulate_gradients(exit_stack, model, idx, sample_size)
with timing.time("model.train_batch"):
loss, metric_data = model.train_batch(model, batch, state)
if sample_size > 1:
# gradients averaged per batch and accumulated across samples.
# divide sample_size to let gradients averaged per example
loss = loss / sample_size
self.backprop(state, loss)
if report_metric:
with timing.time("add metrics"):
metric_reporter.add_batch_stats(
batch_id,
*metric_data,
# TODO merge this step into add_batch_stats once all data
# migration is done
**metric_reporter.batch_context(raw_batch, batch),
)
if batch_id % self.config.num_samples_to_log_progress == 0:
metric_reporter.report_realtime_metric(state.stage)
# update gradients after #len(samples) forward & backward
self.optimizer_step(state)
self.sparsification_step(state)
def _prepare_scheduler(self, training_batches, scheduler=None):
"""Batch based schedulers require knowing the number of batches in
the data. We're not supporting that yet with the Data api, need to figure out
how to expose this info or restructure batch-based schedulers to not need it."""
if scheduler.batch_based_schedulers:
raise Exception("New tasks don't yet support batch-based scheduling")
return scheduler
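# Illustrative usage sketch (not part of pytext itself); assumes a task has
# already produced model, training_data, eval_data, metric_reporter and
# train_config objects:
#
#     trainer = TaskTrainer.from_config(TaskTrainer.Config(epochs=5), model)
#     trained_model, best_metric = trainer.train(
#         training_data, eval_data, model, metric_reporter, train_config
#     )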
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import itertools
import time
from contextlib import ExitStack as contextlib_ExitStack
from typing import Any, Iterable, List, Optional, Tuple
import torch
from pytext.common.constants import BatchContext, Stage
from pytext.config import PyTextConfig
from pytext.config.component import (
Component,
ComponentType,
create_optimizer,
create_scheduler,
create_sparsifier,
)
from pytext.config.pytext_config import ConfigBase
from pytext.data.data_handler import BatchIterator
from pytext.metric_reporters import MetricReporter
from pytext.models.distributed_model import DistributedModel
from pytext.models.model import Model
from pytext.optimizer import Adam, Optimizer, learning_rates
from pytext.optimizer.scheduler import Scheduler
from pytext.optimizer.sparsifier import Sparsifier
from pytext.task.serialize import save
from pytext.trainers.training_state import TrainingState
from pytext.utils import cuda, precision, timing
class TrainerBase(Component):
__COMPONENT_TYPE__ = ComponentType.TRAINER
def cycle(iterator: Iterable[Any]) -> Iterable[Any]:
"""Like itertools.cycle, but will call iter on the original iterable instead.
This limits it to not be able to run on say raw generators, but also doesn't
store a copy of the iterable in memory for repetition."""
while True:
yield from iterator
def maybe_accumulate_gradients(exit_stack, model, index, sample_size):
# index == sample_size - 1 represents the last backward pass
if (
cuda.DISTRIBUTED_WORLD_SIZE > 1
and hasattr(model, "no_sync")
and index < sample_size - 1
):
"""
Whenever *samples* contains more than one mini-batch (e.g sample_size > 1),
we want to accumulate gradients locally and only call all-reduce in the
last backwards pass.
"""
exit_stack.enter_context(model.no_sync())
if precision._FP16_ENABLED and index < sample_size - 1:
"""
Whenever *samples* contains more than one mini-batch (e.g sample_size > 1),
we want to accumulate gradients in FP16 parameters (e.g delay unscale)
and only unscale to FP32 parameters after the last backward pass.
"""
exit_stack.enter_context(precision.delay_unscale())
class Trainer(TrainerBase):
"""
    Base Trainer class that provides ways to
1 Train model, compute metrics against eval set and use the metrics for
model selection.
2 Test trained model, compute and publish metrics against a blind test set.
Attributes:
epochs (int): Training epochs
early_stop_after (int): Stop after how many epochs when the eval metric
is not improving
max_clip_norm (Optional[float]): Clip gradient norm if set
report_train_metrics (bool): Whether metrics on training data should be
computed and reported.
target_time_limit_seconds (float): Target time limit for training in seconds. If
the expected time to train another epoch exceeds this limit, stop training.
"""
class Config(ConfigBase):
#: Training epochs
epochs: int = 10
#: Stop after how many epochs when the eval metric is not improving
early_stop_after: int = 0
#: Clip gradient norm if set
max_clip_norm: Optional[float] = None
#: Whether metrics on training data should be computed and reported.
report_train_metrics: bool = True
#: Target time limit for training, default (None) to no time limit.
target_time_limit_seconds: Optional[int] = None
#: Whether to do evaluation and model selection based on it.
do_eval: bool = True
#: Number of samples for logging training progress.
num_samples_to_log_progress: int = 1000
#: Number of forward & backward per batch before update gradients, the
#: actual_batch_size = batch_size x num_accumulated_batches
num_accumulated_batches: int = 1
#: Define epoch as a fixed number of batches. Subsequent epochs will continue
#: to iterate through the data, cycling through it when they reach the end.
#: If not set, use exactly one pass through the dataset as one epoch.
#: This configuration only affects the train epochs, test and eval
#: will always test their entire datasets.
num_batches_per_epoch: Optional[int] = None
#: config for optimizer, used in parameter update
optimizer: Optimizer.Config = Adam.Config()
scheduler: Optional[Scheduler.Config] = None
sparsifier: Optional[Sparsifier.Config] = None
def __init__(self, config: Config, model: torch.nn.Module):
if config.early_stop_after > 0:
            assert config.do_eval, "can't do early stopping when not running evaluation"
optimizer: torch.optim.Optimizer = create_optimizer(config.optimizer, model)
self.scheduler: torch.optim.lr_scheduler = (
create_scheduler(config.scheduler, optimizer)
if config.scheduler
else Scheduler()
)
self.sparsifier: Sparsifier = (
create_sparsifier(config.sparsifier) if config.sparsifier else Sparsifier()
)
model, self.optimizer = precision.initialize(model, optimizer)
self.config = config
@classmethod
def from_config(cls, config: Config, model: torch.nn.Module, *args, **kwargs):
return cls(config, model)
@timing.time("Trainer.test")
def test(self, test_iter, model, metric_reporter: MetricReporter):
state = TrainingState(stage=Stage.TEST, model=model, epoch=1)
if cuda.CUDA_ENABLED:
state.model.cuda()
state.model.eval()
with torch.no_grad():
return self.run_epoch(state, test_iter, metric_reporter)
@timing.time("pre-training")
def set_up_training(self, state: TrainingState, training_data: BatchIterator):
if cuda.CUDA_ENABLED:
state.model.cuda()
state.scheduler.prepare(training_data, self.config.epochs)
if cuda.DISTRIBUTED_WORLD_SIZE > 1:
device_id = torch.cuda.current_device()
state.model = DistributedModel(
module=state.model,
device_ids=[device_id],
output_device=device_id,
broadcast_buffers=False,
find_unused_parameters=state.model.find_unused_parameters,
)
state.start_time = time.time()
if self.config.num_batches_per_epoch:
# Set the training_data iterator to cycle, so it will never run out,
# but rather after reaching the end will loop back to the beginning.
training_data = cycle(training_data)
return training_data
@timing.time("zero gradients")
def zero_grads(self, state):
if state.stage != Stage.TRAIN:
return
state.optimizer.zero_grad()
@timing.time("backprop")
def backprop(self, state, loss):
if state.stage != Stage.TRAIN:
return
with timing.time("loss.backward"):
precision.backward(state.optimizer, loss)
@timing.time("optimizer")
def optimizer_step(self, state):
if state.stage != Stage.TRAIN:
return
state.scheduler.step_batch()
if self.config.max_clip_norm is not None:
grad_norm = precision.clip_grad_norm(
state.model, state.optimizer, self.config.max_clip_norm
)
else:
grad_norm = None
with timing.time("optimizer.step"):
state.optimizer.step()
state.step_counter += 1
# grad_norm could be used to check grads sync in distributed training
return grad_norm
@timing.time("sparsifier")
def sparsification_step(self, state):
        # sparsification only if a sparsifier is used
if not self.config.sparsifier:
return
if state.stage != Stage.TRAIN:
return
if state.sparsifier.sparsification_condition(state):
state.sparsifier.sparsify(state)
if state.rank == 0:
current_sparsity = state.sparsifier.get_current_sparsity(state.model)
print(f"sparsity in the model: {current_sparsity}")
def continue_training(self, state: TrainingState) -> bool:
# Are we done?
if state.epoch >= self.config.epochs:
return False
# Check whether the model has improved recently enough
# Only do this if we're bothering to evaluate the model
if self.config.do_eval and state.epochs_since_last_improvement >= (
self.config.early_stop_after or float("inf")
):
print(
f"Worker {state.rank}: Eval metric hasn't changed for "
+ f"{state.epochs_since_last_improvement} epochs. Stopping now."
)
return False
# Check whether we think the next epoch will put us over the configured
# time limit.
epochs_run = state.epoch + 1
time_elapsed = time.time() - state.start_time
mean_epoch_time = time_elapsed / epochs_run
expected_next_epoch_time = time_elapsed + mean_epoch_time
target_time_limit = (
float("inf")
if self.config.target_time_limit_seconds is None
else self.config.target_time_limit_seconds
)
if expected_next_epoch_time > target_time_limit:
print(
f"Worker {state.rank}: Stopping training after {epochs_run} epochs "
f"and {int(time_elapsed)} seconds, due to the target max training "
f"time of {self.config.target_time_limit_seconds} seconds."
)
return False
return True
def update_best_model(
self, state: TrainingState, train_config: PyTextConfig, eval_metric
):
# This should be updated by all workers so they agree on when to stop training
# when `early_stop_after` is specified.
state.epochs_since_last_improvement = 0
state.best_model_metric = eval_metric
print(f"Found a better model!")
# Only one worker should save checkpoints
if state.rank != 0:
return
model_state = state.model.state_dict()
# save to cpu to avoid multiple model copies in gpu memory
if cuda.CUDA_ENABLED:
for key, parameter in model_state.items():
model_state[key] = parameter.cpu()
state.best_model_state = model_state
@timing.time("save checkpoint")
def save_checkpoint(self, state: TrainingState, train_config: PyTextConfig) -> str:
# Only one worker should save checkpoints
if state.rank != 0:
return
if train_config.save_module_checkpoints or train_config.save_all_checkpoints:
# saves per-epoch sub-modules when save_all_checkpoints or
# save_module_checkpoints is enabled
state.model.save_modules(
base_path=train_config.modules_save_dir, suffix=f"-ep{state.epoch}"
)
if state.epochs_since_last_improvement == 0:
# state.epochs_since_last_improvement == 0 means found a better
# model in current epoch, thus update best model's sub-modules
state.model.save_modules(base_path=train_config.modules_save_dir)
# next to add new config and implementation of frequency on checkpointing
if train_config.save_all_checkpoints:
return save(
config=train_config,
model=state.model,
meta=None,
tensorizers=None,
training_state=state,
identifier=str(state.epoch),
)
def load_best_model(self, state: TrainingState):
if cuda.CUDA_ENABLED:
# Move current model to CPU to avoid multiple models in GPU memory
state.model.cpu()
state.model.load_state_dict(
{k: v.cuda() for k, v in state.best_model_state.items()}
)
# Move model back to GPU
state.model.cuda()
else:
state.model.load_state_dict(state.best_model_state)
def train(
self,
training_data: BatchIterator,
eval_data: BatchIterator,
model: Model,
metric_reporter: MetricReporter,
train_config: PyTextConfig,
rank: int = 0,
) -> Tuple[torch.nn.Module, Any]:
"""
Train and eval a model, the model states will be modified.
Args:
train_iter (BatchIterator): batch iterator of training data
eval_iter (BatchIterator): batch iterator of evaluation data
model (Model): model to be trained
metric_reporter (MetricReporter): compute metric based on training
output and report results to console, file.. etc
train_config (PyTextConfig): training config
training_result (Optional): only meaningful for Hogwild training. default
is None
rank (int): only used in distributed training, the rank of the current
training thread, evaluation will only be done in rank 0
Returns:
model, best_metric: the trained model together with the best metric
"""
state = TrainingState(
model=model,
optimizer=self.optimizer,
scheduler=self.scheduler,
sparsifier=self.sparsifier,
rank=rank,
)
return self.train_from_state(
state, training_data, eval_data, metric_reporter, train_config
)
@timing.time("Trainer.train_from_state")
def train_from_state(
self,
state: TrainingState,
training_data: BatchIterator,
eval_data: BatchIterator,
metric_reporter: MetricReporter,
train_config: PyTextConfig,
) -> Tuple[torch.nn.Module, Any]:
"""
        Train and eval a model from a given training state; the state will be modified.
This function iterates epochs specified in config, and for each epoch do:
1. Train model using training data, aggregate and report training results
2. Adjust learning rate if scheduler is specified
3. Evaluate model using evaluation data
4. Calculate metrics based on evaluation results and select best model
Args:
            training_state (TrainingState): contains stateful information to be
able to restore a training job
train_iter (BatchIterator): batch iterator of training data
eval_iter (BatchIterator): batch iterator of evaluation data
model (Model): model to be trained
metric_reporter (MetricReporter): compute metric based on training
output and report results to console, file.. etc
train_config (PyTextConfig): training config
Returns:
model, best_metric: the trained model together with the best metric
"""
training_data = self.set_up_training(state, training_data)
model = state.model
rank = state.rank
trainable_params = sum(
p.numel() for p in state.model.parameters() if p.requires_grad
)
print(f"Num trainable parameters: {trainable_params}")
while self.continue_training(state):
state.epoch += 1
state.epochs_since_last_improvement += 1
lrs = learning_rates(state.optimizer)
print(f"\nWorker {state.rank} starting epoch {state.epoch}")
print(f"Learning rate(s): {', '.join(map(str, lrs))}")
with timing.time("train epoch"):
state.stage = Stage.TRAIN
state.model.train()
print(f"start training epoch {state.epoch}")
epoch_data = training_data
if self.config.num_batches_per_epoch:
# We want to limit the number of batches in the epoch;
# equivalent to epoch_data[:num_batches_per_epoch] for iterators.
# In this case we set the training data iterator to cycle earlier
# in the training process, so when it reaches the end it will
# loop back to the beginning.
epoch_data = itertools.islice(
epoch_data, self.config.num_batches_per_epoch
)
self.run_epoch(state, epoch_data, metric_reporter)
if not self.config.do_eval:
continue
with timing.time("eval epoch"):
state.stage = Stage.EVAL
model.eval(Stage.EVAL)
print(f"start evaluating epoch {state.epoch}")
with torch.no_grad():
eval_metric = self.run_epoch(state, eval_data, metric_reporter)
# Step the learning rate scheduler(s)
assert eval_metric is not None
state.scheduler.step_epoch(
metrics=metric_reporter.get_model_select_metric(eval_metric),
epoch=state.epoch,
)
# Did we train a better model?
better_model = metric_reporter.compare_metric(
eval_metric, state.best_model_metric
)
if better_model:
self.update_best_model(state, train_config, eval_metric)
if better_model or train_config.save_all_checkpoints:
self.save_checkpoint(state, train_config)
if self.optimizer.finalize():
state.stage = Stage.EVAL
model.eval(Stage.EVAL)
print(f"start evaluating finalized state")
with torch.no_grad():
eval_metric = self.run_epoch(state, eval_data, metric_reporter)
better_model = metric_reporter.compare_metric(
eval_metric, state.best_model_metric
)
if better_model:
self.update_best_model(state, train_config, eval_metric)
if better_model or train_config.save_all_checkpoints:
self.save_checkpoint(state, train_config)
# Only bother loading the best model for master worker
if rank == 0 and state.best_model_state is not None:
self.load_best_model(state)
return state.model, state.best_model_metric
@timing.report_snapshot
def run_epoch(
self, state: TrainingState, data: BatchIterator, metric_reporter: MetricReporter
):
# This method is due for some refactoring, pushing it off because it interacts
# with the metric reporter too much. Much of the logic here either changes in
# the NewTaskTrainer or should change with a better metric reporter design.
report_metric = state.stage != Stage.TRAIN or self.config.report_train_metrics
model = state.model
samples = []
"""
Sometimes, a batch of inputs is too large to fit into GPU, which has to
be split into several micro-batches. However, to improve efficiency,
it would be helpful to only apply params/gradients sync at original batch
boundaries instead of micro-batch boundaries.
        num_accumulated_batches specifies how many micro-batches of gradients to
        accumulate locally before syncing gradients; the effective
        training_batch_size = train_batch_size x num_accumulated_batches, which
        improves system performance by reducing the total network transfer bytes.
"""
for sample in enumerate(data):
samples.append(sample)
if (
state.stage != Stage.TRAIN
or len(samples) == self.config.num_accumulated_batches
):
self.run_step(samples, state, metric_reporter, report_metric)
samples = []
if samples:
self.run_step(samples, state, metric_reporter, report_metric)
samples = []
metrics = None
if report_metric:
with timing.time("report metrics"):
metrics = metric_reporter.report_metric(
model, state.stage, state.epoch, print_to_channels=(state.rank == 0)
)
else:
metric_reporter._reset()
return metrics
@timing.time("run_step")
def run_step(
self,
samples: List[Any],
state: TrainingState,
metric_reporter: MetricReporter,
report_metric: bool,
):
sample_size = len(samples)
assert sample_size <= self.config.num_accumulated_batches
model = state.model
self.zero_grads(state)
for idx, (batch_id, (inputs, targets, context)) in enumerate(samples):
with contextlib_ExitStack() as exit_stack:
maybe_accumulate_gradients(exit_stack, model, idx, sample_size)
# pass context to model to use in forward call if needed
model.contextualize(context)
with timing.time("model.forward"):
logits = model(*inputs)
with timing.time("compute loss"):
loss = precision.maybe_float(
model.get_loss(logits, targets, context)
)
if BatchContext.IGNORE_LOSS in context:
loss *= 0
elif sample_size > 1:
# gradients averaged per batch and accumulated across samples.
# divide sample_size to let gradients averaged per example
loss = loss / sample_size
self.backprop(state, loss)
if report_metric:
with timing.time("get pred"):
preds, scores = model.get_pred(
logits, targets, context, state.stage, *inputs
)
with timing.time("add metrics"):
metric_reporter.add_batch_stats(
batch_id, preds, targets, scores, loss.item(), inputs, **context
)
if batch_id % self.config.num_samples_to_log_progress == 0:
print(
f"Running batch {batch_id} for epoch {state.epoch} in {state.stage} stage",
flush=True,
)
# update gradients after len(samples) forward & backward
self.optimizer_step(state)
self.sparsification_step(state)
class TaskTrainer(Trainer):
__EXPANSIBLE__ = True
class Config(Trainer.Config):
"""Make mypy happy"""
@timing.time("run_step")
def run_step(
self,
samples: List[Any],
state: TrainingState,
metric_reporter: MetricReporter,
report_metric: bool,
):
"""Our run_step is a bit different, because we're wrapping the model forward
call with model.train_batch, which arranges tensors and gets loss, etc.
Whenever "samples" contains more than one mini-batch (sample_size > 1),
we want to accumulate gradients locally and only call all-reduce in the
last backwards pass.
"""
sample_size = len(samples)
assert sample_size <= self.config.num_accumulated_batches
model = state.model
self.zero_grads(state)
for idx, (batch_id, (raw_batch, batch)) in enumerate(samples):
with contextlib_ExitStack() as exit_stack:
# enter ddp no_sync context and fp16 delay_scale context if needed
maybe_accumulate_gradients(exit_stack, model, idx, sample_size)
with timing.time("model.train_batch"):
loss, metric_data = model.train_batch(model, batch, state)
if sample_size > 1:
# gradients averaged per batch and accumulated across samples.
# divide sample_size to let gradients averaged per example
loss = loss / sample_size
self.backprop(state, loss)
if report_metric:
with timing.time("add metrics"):
metric_reporter.add_batch_stats(
batch_id,
*metric_data,
# TODO merge this step into add_batch_stats once all data
# migration is done
**metric_reporter.batch_context(raw_batch, batch),
)
if batch_id % self.config.num_samples_to_log_progress == 0:
metric_reporter.report_realtime_metric(state.stage)
# update gradients after #len(samples) forward & backward
self.optimizer_step(state)
self.sparsification_step(state)
def _prepare_scheduler(self, training_batches, scheduler=None):
"""Batch based schedulers require knowing the number of batches in
the data. We're not supporting that yet with the Data api, need to figure out
how to expose this info or restructure batch-based schedulers to not need it."""
if scheduler.batch_based_schedulers:
raise Exception("New tasks don't yet support batch-based scheduling")
return scheduler
|
import numpy as np
import astropy.units as u
import astropy.wcs.utils
from astropy.coordinates import (
ITRS,
BaseCoordinateFrame,
CartesianRepresentation,
SkyCoord,
SphericalRepresentation,
)
from astropy.wcs import WCS
from sunpy import log
from .frames import (
BaseCoordinateFrame,
Heliocentric,
HeliographicCarrington,
HeliographicStonyhurst,
Helioprojective,
SunPyBaseCoordinateFrame,
)
__all__ = ['solar_wcs_frame_mapping', 'solar_frame_to_wcs_mapping']
try:
# TODO: Remove vendored version after Astropy 5.0
from astropy.wcs.utils import obsgeo_to_frame
except ImportError:
def obsgeo_to_frame(obsgeo, obstime):
"""
Convert a WCS obsgeo property into an `~builtin_frames.ITRS` coordinate frame.
Parameters
----------
obsgeo : array-like
A shape ``(6, )`` array representing ``OBSGEO-[XYZ], OBSGEO-[BLH]`` as
returned by ``WCS.wcs.obsgeo``.
obstime : time-like
            The time associated with the coordinate; it will be passed to
`~.builtin_frames.ITRS` as the obstime keyword.
Returns
-------
`~.builtin_frames.ITRS`
An `~.builtin_frames.ITRS` coordinate frame
representing the coordinates.
Notes
-----
The obsgeo array as accessed on a `.WCS` object is a length 6 numpy array
where the first three elements are the coordinate in a cartesian
representation and the second 3 are the coordinate in a spherical
representation.
        This function prioritizes reading the cartesian coordinates, and will only
read the spherical coordinates if the cartesian coordinates are either all
zero or any of the cartesian coordinates are non-finite.
In the case where both the spherical and cartesian coordinates have some
non-finite values the spherical coordinates will be returned with the
non-finite values included.
"""
if (obsgeo is None
or len(obsgeo) != 6
or np.all(np.array(obsgeo) == 0)
or np.all(~np.isfinite(obsgeo))
): # NOQA
raise ValueError(f"Can not parse the 'obsgeo' location ({obsgeo}). "
"obsgeo should be a length 6 non-zero, finite numpy array")
# If the cartesian coords are zero or have NaNs in them use the spherical ones
if np.all(obsgeo[:3] == 0) or np.any(~np.isfinite(obsgeo[:3])):
data = SphericalRepresentation(*(obsgeo[3:] * (u.deg, u.deg, u.m)))
# Otherwise we assume the cartesian ones are valid
else:
data = CartesianRepresentation(*obsgeo[:3] * u.m)
return ITRS(data, obstime=obstime)
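# Illustrative sketch (hypothetical values): a ground-based observer stored as
# WCS obsgeo cartesian metres can be turned into an ITRS frame like this:
#
#     obsgeo = [5109360.0, 2006852.0, -3238948.0, 0.0, 0.0, 0.0]
#     itrs = obsgeo_to_frame(obsgeo, obstime="2021-01-01T00:00:00")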
def solar_wcs_frame_mapping(wcs):
"""
    This function registers the coordinate frames to their FITS-WCS coordinate
type values in the `astropy.wcs.utils.wcs_to_celestial_frame` registry.
Parameters
----------
wcs : astropy.wcs.WCS
Returns
-------
astropy.coordinates.BaseCoordinateFrame
"""
if hasattr(wcs, "coordinate_frame"):
return wcs.coordinate_frame
dateobs = wcs.wcs.dateobs or None
    # Get observer coordinate from the WCS auxiliary information
required_attrs = {HeliographicStonyhurst: ['hgln_obs', 'hglt_obs', 'dsun_obs'],
HeliographicCarrington: ['crln_obs', 'hglt_obs', 'dsun_obs']}
    # Get rsun from the WCS auxiliary information
rsun = wcs.wcs.aux.rsun_ref
if rsun is not None:
rsun *= u.m
# TODO: remove these errors in sunpy 4.1
bad_attrs = [f'.{attr}' for attr in ['rsun', 'heliographic_observer']
if hasattr(wcs, attr)]
if len(bad_attrs):
raise ValueError(f"The {" and ".join(bad_attrs)} attribute(s) on a WCS "
"are no longer supported.")
observer = None
for frame, attr_names in required_attrs.items():
attrs = [getattr(wcs.wcs.aux, attr_name) for attr_name in attr_names]
if all([attr is not None for attr in attrs]):
kwargs = {'obstime': dateobs}
if rsun is not None:
kwargs['rsun'] = rsun
if issubclass(frame, HeliographicCarrington):
kwargs['observer'] = 'self'
observer = frame(attrs[0] * u.deg,
attrs[1] * u.deg,
attrs[2] * u.m,
**kwargs)
# Read the observer out of obsgeo for ground based observers
if observer is None:
try:
observer = obsgeo_to_frame(wcs.wcs.obsgeo, dateobs)
observer = SkyCoord(observer, rsun=rsun)
except ValueError as e:
# The helper function assumes you know the obsgeo coords you are
# parsing are good, we are not sure, so catch the error.
# This approach could lead to an invalid observer (i.e. one of the
# coords being NaN), but only if the WCS has been constructed like that.
log.debug(f"Could not parse obsgeo coordinates from WCS:\n{e}")
# Collect all of the possible frame attributes, although some may be removed later
frame_args = {'obstime': dateobs}
if observer is not None:
frame_args['observer'] = observer
if rsun is not None:
frame_args['rsun'] = rsun
frame_class = _sunpy_frame_class_from_ctypes(wcs.wcs.ctype)
if frame_class:
if frame_class == HeliographicStonyhurst:
frame_args.pop('observer', None)
if frame_class == Heliocentric:
frame_args.pop('rsun', None)
return frame_class(**frame_args)
def _sunpy_frame_class_from_ctypes(ctypes):
# Truncate the ctype to the first four letters
ctypes = {c[:4] for c in ctypes}
mapping = {
Helioprojective: {'HPLN', 'HPLT'},
HeliographicStonyhurst: {'HGLN', 'HGLT'},
HeliographicCarrington: {'CRLN', 'CRLT'},
Heliocentric: {'SOLX', 'SOLY'},
}
for frame_class, ctype_pair in mapping.items():
if ctype_pair <= ctypes:
return frame_class
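# Illustrative example: ctype values like ('HPLN-TAN', 'HPLT-TAN') truncate to
# {'HPLN', 'HPLT'} and therefore map to Helioprojective; unrecognized ctypes fall
# through and the helper implicitly returns None.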
def _set_wcs_aux_obs_coord(wcs, obs_frame):
"""
Set (in-place) observer coordinate information on a WCS.
Parameters
----------
wcs : astropy.wcs.WCS
obs_frame : astropy.coordinates.SkyCoord, astropy.coordinates.CoordinateFrame
"""
# Sometimes obs_coord can be a SkyCoord, so convert down to a frame
if hasattr(obs_frame, 'frame'):
obs_frame = obs_frame.frame
if isinstance(obs_frame, HeliographicStonyhurst):
wcs.wcs.aux.hgln_obs = obs_frame.lon.to_value(u.deg)
elif isinstance(obs_frame, HeliographicCarrington):
wcs.wcs.aux.crln_obs = obs_frame.lon.to_value(u.deg)
else:
raise ValueError('obs_coord must be in a Stonyhurst or Carrington frame')
# These two keywords are the same for Carrington and Stonyhurst
wcs.wcs.aux.hglt_obs = obs_frame.lat.to_value(u.deg)
wcs.wcs.aux.dsun_obs = obs_frame.radius.to_value(u.m)
def solar_frame_to_wcs_mapping(frame, projection='TAN'):
"""
For a given frame, this function returns the corresponding WCS object.
It registers the WCS coordinates types from their associated frame in the
`astropy.wcs.utils.celestial_frame_to_wcs` registry.
Parameters
----------
frame : astropy.coordinates.BaseCoordinateFrame
projection : str, optional
Returns
-------
astropy.wcs.WCS
"""
wcs = WCS(naxis=2)
if hasattr(frame, 'rsun'):
wcs.wcs.aux.rsun_ref = frame.rsun.to_value(u.m)
if hasattr(frame, 'observer') and frame.observer is not None:
if isinstance(frame.observer, BaseCoordinateFrame):
observer = frame.observer
elif frame.observer == 'self':
observer = frame
_set_wcs_aux_obs_coord(wcs, observer)
if isinstance(frame, SunPyBaseCoordinateFrame):
if frame.obstime:
wcs.wcs.dateobs = frame.obstime.utc.isot
if isinstance(frame, Helioprojective):
xcoord = 'HPLN' + '-' + projection
ycoord = 'HPLT' + '-' + projection
wcs.wcs.cunit = ['arcsec', 'arcsec']
elif isinstance(frame, Heliocentric):
xcoord = 'SOLX'
ycoord = 'SOLY'
wcs.wcs.cunit = ['deg', 'deg']
elif isinstance(frame, HeliographicCarrington):
xcoord = 'CRLN' + '-' + projection
ycoord = 'CRLT' + '-' + projection
wcs.wcs.cunit = ['deg', 'deg']
elif isinstance(frame, HeliographicStonyhurst):
xcoord = 'HGLN' + '-' + projection
ycoord = 'HGLT' + '-' + projection
wcs.wcs.cunit = ['deg', 'deg']
else:
return None
wcs.wcs.ctype = [xcoord, ycoord]
return wcs
astropy.wcs.utils.WCS_FRAME_MAPPINGS.append([solar_wcs_frame_mapping])
astropy.wcs.utils.FRAME_WCS_MAPPINGS.append([solar_frame_to_wcs_mapping])
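# Illustrative round-trip sketch (hypothetical header values): once the two
# mappings above are registered, a helioprojective WCS resolves to a sunpy frame
# via the standard astropy utility:
#
#     from astropy.wcs.utils import wcs_to_celestial_frame
#     w = WCS(naxis=2)
#     w.wcs.ctype = ['HPLN-TAN', 'HPLT-TAN']
#     w.wcs.dateobs = '2021-01-01T00:00:00'
#     frame = wcs_to_celestial_frame(w)  # a Helioprojective instance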
|
import numpy as np
import astropy.units as u
import astropy.wcs.utils
from astropy.coordinates import (
ITRS,
BaseCoordinateFrame,
CartesianRepresentation,
SkyCoord,
SphericalRepresentation,
)
from astropy.wcs import WCS
from sunpy import log
from .frames import (
BaseCoordinateFrame,
Heliocentric,
HeliographicCarrington,
HeliographicStonyhurst,
Helioprojective,
SunPyBaseCoordinateFrame,
)
__all__ = ['solar_wcs_frame_mapping', 'solar_frame_to_wcs_mapping']
try:
# TODO: Remove vendored version after Astropy 5.0
from astropy.wcs.utils import obsgeo_to_frame
except ImportError:
def obsgeo_to_frame(obsgeo, obstime):
"""
Convert a WCS obsgeo property into an `~builtin_frames.ITRS` coordinate frame.
Parameters
----------
obsgeo : array-like
A shape ``(6, )`` array representing ``OBSGEO-[XYZ], OBSGEO-[BLH]`` as
returned by ``WCS.wcs.obsgeo``.
obstime : time-like
            The time associated with the coordinate; it will be passed to
`~.builtin_frames.ITRS` as the obstime keyword.
Returns
-------
`~.builtin_frames.ITRS`
An `~.builtin_frames.ITRS` coordinate frame
representing the coordinates.
Notes
-----
The obsgeo array as accessed on a `.WCS` object is a length 6 numpy array
where the first three elements are the coordinate in a cartesian
representation and the second 3 are the coordinate in a spherical
representation.
        This function prioritizes reading the cartesian coordinates, and will only
read the spherical coordinates if the cartesian coordinates are either all
zero or any of the cartesian coordinates are non-finite.
In the case where both the spherical and cartesian coordinates have some
non-finite values the spherical coordinates will be returned with the
non-finite values included.
"""
if (obsgeo is None
or len(obsgeo) != 6
or np.all(np.array(obsgeo) == 0)
or np.all(~np.isfinite(obsgeo))
): # NOQA
raise ValueError(f"Can not parse the 'obsgeo' location ({obsgeo}). "
"obsgeo should be a length 6 non-zero, finite numpy array")
# If the cartesian coords are zero or have NaNs in them use the spherical ones
if np.all(obsgeo[:3] == 0) or np.any(~np.isfinite(obsgeo[:3])):
data = SphericalRepresentation(*(obsgeo[3:] * (u.deg, u.deg, u.m)))
# Otherwise we assume the cartesian ones are valid
else:
data = CartesianRepresentation(*obsgeo[:3] * u.m)
return ITRS(data, obstime=obstime)
def solar_wcs_frame_mapping(wcs):
"""
    This function registers the coordinate frames to their FITS-WCS coordinate
type values in the `astropy.wcs.utils.wcs_to_celestial_frame` registry.
Parameters
----------
wcs : astropy.wcs.WCS
Returns
-------
astropy.coordinates.BaseCoordinateFrame
"""
if hasattr(wcs, "coordinate_frame"):
return wcs.coordinate_frame
dateobs = wcs.wcs.dateobs or None
    # Get observer coordinate from the WCS auxiliary information
required_attrs = {HeliographicStonyhurst: ['hgln_obs', 'hglt_obs', 'dsun_obs'],
HeliographicCarrington: ['crln_obs', 'hglt_obs', 'dsun_obs']}
    # Get rsun from the WCS auxiliary information
rsun = wcs.wcs.aux.rsun_ref
if rsun is not None:
rsun *= u.m
# TODO: remove these errors in sunpy 4.1
bad_attrs = [f'.{attr}' for attr in ['rsun', 'heliographic_observer']
if hasattr(wcs, attr)]
if len(bad_attrs):
raise ValueError(f"The {' and '.join(bad_attrs)} attribute(s) on a WCS "
"are no longer supported.")
observer = None
for frame, attr_names in required_attrs.items():
attrs = [getattr(wcs.wcs.aux, attr_name) for attr_name in attr_names]
if all([attr is not None for attr in attrs]):
kwargs = {'obstime': dateobs}
if rsun is not None:
kwargs['rsun'] = rsun
if issubclass(frame, HeliographicCarrington):
kwargs['observer'] = 'self'
observer = frame(attrs[0] * u.deg,
attrs[1] * u.deg,
attrs[2] * u.m,
**kwargs)
# Read the observer out of obsgeo for ground based observers
if observer is None:
try:
observer = obsgeo_to_frame(wcs.wcs.obsgeo, dateobs)
observer = SkyCoord(observer, rsun=rsun)
except ValueError as e:
# The helper function assumes you know the obsgeo coords you are
# parsing are good, we are not sure, so catch the error.
# This approach could lead to an invalid observer (i.e. one of the
# coords being NaN), but only if the WCS has been constructed like that.
log.debug(f"Could not parse obsgeo coordinates from WCS:\n{e}")
# Collect all of the possible frame attributes, although some may be removed later
frame_args = {'obstime': dateobs}
if observer is not None:
frame_args['observer'] = observer
if rsun is not None:
frame_args['rsun'] = rsun
frame_class = _sunpy_frame_class_from_ctypes(wcs.wcs.ctype)
if frame_class:
if frame_class == HeliographicStonyhurst:
frame_args.pop('observer', None)
if frame_class == Heliocentric:
frame_args.pop('rsun', None)
return frame_class(**frame_args)
def _sunpy_frame_class_from_ctypes(ctypes):
# Truncate the ctype to the first four letters
ctypes = {c[:4] for c in ctypes}
mapping = {
Helioprojective: {'HPLN', 'HPLT'},
HeliographicStonyhurst: {'HGLN', 'HGLT'},
HeliographicCarrington: {'CRLN', 'CRLT'},
Heliocentric: {'SOLX', 'SOLY'},
}
for frame_class, ctype_pair in mapping.items():
if ctype_pair <= ctypes:
return frame_class
def _set_wcs_aux_obs_coord(wcs, obs_frame):
"""
Set (in-place) observer coordinate information on a WCS.
Parameters
----------
wcs : astropy.wcs.WCS
obs_frame : astropy.coordinates.SkyCoord, astropy.coordinates.CoordinateFrame
"""
# Sometimes obs_coord can be a SkyCoord, so convert down to a frame
if hasattr(obs_frame, 'frame'):
obs_frame = obs_frame.frame
if isinstance(obs_frame, HeliographicStonyhurst):
wcs.wcs.aux.hgln_obs = obs_frame.lon.to_value(u.deg)
elif isinstance(obs_frame, HeliographicCarrington):
wcs.wcs.aux.crln_obs = obs_frame.lon.to_value(u.deg)
else:
raise ValueError('obs_coord must be in a Stonyhurst or Carrington frame')
# These two keywords are the same for Carrington and Stonyhurst
wcs.wcs.aux.hglt_obs = obs_frame.lat.to_value(u.deg)
wcs.wcs.aux.dsun_obs = obs_frame.radius.to_value(u.m)
def solar_frame_to_wcs_mapping(frame, projection='TAN'):
"""
For a given frame, this function returns the corresponding WCS object.
It registers the WCS coordinate types from their associated frame in the
`astropy.wcs.utils.celestial_frame_to_wcs` registry.
Parameters
----------
frame : astropy.coordinates.BaseCoordinateFrame
projection : str, optional
Returns
-------
astropy.wcs.WCS
"""
wcs = WCS(naxis=2)
if hasattr(frame, 'rsun'):
wcs.wcs.aux.rsun_ref = frame.rsun.to_value(u.m)
if hasattr(frame, 'observer') and frame.observer is not None:
if isinstance(frame.observer, BaseCoordinateFrame):
observer = frame.observer
elif frame.observer == 'self':
observer = frame
_set_wcs_aux_obs_coord(wcs, observer)
if isinstance(frame, SunPyBaseCoordinateFrame):
if frame.obstime:
wcs.wcs.dateobs = frame.obstime.utc.isot
if isinstance(frame, Helioprojective):
xcoord = 'HPLN' + '-' + projection
ycoord = 'HPLT' + '-' + projection
wcs.wcs.cunit = ['arcsec', 'arcsec']
elif isinstance(frame, Heliocentric):
xcoord = 'SOLX'
ycoord = 'SOLY'
wcs.wcs.cunit = ['deg', 'deg']
elif isinstance(frame, HeliographicCarrington):
xcoord = 'CRLN' + '-' + projection
ycoord = 'CRLT' + '-' + projection
wcs.wcs.cunit = ['deg', 'deg']
elif isinstance(frame, HeliographicStonyhurst):
xcoord = 'HGLN' + '-' + projection
ycoord = 'HGLT' + '-' + projection
wcs.wcs.cunit = ['deg', 'deg']
else:
return None
wcs.wcs.ctype = [xcoord, ycoord]
return wcs
astropy.wcs.utils.WCS_FRAME_MAPPINGS.append([solar_wcs_frame_mapping])
astropy.wcs.utils.FRAME_WCS_MAPPINGS.append([solar_frame_to_wcs_mapping])
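# A minimal usage sketch of the two registered mappings above, assuming
# astropy >= 4.3 so that ``wcs.wcs.aux`` is available; the numbers below are
# purely illustrative.
if __name__ == "__main__":
    example_wcs = WCS(naxis=2)
    example_wcs.wcs.ctype = ['HPLN-TAN', 'HPLT-TAN']
    example_wcs.wcs.cunit = ['arcsec', 'arcsec']
    example_wcs.wcs.dateobs = '2020-01-01T00:00:00'
    # Observer location expressed as Stonyhurst heliographic lon/lat plus distance.
    example_wcs.wcs.aux.hgln_obs = 0.0
    example_wcs.wcs.aux.hglt_obs = 0.0
    example_wcs.wcs.aux.dsun_obs = 1.496e11  # metres
    example_wcs.wcs.aux.rsun_ref = 6.957e8   # metres
    frame = solar_wcs_frame_mapping(example_wcs)
    print(type(frame).__name__, frame.observer)  # Helioprojective + HGS observer
    # Round-trip back to a WCS with the matching ctypes and units.
    roundtrip_wcs = solar_frame_to_wcs_mapping(frame)
    print(list(roundtrip_wcs.wcs.ctype))  # ['HPLN-TAN', 'HPLT-TAN']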
|
import asyncio
import json
import logging
from pathlib import Path
from typing import Dict, List, TypedDict
from gql import Client, gql
from gql.client import AsyncClientSession
from gql.transport.aiohttp import AIOHTTPTransport
log = logging.getLogger(__name__)
class Episode(TypedDict):
id: str
title: str
file: str
class Program(TypedDict):
id: str
title: str
foreign_title: str
short_description: str
episodes: List[Episode]
Programs = Dict[str, Program]
class RUVClient:
"""An HTTP client to gather a program list from ruv.is."""
def __init__(self) -> None:
self.url = "https://www.ruv.is/gql/"
transport = AIOHTTPTransport(self.url)
self.client = Client(transport=transport, execute_timeout=30)
@staticmethod
async def _query_categories(session: AsyncClientSession) -> List[str]:
query = gql(
"""
query getCategorys($station: StationSearch!) {
Category(station: $station) {
categories {
title
slug
}
}
}
"""
)
params = {
"station": "tv",
}
result = await session.execute(query, variable_values=params)
category_slugs = [category["slug"] for category in result["Category"]["categories"]] # type: ignore
return category_slugs
@staticmethod
async def _query_category(session: AsyncClientSession, category: str) -> List[Program]:
query = gql(
"""
query getKrakkaRUVCategories($station: StationSearch!, $category: String!) {
Category(station: $station, category: $category) {
categories {
programs {
short_description
episodes {
id
title
file
}
title
foreign_title
short_description
id
}
}
}
}
"""
)
params = {
"station": "tv",
"category": category,
}
result = await session.execute(query, variable_values=params)
return [
program for category in result["Category"]["categories"] for program in category["programs"] # type: ignore
]
async def _get_all_categories(self) -> List[Program]:
async with self.client as session:
categories = await self._query_categories(session)
list_of_programs_lists = await asyncio.gather(
*[asyncio.create_task(self._query_category(session, category=category)) for category in categories]
)
return [program for program_list in list_of_programs_lists for program in program_list]
@staticmethod
async def _query_all_programs(session: AsyncClientSession) -> List[Program]:
query = gql(
"""
query {
Programs {
short_description
episodes {
id
title
file
}
title
foreign_title
short_description
id
}
}
"""
)
result = await session.execute(query)
return [program for program in result["Programs"]] # type: ignore
async def _get_all_programs(self) -> Programs:
async with self.client as session:
programs = await self._query_all_programs(session)
programs_dict = {program["id"]: program for program in programs}
categories = await self._query_categories(session)
list_of_programs_lists = await asyncio.gather(
*[asyncio.create_task(self._query_category(session, category=category)) for category in categories]
)
programs_with_extra_info = {
program["id"]: program for program_list in list_of_programs_lists for program in program_list
}
self._add_extra_info(programs_dict, programs_with_extra_info)
return programs_dict
def get_all_programs(self) -> Programs:
return asyncio.run(self._get_all_programs())
@staticmethod
def _add_extra_info(programs: Programs, programs_extra_info: Programs) -> None:
"""Adds extra information from another program list to the first one."""
for p_id, program in programs.items():
if p_id in programs_extra_info:
for key in ["short_description", "foreign_title"]:
program[key] = programs_extra_info[program["id"]][key] # type: ignore
def save_programs(file_path: Path, programs: Programs):
with file_path.open("w") as f:
json.dump(programs, f)
def load_programs_cache(file_path: Path) -> Programs:
with file_path.open("r") as f:
return json.load(f)
def load_programs(force_reload, cache: Path) -> Programs:
"""Load the programs by either loading from cache or by querying ruv.is."""
if force_reload:
programs = RUVClient().get_all_programs()
else:
try:
return load_programs_cache(cache)
except FileNotFoundError:
programs = RUVClient().get_all_programs()
save_programs(cache, programs)
log.info(
f"Loaded {len(programs)} programs and {sum([len(program["episodes"]) for program in programs.values()])} episodes"
)
return programs
|
import asyncio
import json
import logging
from pathlib import Path
from typing import Dict, List, TypedDict
from gql import Client, gql
from gql.client import AsyncClientSession
from gql.transport.aiohttp import AIOHTTPTransport
log = logging.getLogger(__name__)
class Episode(TypedDict):
id: str
title: str
file: str
class Program(TypedDict):
id: str
title: str
foreign_title: str
short_description: str
episodes: List[Episode]
Programs = Dict[str, Program]
class RUVClient:
"""An HTTP client to gather a program list from ruv.is."""
def __init__(self) -> None:
self.url = "https://www.ruv.is/gql/"
transport = AIOHTTPTransport(self.url)
self.client = Client(transport=transport, execute_timeout=30)
@staticmethod
async def _query_categories(session: AsyncClientSession) -> List[str]:
query = gql(
"""
query getCategorys($station: StationSearch!) {
Category(station: $station) {
categories {
title
slug
}
}
}
"""
)
params = {
"station": "tv",
}
result = await session.execute(query, variable_values=params)
category_slugs = [category["slug"] for category in result["Category"]["categories"]] # type: ignore
return category_slugs
@staticmethod
async def _query_category(session: AsyncClientSession, category: str) -> List[Program]:
query = gql(
"""
query getKrakkaRUVCategories($station: StationSearch!, $category: String!) {
Category(station: $station, category: $category) {
categories {
programs {
short_description
episodes {
id
title
file
}
title
foreign_title
short_description
id
}
}
}
}
"""
)
params = {
"station": "tv",
"category": category,
}
result = await session.execute(query, variable_values=params)
return [
program for category in result["Category"]["categories"] for program in category["programs"] # type: ignore
]
async def _get_all_categories(self) -> List[Program]:
async with self.client as session:
categories = await self._query_categories(session)
list_of_programs_lists = await asyncio.gather(
*[asyncio.create_task(self._query_category(session, category=category)) for category in categories]
)
return [program for program_list in list_of_programs_lists for program in program_list]
@staticmethod
async def _query_all_programs(session: AsyncClientSession) -> List[Program]:
query = gql(
"""
query {
Programs {
short_description
episodes {
id
title
file
}
title
foreign_title
short_description
id
}
}
"""
)
result = await session.execute(query)
return [program for program in result["Programs"]] # type: ignore
async def _get_all_programs(self) -> Programs:
async with self.client as session:
programs = await self._query_all_programs(session)
programs_dict = {program["id"]: program for program in programs}
categories = await self._query_categories(session)
list_of_programs_lists = await asyncio.gather(
*[asyncio.create_task(self._query_category(session, category=category)) for category in categories]
)
programs_with_extra_info = {
program["id"]: program for program_list in list_of_programs_lists for program in program_list
}
self._add_extra_info(programs_dict, programs_with_extra_info)
return programs_dict
def get_all_programs(self) -> Programs:
return asyncio.run(self._get_all_programs())
@staticmethod
def _add_extra_info(programs: Programs, programs_extra_info: Programs) -> None:
"""Adds extra information from another program list to the first one."""
for p_id, program in programs.items():
if p_id in programs_extra_info:
for key in ["short_description", "foreign_title"]:
program[key] = programs_extra_info[program["id"]][key] # type: ignore
def save_programs(file_path: Path, programs: Programs):
with file_path.open("w") as f:
json.dump(programs, f)
def load_programs_cache(file_path: Path) -> Programs:
with file_path.open("r") as f:
return json.load(f)
def load_programs(force_reload, cache: Path) -> Programs:
"""Load the programs by either loading from cache or by querying ruv.is."""
if force_reload:
programs = RUVClient().get_all_programs()
else:
try:
return load_programs_cache(cache)
except FileNotFoundError:
programs = RUVClient().get_all_programs()
save_programs(cache, programs)
log.info(
f"Loaded {len(programs)} programs and {sum([len(program['episodes']) for program in programs.values()])} episodes"
)
return programs
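# A minimal usage sketch, assuming network access to ruv.is; the cache file
# name below is hypothetical.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    all_programs = load_programs(force_reload=False, cache=Path("ruv_programs.json"))
    for program in list(all_programs.values())[:3]:
        print(program["title"], len(program["episodes"]))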
|
from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING
# from typing import cast as typecast
import json
import logging
import os
import yaml
from .config import Config
from .acresource import ACResource
from ..utils import parse_yaml, dump_yaml
AnyDict = Dict[str, Any]
HandlerResult = Optional[Tuple[str, List[AnyDict]]]
# Some thoughts:
# - loading a bunch of Ambassador resources is different from loading a bunch of K8s
# services, because we should assume that if we're being fed a bunch of Ambassador
# resources, we'll get a full set. The whole 'secret loader' thing needs to have the
# concept of a TLSSecret resource that can be force-fed to us, or that can be fetched
# through the loader if needed.
# - If you're running a debug-loop Ambassador, you should just have a flat (or
# recursive, I don't care) directory full of Ambassador YAML, including TLSSecrets
# and Endpoints and whatnot, as needed. All of it will get read by
# load_from_filesystem and end up in the elements array.
# - If you're running expecting to be fed by kubewatch, at present kubewatch will
# send over K8s Service records, and anything annotated in there will end up in
# elements. This may include TLSSecrets or Endpoints. Any TLSSecret mentioned that
# isn't already in elements will need to be fetched.
# - Ambassador resources do not have namespaces. They have the ambassador_id. That's
# it. The ambassador_id is completely orthogonal to the namespace. No element with
# the wrong ambassador_id will end up in elements. It would be nice if they were
# never sent by kubewatch, but, well, y'know.
# - TLSSecret resources are not TLSContexts. TLSSecrets only have a name, a private
# half, and a public half. They do _not_ have other TLSContext information.
# - Endpoint resources probably have just a name, a service name, and an endpoint
# address.
class ResourceFetcher:
def __init__(self, logger: logging.Logger, aconf: 'Config') -> None:
self.aconf = aconf
self.logger = logger
self.elements: List[ACResource] = []
self.filename: Optional[str] = None
self.ocount: int = 1
self.saved: List[Tuple[Optional[str], int]] = []
self.k8s_endpoints: Dict[str, AnyDict] = {}
self.k8s_services: Dict[str, AnyDict] = {}
self.services: Dict[str, AnyDict] = {}
@property
def location(self):
return "%s.%d" % (self.filename or "anonymous YAML", self.ocount)
def push_location(self, filename: Optional[str], ocount: int) -> None:
self.saved.append((self.filename, self.ocount))
self.filename = filename
self.ocount = ocount
def pop_location(self) -> None:
self.filename, self.ocount = self.saved.pop()
def load_from_filesystem(self, config_dir_path, recurse: bool=False, k8s: bool=False):
inputs: List[Tuple[str, str]] = []
if os.path.isdir(config_dir_path):
dirs = [ config_dir_path ]
while dirs:
dirpath = dirs.pop(0)
for filename in os.listdir(dirpath):
filepath = os.path.join(dirpath, filename)
if recurse and os.path.isdir(filepath):
# self.logger.debug("%s: RECURSE" % filepath)
dirs.append(filepath)
continue
if not os.path.isfile(filepath):
# self.logger.debug("%s: SKIP non-file" % filepath)
continue
if not filename.lower().endswith('.yaml'):
# self.logger.debug("%s: SKIP non-YAML" % filepath)
continue
# self.logger.debug("%s: SAVE configuration file" % filepath)
inputs.append((filepath, filename))
else:
# this allows a file to be passed into the ambassador cli
# rather than just a directory
inputs.append((config_dir_path, os.path.basename(config_dir_path)))
for filepath, filename in inputs:
self.logger.info("reading %s (%s)" % (filename, filepath))
try:
serialization = open(filepath, "r").read()
self.parse_yaml(serialization, k8s=k8s, filename=filename)
except IOError as e:
self.aconf.post_error("could not read YAML from %s: %s" % (filepath, e))
self.finalize()
def parse_yaml(self, serialization: str, k8s=False, rkey: Optional[str]=None,
filename: Optional[str]=None) -> None:
# self.logger.debug("%s: parsing %d byte%s of YAML:\n%s" %
# (self.location, len(serialization), "" if (len(serialization) == 1) else "s",
# serialization))
try:
objects = parse_yaml(serialization)
self.parse_object(objects=objects, k8s=k8s, rkey=rkey, filename=filename)
except yaml.error.YAMLError as e:
self.aconf.post_error("%s: could not parse YAML: %s" % (self.location, e))
self.finalize()
def parse_json(self, serialization: str, k8s=False, rkey: Optional[str]=None,
filename: Optional[str]=None) -> None:
# self.logger.debug("%s: parsing %d byte%s of YAML:\n%s" %
# (self.location, len(serialization), "" if (len(serialization) == 1) else "s",
# serialization))
try:
objects = json.loads(serialization)
self.parse_object(objects=objects, k8s=k8s, rkey=rkey, filename=filename)
except json.decoder.JSONDecodeError as e:
self.aconf.post_error("%s: could not parse YAML: %s" % (self.location, e))
self.finalize()
def parse_watt(self, serialization: str) -> None:
basedir = os.environ.get('AMBASSADOR_CONFIG_BASE_DIR', '/ambassador')
if os.path.isfile(os.path.join(basedir, '.ambassador_ignore_crds')):
self.aconf.post_error("Ambassador could not find core CRD definitions. Please visit https://www.getambassador.io/reference/core/crds/ for more information. You can continue using Ambassador via Kubernetes annotations, any configuration via CRDs will be ignored...")
if os.path.isfile(os.path.join(basedir, '.ambassador_ignore_crds_2')):
self.aconf.post_error("Ambassador could not find Resolver type CRD definitions. Please visit https://www.getambassador.io/reference/core/crds/ for more information. You can continue using Ambassador via Kubernetes annotations, any configuration via CRDs will be ignored...")
try:
watt_dict = json.loads(serialization)
watt_k8s = watt_dict.get('Kubernetes', {})
# Handle normal Kube objects...
for key in [ 'service', 'endpoints', 'secret' ]:
for obj in watt_k8s.get(key) or []:
self.handle_k8s(obj)
# ...then handle Ambassador CRDs.
for key in [ 'AuthService', 'ConsulResolver',
'KubernetesEndpointResolver', 'KubernetesServiceResolver',
'Mapping', 'Module', 'RateLimitService',
'TCPMapping', 'TLSContext', 'TracingService']:
for obj in watt_k8s.get(key) or []:
self.handle_k8s_crd(obj)
watt_consul = watt_dict.get('Consul', {})
consul_endpoints = watt_consul.get('Endpoints', {})
for consul_rkey, consul_object in consul_endpoints.items():
result = self.handle_consul_service(consul_rkey, consul_object)
if result:
rkey, parsed_objects = result
self.parse_object(parsed_objects, k8s=False,
filename=self.filename, rkey=rkey)
except json.decoder.JSONDecodeError as e:
self.aconf.post_error("%s: could not parse WATT: %s" % (self.location, e))
self.finalize()
def handle_k8s(self, obj: dict) -> None:
# self.logger.debug("handle_k8s obj %s" % json.dumps(obj, indent=4, sort_keys=True))
kind = obj.get('kind')
if not kind:
# self.logger.debug("%s: ignoring K8s object, no kind" % self.location)
return
handler_name = f'handle_k8s_{kind.lower()}'
handler = getattr(self, handler_name, None)
if not handler:
# self.logger.debug("%s: ignoring K8s object, no kind" % self.location)
return
result = handler(obj)
if result:
rkey, parsed_objects = result
self.parse_object(parsed_objects, k8s=False,
filename=self.filename, rkey=rkey)
def handle_k8s_crd(self, obj: dict) -> None:
# CRDs are _not_ allowed to have embedded objects in annotations, because ew.
kind = obj.get('kind')
if not kind:
self.logger.debug("%s: ignoring K8s CRD, no kind" % self.location)
return
apiVersion = obj.get('apiVersion')
metadata = obj.get('metadata') or {}
name = metadata.get('name')
namespace = metadata.get('namespace') or 'default'
spec = obj.get('spec') or {}
if not name:
self.logger.debug(f'{self.location}: ignoring K8s {kind} CRD, no name')
return
if not apiVersion:
self.logger.debug(f'{self.location}: ignoring K8s {kind} CRD {name}: no apiVersion')
return
# if not spec:
# self.logger.debug(f'{self.location}: ignoring K8s {kind} CRD {name}: no spec')
# return
# We use this resource identifier as a key into self.k8s_services, and of course for logging.
resource_identifier = f'{name}.{namespace}'
# OK. Shallow copy 'spec'...
amb_object = dict(spec)
# ...and then stuff in a couple of other things.
amb_object['apiVersion'] = apiVersion
amb_object['name'] = name
amb_object['kind'] = kind
# Done. Parse it.
self.parse_object([ amb_object ], k8s=False, filename=self.filename, rkey=resource_identifier)
def parse_object(self, objects, k8s=False, rkey: Optional[str]=None, filename: Optional[str]=None):
self.push_location(filename, 1)
# self.logger.debug("PARSE_OBJECT: incoming %d" % len(objects))
for obj in objects:
self.logger.debug("PARSE_OBJECT: checking %s" % obj)
if k8s:
self.handle_k8s(obj)
else:
# if not obj:
# self.logger.debug("%s: empty object from %s" % (self.location, serialization))
self.process_object(obj, rkey=rkey)
self.ocount += 1
self.pop_location()
def process_object(self, obj: dict, rkey: Optional[str]=None) -> None:
if not isinstance(obj, dict):
# Bug!!
if not obj:
self.aconf.post_error("%s is empty" % self.location)
else:
self.aconf.post_error("%s is not a dictionary? %s" %
(self.location, json.dumps(obj, indent=4, sort_keys=True)))
return
if not self.aconf.good_ambassador_id(obj):
# self.logger.debug("%s ignoring K8s Service with mismatched ambassador_id" % self.location)
return
if 'kind' not in obj:
# Bug!!
self.aconf.post_error("%s is missing 'kind'?? %s" %
(self.location, json.dumps(obj, indent=4, sort_keys=True)))
return
# self.logger.debug("%s PROCESS %s initial rkey %s" % (self.location, obj['kind'], rkey))
# Is this a pragma object?
if obj['kind'] == 'Pragma':
# Why did I think this was a good idea? [ :) ]
new_source = obj.get('source', None)
if new_source:
# We don't save the old self.filename here, so this change will last until
# the next input source (or the next Pragma).
self.filename = new_source
# Don't count Pragma objects, since the user generally doesn't write them.
self.ocount -= 1
return
if not rkey:
rkey = self.filename
rkey = "%s.%d" % (rkey, self.ocount)
# self.logger.debug("%s PROCESS %s updated rkey to %s" % (self.location, obj['kind'], rkey))
# Fine. Fine fine fine.
serialization = dump_yaml(obj, default_flow_style=False)
r = ACResource.from_dict(rkey, rkey, serialization, obj)
self.elements.append(r)
# self.logger.debug("%s PROCESS %s save %s: %s" % (self.location, obj['kind'], rkey, serialization))
def sorted(self, key=lambda x: x.rkey): # returns a sorted list of elements
return sorted(self.elements, key=key)
def handle_k8s_endpoints(self, k8s_object: AnyDict) -> HandlerResult:
# Don't include Endpoints unless endpoint routing is enabled.
if not Config.enable_endpoints:
return None
metadata = k8s_object.get('metadata', None)
resource_name = metadata.get('name') if metadata else None
resource_namespace = metadata.get('namespace', 'default') if metadata else None
resource_subsets = k8s_object.get('subsets', None)
skip = False
if not metadata:
self.logger.debug("ignoring K8s Endpoints with no metadata")
skip = True
if not resource_name:
self.logger.debug("ignoring K8s Endpoints with no name")
skip = True
if not resource_subsets:
self.logger.debug(f"ignoring K8s Endpoints {resource_name}.{resource_namespace} with no subsets")
skip = True
if skip:
return None
# We use this resource identifier as a key into self.k8s_services, and of course for logging.
resource_identifier = '{name}.{namespace}'.format(namespace=resource_namespace, name=resource_name)
# K8s Endpoints resources are _stupid_ in that they give you a vector of
# IP addresses and a vector of ports, and you have to assume that every
# IP address listens on every port, and that the semantics of each port
# are identical. The first is usually a good assumption. The second is not:
# people routinely list 80 and 443 for the same service, for example,
# despite the fact that one is HTTP and the other is HTTPS.
#
# By the time the ResourceFetcher is done, we want to be working with
# Ambassador Service resources, which have an array of address:port entries
# for endpoints. So we're going to extract the address and port numbers
# as arrays of tuples and stash them for later.
#
# In Kubernetes-speak, the Endpoints resource has some metadata and a set
# of "subsets" (though I've personally never seen more than one subset in
# one of these things).
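# As an illustrative (hypothetical) example, a subset such as
#     { "addresses": [ {"ip": "10.0.0.5"}, {"ip": "10.0.0.6"} ],
#       "ports": [ {"name": "http", "port": 8080, "protocol": "TCP"} ] }
# gets stashed below as addresses [{'ip': '10.0.0.5'}, {'ip': '10.0.0.6'}] and
# ports {'8080': 8080, 'http': 8080}, keyed by 'name.namespace' in self.k8s_endpoints.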
for subset in resource_subsets:
# K8s subset addresses have some node info in with the IP address.
# May as well save that too.
addresses = []
for address in subset.get('addresses', []):
addr = {}
ip = address.get('ip', None)
if ip is not None:
addr['ip'] = ip
node = address.get('nodeName', None)
if node is not None:
addr['node'] = node
target_ref = address.get('targetRef', None)
if target_ref is not None:
target_kind = target_ref.get('kind', None)
if target_kind is not None:
addr['target_kind'] = target_kind
target_name = target_ref.get('name', None)
if target_name is not None:
addr['target_name'] = target_name
target_namespace = target_ref.get('namespace', None)
if target_namespace is not None:
addr['target_namespace'] = target_namespace
if len(addr) > 0:
addresses.append(addr)
# If we got no addresses, there's no point in messing with ports.
if len(addresses) == 0:
continue
ports = subset.get('ports', [])
# A service can reference a port either by name or by port number.
port_dict = {}
for port in ports:
port_name = port.get('name', None)
port_number = port.get('port', None)
port_proto = port.get('protocol', 'TCP').upper()
if port_proto != 'TCP':
continue
if port_number is None:
# WTFO.
continue
port_dict[str(port_number)] = port_number
if port_name:
port_dict[port_name] = port_number
if port_dict:
# We're not going to actually return this: we'll just stash it for our
# later resolution pass.
self.k8s_endpoints[resource_identifier] = {
'name': resource_name,
'namespace': resource_namespace,
'addresses': addresses,
'ports': port_dict
}
else:
self.logger.debug(f"ignoring K8s Endpoints {resource_identifier} with no routable ports")
return None
def handle_k8s_service(self, k8s_object: AnyDict) -> HandlerResult:
# The annoying bit about K8s Service resources is that not only do we have to look
# inside them for Ambassador resources, but we also have to save their info for
# later endpoint resolution too.
#
# Again, we're trusting that the input isn't overly bloated on that latter bit.
metadata = k8s_object.get('metadata', None)
resource_name = metadata.get('name') if metadata else None
resource_namespace = metadata.get('namespace', 'default') if metadata else None
annotations = metadata.get('annotations', None) if metadata else None
if annotations:
annotations = annotations.get('getambassador.io/config', None)
skip = False
if not metadata:
self.logger.debug("ignoring K8s Service with no metadata")
skip = True
if not skip and not resource_name:
self.logger.debug("ignoring K8s Service with no name")
skip = True
if not skip and (Config.single_namespace and (resource_namespace != Config.ambassador_namespace)):
# This should never happen in actual usage, since we shouldn't be given things
# in the wrong namespace. However, in development, this can happen a lot.
self.logger.debug(f"ignoring K8s Service {resource_name}.{resource_namespace} in wrong namespace")
skip = True
if skip:
return None
# We use this resource identifier as a key into self.k8s_services, and of course for logging.
resource_identifier = f'{resource_name}.{resource_namespace}'
# Not skipping. First, if we have some actual ports, stash this in self.k8s_services
# for later resolution.
spec = k8s_object.get('spec', None)
ports = spec.get('ports', None) if spec else None
if spec and ports:
self.k8s_services[resource_identifier] = {
'name': resource_name,
'namespace': resource_namespace,
'ports': ports
}
else:
self.logger.debug(f"not saving K8s Service {resource_name}.{resource_namespace} with no ports")
objects: List[Any] = []
if annotations:
if (self.filename is not None) and (not self.filename.endswith(":annotation")):
self.filename += ":annotation"
try:
objects = parse_yaml(annotations)
except yaml.error.YAMLError as e:
self.logger.debug("could not parse YAML: %s" % e)
return resource_identifier, objects
# Handler for K8s Secret resources.
def handle_k8s_secret(self, k8s_object: AnyDict) -> HandlerResult:
# XXX Another one where we shouldn't be saving everything.
secret_type = k8s_object.get('type', None)
metadata = k8s_object.get('metadata', None)
resource_name = metadata.get('name') if metadata else None
resource_namespace = metadata.get('namespace', 'default') if metadata else None
data = k8s_object.get('data', None)
skip = False
if (secret_type != 'kubernetes.io/tls') and (secret_type != 'Opaque'):
self.logger.debug("ignoring K8s Secret with unknown type %s" % secret_type)
skip = True
if not data:
self.logger.debug("ignoring K8s Secret with no data")
skip = True
if not metadata:
self.logger.debug("ignoring K8s Secret with no metadata")
skip = True
if not resource_name:
self.logger.debug("ignoring K8s Secret with no name")
skip = True
if not skip and (Config.single_namespace and (resource_namespace != Config.ambassador_namespace)):
# This should never happen in actual usage, since we shouldn't be given things
# in the wrong namespace. However, in development, this can happen a lot.
self.logger.debug("ignoring K8s Secret in wrong namespace")
skip = True
if skip:
return None
# This resource identifier is useful for log output since filenames can be duplicated (multiple subdirectories)
resource_identifier = f'{resource_name}.{resource_namespace}'
tls_crt = data.get('tls.crt', None)
tls_key = data.get('tls.key', None)
if not tls_crt and not tls_key:
# Uh. WTFO?
self.logger.debug(f'ignoring K8s Secret {resource_identifier} with no keys')
return None
# No need to muck about with resolution later, just immediately turn this
# into an Ambassador Secret resource.
secret_info = {
'apiVersion': 'ambassador/v1',
'ambassador_id': Config.ambassador_id,
'kind': 'Secret',
'name': resource_name,
'namespace': resource_namespace
}
if tls_crt:
secret_info['tls_crt'] = tls_crt
if tls_key:
secret_info['tls_key'] = tls_key
return resource_identifier, [ secret_info ]
# Handler for Consul services
def handle_consul_service(self,
consul_rkey: str, consul_object: AnyDict) -> HandlerResult:
# resource_identifier = f'consul-{consul_rkey}'
endpoints = consul_object.get('Endpoints', [])
name = consul_object.get('Service', consul_rkey)
if len(endpoints) < 1:
# Bzzt.
self.logger.debug(f"ignoring Consul service {name} with no Endpoints")
return None
# We can turn this directly into an Ambassador Service resource, since Consul keeps
# services and endpoints together (as it should!!).
#
# Note that we currently trust the association ID to contain the datacenter name.
# That's a function of the watch_hook putting it there.
svc = {
'apiVersion': 'ambassador/v1',
'ambassador_id': Config.ambassador_id,
'kind': 'Service',
'name': name,
'datacenter': consul_object.get('Id') or 'dc1',
'endpoints': {}
}
for ep in endpoints:
ep_addr = ep.get('Address')
ep_port = ep.get('Port')
if not ep_addr or not ep_port:
self.logger.debug(f"ignoring Consul service {name} endpoint {ep["ID"]} missing address info")
continue
# Consul services don't have the weird indirections that Kube services do, so just
# lump all the endpoints together under the same source port of '*'.
svc_eps = svc['endpoints'].setdefault('*', [])
svc_eps.append({
'ip': ep_addr,
'port': ep_port,
'target_kind': 'Consul'
})
# Once again: don't return this. Instead, save it in self.services.
self.services[f"consul-{name}-{svc["datacenter"]}"] = svc
return None
def finalize(self) -> None:
# The point here is to sort out self.k8s_services and self.k8s_endpoints and
# turn them into proper Ambassador Service resources. This is a bit annoying,
# because of the annoyances of Kubernetes, but we'll give it a go.
#
# Here are the rules:
#
# 1. By the time we get here, we have a _complete_ set of Ambassador resources that
# have passed muster by virtue of having the correct namespace, the correct
# ambassador_id, etc. (They may have duplicate names at this point, admittedly.)
# Any service not mentioned by name is out. Since the Ambassador resources in
# self.elements are in fact AResources, we can farm this out to code for each
# resource.
#
# 2. The check is, by design, permissive. If in doubt, write the check to leave
# the resource in.
#
# 3. For any service that stays in, we vet its listed ports against self.k8s_endpoints.
# Anything with no matching ports is _not_ dropped; it is assumed to use service
# routing rather than endpoint routing.
od = {
'elements': [ x.as_dict() for x in self.elements ],
'k8s_endpoints': self.k8s_endpoints,
'k8s_services': self.k8s_services,
'services': self.services
}
# self.logger.debug("==== FINALIZE START\n%s" % json.dumps(od, sort_keys=True, indent=4))
for key, k8s_svc in self.k8s_services.items():
# See if we can find endpoints for this service.
k8s_ep = self.k8s_endpoints.get(key, None)
k8s_ep_ports = k8s_ep.get('ports', None) if k8s_ep else None
k8s_name = k8s_svc['name']
k8s_namespace = k8s_svc['namespace']
# OK, Kube is weird. The way all this works goes like this:
#
# 1. When you create a Kube Service, Kube will allocate a clusterIP
# for it and update DNS to resolve the name of the service to
# that clusterIP.
# 2. Kube will look over the pods matched by the Service's selectors
# and stick those pods' IP addresses into Endpoints for the Service.
# 3. The Service will have ports listed. These service.port entries can
# contain:
# port -- a port number you can talk to at the clusterIP
# name -- a name for this port
# targetPort -- a port number you can talk to at the _endpoint_ IP
# We'll call the 'port' entry here the "service-port".
# 4. If you talk to clusterIP:service-port, you will get magically
# proxied by the Kube CNI to a target port at one of the endpoint IPs.
#
# The $64K question is: how does Kube decide which target port to use?
#
# First, if there's only one endpoint port, that's the one that gets used.
#
# If there's more than one, if the Service's port entry has a targetPort
# number, it uses that. Otherwise it tries to find an endpoint port with
# the same name as the service port. Otherwise, I dunno, it punts and uses
# the service-port.
#
# So that's how Ambassador is going to do it, for each Service port entry.
#
# If we have no endpoints at all, Ambassador will end up routing using
# just the service name and port per the Mapping's service spec.
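# As an illustrative (hypothetical) example: a Service port entry
#     { "name": "http", "port": 80, "targetPort": 8080 }
# combined with endpoint ports {'8080': 8080, 'http': 8080} resolves via the
# 'targetPort' lookup to target_ports[80] = 8080, i.e. traffic aimed at
# service-port 80 gets routed to endpoint address:8080.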
target_ports = {}
target_addrs = []
svc_endpoints = {}
if not k8s_ep or not k8s_ep_ports:
# No endpoints at all, so we're done with this service.
self.logger.debug(f'{key}: no endpoints at all')
else:
idx = -1
for port in k8s_svc['ports']:
idx += 1
k8s_target: Optional[int] = None
src_port = port.get('port', None)
if not src_port:
# WTFO. This is impossible.
self.logger.error(f"Kubernetes service {key} has no port number at index {idx}?")
continue
if len(k8s_ep_ports) == 1:
# Just one endpoint port. Done.
k8s_target = list(k8s_ep_ports.values())[0]
target_ports[src_port] = k8s_target
self.logger.debug(f'{key} port {src_port}: single endpoint port {k8s_target}')
continue
# Hmmm, we need to try to actually map whatever ports are listed for
# this service. Oh well.
found_key = False
fallback: Optional[int] = None
for attr in [ 'targetPort', 'name', 'port' ]:
port_key = port.get(attr) # This could be a name or a number, in general.
if port_key:
found_key = True
if not fallback and (port_key != 'name') and str(port_key).isdigit():
# fallback can only be digits.
fallback = port_key
# Do we have a destination port for this?
k8s_target = k8s_ep_ports.get(str(port_key), None)
if k8s_target:
self.logger.debug(f'{key} port {src_port} #{idx}: {attr} {port_key} -> {k8s_target}')
break
else:
self.logger.debug(f'{key} port {src_port} #{idx}: {attr} {port_key} -> miss')
if not found_key:
# WTFO. This is impossible.
self.logger.error(f"Kubernetes service {key} port {src_port} has an empty port spec at index {idx}?")
continue
if not k8s_target:
# This is most likely because we don't have endpoint info at all, so we'll do service
# routing.
#
# It's actually impossible for fallback to be unset, but WTF.
k8s_target = fallback or src_port
self.logger.debug(f'{key} port {src_port} #{idx}: falling back to {k8s_target}')
target_ports[src_port] = k8s_target
if not target_ports:
# WTFO. This is impossible. I guess we'll fall back to service routing.
self.logger.error(f"Kubernetes service {key} has no routable ports at all?")
# OK. Once _that's_ done we have to take the endpoint addresses into
# account, or just use the service name if we don't have that.
k8s_ep_addrs = k8s_ep.get('addresses', None)
if k8s_ep_addrs:
for addr in k8s_ep_addrs:
ip = addr.get('ip', None)
if ip:
target_addrs.append(ip)
# OK! If we have no target addresses, just use service routing.
if not target_addrs:
self.logger.debug(f'{key} falling back to service routing')
target_addrs = [ key ]
for src_port, target_port in target_ports.items():
svc_endpoints[src_port] = [ {
'ip': target_addr,
'port': target_port
} for target_addr in target_addrs ]
# Nope. Set this up for service routing.
self.services[f'k8s-{k8s_name}-{k8s_namespace}'] = {
'apiVersion': 'ambassador/v1',
'ambassador_id': Config.ambassador_id,
'kind': 'Service',
'name': k8s_name,
'namespace': k8s_namespace,
'endpoints': svc_endpoints
}
# OK. After all that, go turn all of the things in self.services into Ambassador
# Service resources.
for key, svc in self.services.items():
serialization = dump_yaml(svc, default_flow_style=False)
r = ACResource.from_dict(key, key, serialization, svc)
self.elements.append(r)
od = {
'elements': [ x.as_dict() for x in self.elements ],
'k8s_endpoints': self.k8s_endpoints,
'k8s_services': self.k8s_services,
'services': self.services
}
# self.logger.debug("==== FINALIZE END\n%s" % json.dumps(od, sort_keys=True, indent=4))
|
from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING
# from typing import cast as typecast
import json
import logging
import os
import yaml
from .config import Config
from .acresource import ACResource
from ..utils import parse_yaml, dump_yaml
AnyDict = Dict[str, Any]
HandlerResult = Optional[Tuple[str, List[AnyDict]]]
# Some thoughts:
# - loading a bunch of Ambassador resources is different from loading a bunch of K8s
# services, because we should assume that if we're being fed a bunch of Ambassador
# resources, we'll get a full set. The whole 'secret loader' thing needs to have the
# concept of a TLSSecret resource that can be force-fed to us, or that can be fetched
# through the loader if needed.
# - If you're running a debug-loop Ambassador, you should just have a flat (or
# recursive, I don't care) directory full of Ambassador YAML, including TLSSecrets
# and Endpoints and whatnot, as needed. All of it will get read by
# load_from_filesystem and end up in the elements array.
# - If you're running expecting to be fed by kubewatch, at present kubewatch will
# send over K8s Service records, and anything annotated in there will end up in
# elements. This may include TLSSecrets or Endpoints. Any TLSSecret mentioned that
# isn't already in elements will need to be fetched.
# - Ambassador resources do not have namespaces. They have the ambassador_id. That's
# it. The ambassador_id is completely orthogonal to the namespace. No element with
# the wrong ambassador_id will end up in elements. It would be nice if they were
# never sent by kubewatch, but, well, y'know.
# - TLSSecret resources are not TLSContexts. TLSSecrets only have a name, a private
# half, and a public half. They do _not_ have other TLSContext information.
# - Endpoint resources probably have just a name, a service name, and an endpoint
# address.
class ResourceFetcher:
def __init__(self, logger: logging.Logger, aconf: 'Config') -> None:
self.aconf = aconf
self.logger = logger
self.elements: List[ACResource] = []
self.filename: Optional[str] = None
self.ocount: int = 1
self.saved: List[Tuple[Optional[str], int]] = []
self.k8s_endpoints: Dict[str, AnyDict] = {}
self.k8s_services: Dict[str, AnyDict] = {}
self.services: Dict[str, AnyDict] = {}
@property
def location(self):
return "%s.%d" % (self.filename or "anonymous YAML", self.ocount)
def push_location(self, filename: Optional[str], ocount: int) -> None:
self.saved.append((self.filename, self.ocount))
self.filename = filename
self.ocount = ocount
def pop_location(self) -> None:
self.filename, self.ocount = self.saved.pop()
def load_from_filesystem(self, config_dir_path, recurse: bool=False, k8s: bool=False):
inputs: List[Tuple[str, str]] = []
if os.path.isdir(config_dir_path):
dirs = [ config_dir_path ]
while dirs:
dirpath = dirs.pop(0)
for filename in os.listdir(dirpath):
filepath = os.path.join(dirpath, filename)
if recurse and os.path.isdir(filepath):
# self.logger.debug("%s: RECURSE" % filepath)
dirs.append(filepath)
continue
if not os.path.isfile(filepath):
# self.logger.debug("%s: SKIP non-file" % filepath)
continue
if not filename.lower().endswith('.yaml'):
# self.logger.debug("%s: SKIP non-YAML" % filepath)
continue
# self.logger.debug("%s: SAVE configuration file" % filepath)
inputs.append((filepath, filename))
else:
# this allows a file to be passed into the ambassador cli
# rather than just a directory
inputs.append((config_dir_path, os.path.basename(config_dir_path)))
for filepath, filename in inputs:
self.logger.info("reading %s (%s)" % (filename, filepath))
try:
serialization = open(filepath, "r").read()
self.parse_yaml(serialization, k8s=k8s, filename=filename)
except IOError as e:
self.aconf.post_error("could not read YAML from %s: %s" % (filepath, e))
self.finalize()
def parse_yaml(self, serialization: str, k8s=False, rkey: Optional[str]=None,
filename: Optional[str]=None) -> None:
# self.logger.debug("%s: parsing %d byte%s of YAML:\n%s" %
# (self.location, len(serialization), "" if (len(serialization) == 1) else "s",
# serialization))
try:
objects = parse_yaml(serialization)
self.parse_object(objects=objects, k8s=k8s, rkey=rkey, filename=filename)
except yaml.error.YAMLError as e:
self.aconf.post_error("%s: could not parse YAML: %s" % (self.location, e))
self.finalize()
def parse_json(self, serialization: str, k8s=False, rkey: Optional[str]=None,
filename: Optional[str]=None) -> None:
# self.logger.debug("%s: parsing %d byte%s of YAML:\n%s" %
# (self.location, len(serialization), "" if (len(serialization) == 1) else "s",
# serialization))
try:
objects = json.loads(serialization)
self.parse_object(objects=objects, k8s=k8s, rkey=rkey, filename=filename)
except json.decoder.JSONDecodeError as e:
self.aconf.post_error("%s: could not parse YAML: %s" % (self.location, e))
self.finalize()
def parse_watt(self, serialization: str) -> None:
basedir = os.environ.get('AMBASSADOR_CONFIG_BASE_DIR', '/ambassador')
if os.path.isfile(os.path.join(basedir, '.ambassador_ignore_crds')):
self.aconf.post_error("Ambassador could not find core CRD definitions. Please visit https://www.getambassador.io/reference/core/crds/ for more information. You can continue using Ambassador via Kubernetes annotations, any configuration via CRDs will be ignored...")
if os.path.isfile(os.path.join(basedir, '.ambassador_ignore_crds_2')):
self.aconf.post_error("Ambassador could not find Resolver type CRD definitions. Please visit https://www.getambassador.io/reference/core/crds/ for more information. You can continue using Ambassador via Kubernetes annotations, any configuration via CRDs will be ignored...")
try:
watt_dict = json.loads(serialization)
watt_k8s = watt_dict.get('Kubernetes', {})
# Handle normal Kube objects...
for key in [ 'service', 'endpoints', 'secret' ]:
for obj in watt_k8s.get(key) or []:
self.handle_k8s(obj)
# ...then handle Ambassador CRDs.
for key in [ 'AuthService', 'ConsulResolver',
'KubernetesEndpointResolver', 'KubernetesServiceResolver',
'Mapping', 'Module', 'RateLimitService',
'TCPMapping', 'TLSContext', 'TracingService']:
for obj in watt_k8s.get(key) or []:
self.handle_k8s_crd(obj)
watt_consul = watt_dict.get('Consul', {})
consul_endpoints = watt_consul.get('Endpoints', {})
for consul_rkey, consul_object in consul_endpoints.items():
result = self.handle_consul_service(consul_rkey, consul_object)
if result:
rkey, parsed_objects = result
self.parse_object(parsed_objects, k8s=False,
filename=self.filename, rkey=rkey)
except json.decoder.JSONDecodeError as e:
self.aconf.post_error("%s: could not parse WATT: %s" % (self.location, e))
self.finalize()
def handle_k8s(self, obj: dict) -> None:
# self.logger.debug("handle_k8s obj %s" % json.dumps(obj, indent=4, sort_keys=True))
kind = obj.get('kind')
if not kind:
# self.logger.debug("%s: ignoring K8s object, no kind" % self.location)
return
handler_name = f'handle_k8s_{kind.lower()}'
handler = getattr(self, handler_name, None)
if not handler:
# self.logger.debug("%s: ignoring K8s object, no kind" % self.location)
return
result = handler(obj)
if result:
rkey, parsed_objects = result
self.parse_object(parsed_objects, k8s=False,
filename=self.filename, rkey=rkey)
def handle_k8s_crd(self, obj: dict) -> None:
# CRDs are _not_ allowed to have embedded objects in annotations, because ew.
kind = obj.get('kind')
if not kind:
self.logger.debug("%s: ignoring K8s CRD, no kind" % self.location)
return
apiVersion = obj.get('apiVersion')
metadata = obj.get('metadata') or {}
name = metadata.get('name')
namespace = metadata.get('namespace') or 'default'
spec = obj.get('spec') or {}
if not name:
self.logger.debug(f'{self.location}: ignoring K8s {kind} CRD, no name')
return
if not apiVersion:
self.logger.debug(f'{self.location}: ignoring K8s {kind} CRD {name}: no apiVersion')
return
# if not spec:
# self.logger.debug(f'{self.location}: ignoring K8s {kind} CRD {name}: no spec')
# return
# We use this resource identifier as a key into self.k8s_services, and of course for logging.
resource_identifier = f'{name}.{namespace}'
# OK. Shallow copy 'spec'...
amb_object = dict(spec)
# ...and then stuff in a couple of other things.
amb_object['apiVersion'] = apiVersion
amb_object['name'] = name
amb_object['kind'] = kind
# Done. Parse it.
self.parse_object([ amb_object ], k8s=False, filename=self.filename, rkey=resource_identifier)
def parse_object(self, objects, k8s=False, rkey: Optional[str]=None, filename: Optional[str]=None):
self.push_location(filename, 1)
# self.logger.debug("PARSE_OBJECT: incoming %d" % len(objects))
for obj in objects:
self.logger.debug("PARSE_OBJECT: checking %s" % obj)
if k8s:
self.handle_k8s(obj)
else:
# if not obj:
# self.logger.debug("%s: empty object from %s" % (self.location, serialization))
self.process_object(obj, rkey=rkey)
self.ocount += 1
self.pop_location()
def process_object(self, obj: dict, rkey: Optional[str]=None) -> None:
if not isinstance(obj, dict):
# Bug!!
if not obj:
self.aconf.post_error("%s is empty" % self.location)
else:
self.aconf.post_error("%s is not a dictionary? %s" %
(self.location, json.dumps(obj, indent=4, sort_keys=True)))
return
if not self.aconf.good_ambassador_id(obj):
# self.logger.debug("%s ignoring K8s Service with mismatched ambassador_id" % self.location)
return
if 'kind' not in obj:
# Bug!!
self.aconf.post_error("%s is missing 'kind'?? %s" %
(self.location, json.dumps(obj, indent=4, sort_keys=True)))
return
# self.logger.debug("%s PROCESS %s initial rkey %s" % (self.location, obj['kind'], rkey))
# Is this a pragma object?
if obj['kind'] == 'Pragma':
# Why did I think this was a good idea? [ :) ]
new_source = obj.get('source', None)
if new_source:
# We don't save the old self.filename here, so this change will last until
# the next input source (or the next Pragma).
self.filename = new_source
# Don't count Pragma objects, since the user generally doesn't write them.
self.ocount -= 1
return
if not rkey:
rkey = self.filename
rkey = "%s.%d" % (rkey, self.ocount)
# self.logger.debug("%s PROCESS %s updated rkey to %s" % (self.location, obj['kind'], rkey))
# Fine. Fine fine fine.
serialization = dump_yaml(obj, default_flow_style=False)
r = ACResource.from_dict(rkey, rkey, serialization, obj)
self.elements.append(r)
# self.logger.debug("%s PROCESS %s save %s: %s" % (self.location, obj['kind'], rkey, serialization))
def sorted(self, key=lambda x: x.rkey): # returns a sorted list of elements
return sorted(self.elements, key=key)
def handle_k8s_endpoints(self, k8s_object: AnyDict) -> HandlerResult:
# Don't include Endpoints unless endpoint routing is enabled.
if not Config.enable_endpoints:
return None
metadata = k8s_object.get('metadata', None)
resource_name = metadata.get('name') if metadata else None
resource_namespace = metadata.get('namespace', 'default') if metadata else None
resource_subsets = k8s_object.get('subsets', None)
skip = False
if not metadata:
self.logger.debug("ignoring K8s Endpoints with no metadata")
skip = True
if not resource_name:
self.logger.debug("ignoring K8s Endpoints with no name")
skip = True
if not resource_subsets:
self.logger.debug(f"ignoring K8s Endpoints {resource_name}.{resource_namespace} with no subsets")
skip = True
if skip:
return None
# We use this resource identifier as a key into self.k8s_services, and of course for logging.
resource_identifier = '{name}.{namespace}'.format(namespace=resource_namespace, name=resource_name)
# K8s Endpoints resources are _stupid_ in that they give you a vector of
# IP addresses and a vector of ports, and you have to assume that every
# IP address listens on every port, and that the semantics of each port
# are identical. The first is usually a good assumption. The second is not:
# people routinely list 80 and 443 for the same service, for example,
# despite the fact that one is HTTP and the other is HTTPS.
#
# By the time the ResourceFetcher is done, we want to be working with
# Ambassador Service resources, which have an array of address:port entries
# for endpoints. So we're going to extract the address and port numbers
# as arrays of tuples and stash them for later.
#
# In Kubernetes-speak, the Endpoints resource has some metadata and a set
# of "subsets" (though I've personally never seen more than one subset in
# one of these things).
for subset in resource_subsets:
# K8s subset addresses have some node info in with the IP address.
# May as well save that too.
addresses = []
for address in subset.get('addresses', []):
addr = {}
ip = address.get('ip', None)
if ip is not None:
addr['ip'] = ip
node = address.get('nodeName', None)
if node is not None:
addr['node'] = node
target_ref = address.get('targetRef', None)
if target_ref is not None:
target_kind = target_ref.get('kind', None)
if target_kind is not None:
addr['target_kind'] = target_kind
target_name = target_ref.get('name', None)
if target_name is not None:
addr['target_name'] = target_name
target_namespace = target_ref.get('namespace', None)
if target_namespace is not None:
addr['target_namespace'] = target_namespace
if len(addr) > 0:
addresses.append(addr)
# If we got no addresses, there's no point in messing with ports.
if len(addresses) == 0:
continue
ports = subset.get('ports', [])
# A service can reference a port either by name or by port number.
port_dict = {}
for port in ports:
port_name = port.get('name', None)
port_number = port.get('port', None)
port_proto = port.get('protocol', 'TCP').upper()
if port_proto != 'TCP':
continue
if port_number is None:
# WTFO.
continue
port_dict[str(port_number)] = port_number
if port_name:
port_dict[port_name] = port_number
if port_dict:
# We're not going to actually return this: we'll just stash it for our
# later resolution pass.
self.k8s_endpoints[resource_identifier] = {
'name': resource_name,
'namespace': resource_namespace,
'addresses': addresses,
'ports': port_dict
}
else:
self.logger.debug(f"ignoring K8s Endpoints {resource_identifier} with no routable ports")
return None
def handle_k8s_service(self, k8s_object: AnyDict) -> HandlerResult:
# The annoying bit about K8s Service resources is that not only do we have to look
# inside them for Ambassador resources, but we also have to save their info for
# later endpoint resolution too.
#
# Again, we're trusting that the input isn't overly bloated on that latter bit.
metadata = k8s_object.get('metadata', None)
resource_name = metadata.get('name') if metadata else None
resource_namespace = metadata.get('namespace', 'default') if metadata else None
annotations = metadata.get('annotations', None) if metadata else None
if annotations:
annotations = annotations.get('getambassador.io/config', None)
skip = False
if not metadata:
self.logger.debug("ignoring K8s Service with no metadata")
skip = True
if not skip and not resource_name:
self.logger.debug("ignoring K8s Service with no name")
skip = True
if not skip and (Config.single_namespace and (resource_namespace != Config.ambassador_namespace)):
# This should never happen in actual usage, since we shouldn't be given things
# in the wrong namespace. However, in development, this can happen a lot.
self.logger.debug(f"ignoring K8s Service {resource_name}.{resource_namespace} in wrong namespace")
skip = True
if skip:
return None
# We use this resource identifier as a key into self.k8s_services, and of course for logging.
resource_identifier = f'{resource_name}.{resource_namespace}'
# Not skipping. First, if we have some actual ports, stash this in self.k8s_services
# for later resolution.
spec = k8s_object.get('spec', None)
ports = spec.get('ports', None) if spec else None
if spec and ports:
self.k8s_services[resource_identifier] = {
'name': resource_name,
'namespace': resource_namespace,
'ports': ports
}
else:
self.logger.debug(f"not saving K8s Service {resource_name}.{resource_namespace} with no ports")
objects: List[Any] = []
if annotations:
if (self.filename is not None) and (not self.filename.endswith(":annotation")):
self.filename += ":annotation"
try:
objects = parse_yaml(annotations)
except yaml.error.YAMLError as e:
self.logger.debug("could not parse YAML: %s" % e)
return resource_identifier, objects
# Handler for K8s Secret resources.
def handle_k8s_secret(self, k8s_object: AnyDict) -> HandlerResult:
# XXX Another one where we shouldn't be saving everything.
secret_type = k8s_object.get('type', None)
metadata = k8s_object.get('metadata', None)
resource_name = metadata.get('name') if metadata else None
resource_namespace = metadata.get('namespace', 'default') if metadata else None
data = k8s_object.get('data', None)
skip = False
if (secret_type != 'kubernetes.io/tls') and (secret_type != 'Opaque'):
self.logger.debug("ignoring K8s Secret with unknown type %s" % secret_type)
skip = True
if not data:
self.logger.debug("ignoring K8s Secret with no data")
skip = True
if not metadata:
self.logger.debug("ignoring K8s Secret with no metadata")
skip = True
if not resource_name:
self.logger.debug("ignoring K8s Secret with no name")
skip = True
if not skip and (Config.single_namespace and (resource_namespace != Config.ambassador_namespace)):
# This should never happen in actual usage, since we shouldn't be given things
# in the wrong namespace. However, in development, this can happen a lot.
self.logger.debug("ignoring K8s Secret in wrong namespace")
skip = True
if skip:
return None
# This resource identifier is useful for log output since filenames can be duplicated (multiple subdirectories)
resource_identifier = f'{resource_name}.{resource_namespace}'
tls_crt = data.get('tls.crt', None)
tls_key = data.get('tls.key', None)
if not tls_crt and not tls_key:
# Uh. WTFO?
self.logger.debug(f'ignoring K8s Secret {resource_identifier} with no keys')
return None
# No need to muck about with resolution later, just immediately turn this
# into an Ambassador Secret resource.
secret_info = {
'apiVersion': 'ambassador/v1',
'ambassador_id': Config.ambassador_id,
'kind': 'Secret',
'name': resource_name,
'namespace': resource_namespace
}
if tls_crt:
secret_info['tls_crt'] = tls_crt
if tls_key:
secret_info['tls_key'] = tls_key
return resource_identifier, [ secret_info ]
# Handler for Consul services
def handle_consul_service(self,
consul_rkey: str, consul_object: AnyDict) -> HandlerResult:
# resource_identifier = f'consul-{consul_rkey}'
endpoints = consul_object.get('Endpoints', [])
name = consul_object.get('Service', consul_rkey)
if len(endpoints) < 1:
# Bzzt.
self.logger.debug(f"ignoring Consul service {name} with no Endpoints")
return None
# We can turn this directly into an Ambassador Service resource, since Consul keeps
# services and endpoints together (as it should!!).
#
# Note that we currently trust the association ID to contain the datacenter name.
# That's a function of the watch_hook putting it there.
svc = {
'apiVersion': 'ambassador/v1',
'ambassador_id': Config.ambassador_id,
'kind': 'Service',
'name': name,
'datacenter': consul_object.get('Id') or 'dc1',
'endpoints': {}
}
for ep in endpoints:
ep_addr = ep.get('Address')
ep_port = ep.get('Port')
if not ep_addr or not ep_port:
self.logger.debug(f"ignoring Consul service {name} endpoint {ep['ID']} missing address info")
continue
# Consul services don't have the weird indirections that Kube services do, so just
# lump all the endpoints together under the same source port of '*'.
svc_eps = svc['endpoints'].setdefault('*', [])
svc_eps.append({
'ip': ep_addr,
'port': ep_port,
'target_kind': 'Consul'
})
# Once again: don't return this. Instead, save it in self.services.
self.services[f"consul-{name}-{svc['datacenter']}"] = svc
return None
def finalize(self) -> None:
# The point here is to sort out self.k8s_services and self.k8s_endpoints and
# turn them into proper Ambassador Service resources. This is a bit annoying,
# because of the annoyances of Kubernetes, but we'll give it a go.
#
# Here are the rules:
#
# 1. By the time we get here, we have a _complete_ set of Ambassador resources that
# have passed muster by virtue of having the correct namespace, the correct
# ambassador_id, etc. (They may have duplicate names at this point, admittedly.)
# Any service not mentioned by name is out. Since the Ambassador resources in
# self.elements are in fact AResources, we can farm this out to code for each
# resource.
#
# 2. The check is, by design, permissive. If in doubt, write the check to leave
# the resource in.
#
# 3. For any service that stays in, we vet its listed ports against self.k8s_endpoints.
# Anything with no matching ports is _not_ dropped; it is assumed to use service
# routing rather than endpoint routing.
od = {
'elements': [ x.as_dict() for x in self.elements ],
'k8s_endpoints': self.k8s_endpoints,
'k8s_services': self.k8s_services,
'services': self.services
}
# self.logger.debug("==== FINALIZE START\n%s" % json.dumps(od, sort_keys=True, indent=4))
for key, k8s_svc in self.k8s_services.items():
# See if we can find endpoints for this service.
k8s_ep = self.k8s_endpoints.get(key, None)
k8s_ep_ports = k8s_ep.get('ports', None) if k8s_ep else None
k8s_name = k8s_svc['name']
k8s_namespace = k8s_svc['namespace']
# OK, Kube is weird. The way all this works goes like this:
#
# 1. When you create a Kube Service, Kube will allocate a clusterIP
# for it and update DNS to resolve the name of the service to
# that clusterIP.
# 2. Kube will look over the pods matched by the Service's selectors
# and stick those pods' IP addresses into Endpoints for the Service.
# 3. The Service will have ports listed. These service.port entries can
# contain:
# port -- a port number you can talk to at the clusterIP
# name -- a name for this port
# targetPort -- a port number you can talk to at the _endpoint_ IP
# We'll call the 'port' entry here the "service-port".
# 4. If you talk to clusterIP:service-port, you will get magically
# proxied by the Kube CNI to a target port at one of the endpoint IPs.
#
# The $64K question is: how does Kube decide which target port to use?
#
# First, if there's only one endpoint port, that's the one that gets used.
#
# If there's more than one, if the Service's port entry has a targetPort
# number, it uses that. Otherwise it tries to find an endpoint port with
# the same name as the service port. Otherwise, I dunno, it punts and uses
# the service-port.
#
# So that's how Ambassador is going to do it, for each Service port entry.
#
# If we have no endpoints at all, Ambassador will end up routing using
# just the service name and port per the Mapping's service spec.
target_ports = {}
target_addrs = []
svc_endpoints = {}
if not k8s_ep or not k8s_ep_ports:
# No endpoints at all, so we're done with this service.
self.logger.debug(f'{key}: no endpoints at all')
else:
idx = -1
for port in k8s_svc['ports']:
idx += 1
k8s_target: Optional[int] = None
src_port = port.get('port', None)
if not src_port:
# WTFO. This is impossible.
self.logger.error(f"Kubernetes service {key} has no port number at index {idx}?")
continue
if len(k8s_ep_ports) == 1:
# Just one endpoint port. Done.
k8s_target = list(k8s_ep_ports.values())[0]
target_ports[src_port] = k8s_target
self.logger.debug(f'{key} port {src_port}: single endpoint port {k8s_target}')
continue
# Hmmm, we need to try to actually map whatever ports are listed for
# this service. Oh well.
found_key = False
fallback: Optional[int] = None
for attr in [ 'targetPort', 'name', 'port' ]:
port_key = port.get(attr) # This could be a name or a number, in general.
if port_key:
found_key = True
if not fallback and (port_key != 'name') and str(port_key).isdigit():
# fallback can only be digits.
fallback = port_key
# Do we have a destination port for this?
k8s_target = k8s_ep_ports.get(str(port_key), None)
if k8s_target:
self.logger.debug(f'{key} port {src_port} #{idx}: {attr} {port_key} -> {k8s_target}')
break
else:
self.logger.debug(f'{key} port {src_port} #{idx}: {attr} {port_key} -> miss')
if not found_key:
# WTFO. This is impossible.
self.logger.error(f"Kubernetes service {key} port {src_port} has an empty port spec at index {idx}?")
continue
if not k8s_target:
# This is most likely because we don't have endpoint info at all, so we'll do service
# routing.
#
# It's actually impossible for fallback to be unset, but WTF.
k8s_target = fallback or src_port
self.logger.debug(f'{key} port {src_port} #{idx}: falling back to {k8s_target}')
target_ports[src_port] = k8s_target
if not target_ports:
# WTFO. This is impossible. I guess we'll fall back to service routing.
self.logger.error(f"Kubernetes service {key} has no routable ports at all?")
# OK. Once _that's_ done we have to take the endpoint addresses into
# account, or just use the service name if we don't have that.
k8s_ep_addrs = k8s_ep.get('addresses', None)
if k8s_ep_addrs:
for addr in k8s_ep_addrs:
ip = addr.get('ip', None)
if ip:
target_addrs.append(ip)
# OK! If we have no target addresses, just use service routing.
if not target_addrs:
self.logger.debug(f'{key} falling back to service routing')
target_addrs = [ key ]
for src_port, target_port in target_ports.items():
svc_endpoints[src_port] = [ {
'ip': target_addr,
'port': target_port
} for target_addr in target_addrs ]
            # Save what we've built as a Service; svc_endpoints may be empty, which means service routing.
self.services[f'k8s-{k8s_name}-{k8s_namespace}'] = {
'apiVersion': 'ambassador/v1',
'ambassador_id': Config.ambassador_id,
'kind': 'Service',
'name': k8s_name,
'namespace': k8s_namespace,
'endpoints': svc_endpoints
}
# OK. After all that, go turn all of the things in self.services into Ambassador
# Service resources.
for key, svc in self.services.items():
serialization = dump_yaml(svc, default_flow_style=False)
r = ACResource.from_dict(key, key, serialization, svc)
self.elements.append(r)
od = {
'elements': [ x.as_dict() for x in self.elements ],
'k8s_endpoints': self.k8s_endpoints,
'k8s_services': self.k8s_services,
'services': self.services
}
# self.logger.debug("==== FINALIZE END\n%s" % json.dumps(od, sort_keys=True, indent=4))
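# --- Illustrative sketch, not part of the fetcher class above ---
# The comments in finalize() describe how a Kube Service port is resolved to an
# endpoint target port: a single endpoint port always wins; otherwise the
# port's targetPort, name, and port number are tried against the named endpoint
# ports in that order, with a numeric fallback if nothing matches. The helper
# below restates that rule for one service port; the names and sample data are
# hypothetical and exist only to make the rule concrete.
def resolve_target_port(svc_port, ep_ports):
    # One endpoint port: it wins unconditionally.
    if len(ep_ports) == 1:
        return list(ep_ports.values())[0]
    fallback = None
    for attr in [ 'targetPort', 'name', 'port' ]:
        port_key = svc_port.get(attr)
        if port_key is None:
            continue
        if fallback is None and str(port_key).isdigit():
            # Remember the first numeric key as a last resort.
            fallback = int(port_key)
        if str(port_key) in ep_ports:
            return ep_ports[str(port_key)]
    # Nothing matched: use the numeric fallback, else the service port itself.
    return fallback if fallback is not None else svc_port['port']
if __name__ == '__main__':
    eps = { 'http': 8080, 'metrics': 9090 }
    print(resolve_target_port({ 'port': 80, 'name': 'http' }, eps))        # 8080 (matched by name)
    print(resolve_target_port({ 'port': 443, 'targetPort': 8443 }, eps))   # 8443 (no match, numeric fallback)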
|
import torch
import numpy as np
import dnnutil.network as network
import time
__all__ = ['calculate_accuracy', 'Trainer', 'ClassifierTrainer', 'AutoencoderTrainer']
def calculate_accuracy(prediction, label, axis=1):
'''calculate_accuracy(prediction, label)
Computes the mean accuracy over a batch of predictions and corresponding
ground-truth labels.
Args:
prediction (Tensor): A batch of predictions. Assumed to have shape
[batch-size, nclasses, [d0, d1, ...]].
label (LongTensor): A batch of labels. Assumed to have shape
[batch-size, [d0, d1, ...]]). The number of dimensions should be
one less than prediction.
Returns:
accuracy (Tensor): A single-element Tensor containing the percent of
correct predictions in the batch as a value between 0 and 1.
'''
return torch.eq(prediction.argmax(axis), label).float().mean().item()
class Trainer(object):
'''Trainer(net, optim, loss_fn, accuracy_metric=None)
Base class for all network trainers. Network trainer classes provide
methods to facilitate training and testing deep network models. The goal
is to encapsulate the common functionality, to reduce the boilerplate
code that needs to be repeated across projects.
Args:
net (torch.nn.Module): An instance of a network that inherits from
torch.nn.Module.
optim (torch.optim.Optimizer): An instance of an optimizer that
inherits from torch.optim.Optimizer.
loss_fn (callable): A callable that calculates and returns a loss
value. The loss value should be a single-element Tensor.
        accuracy_metric (callable): A callable that calculates and returns
an accuracy value. Usually this will be a floating point number
in [0, 1].
'''
def __init__(self, net, optim, loss_fn, accuracy_metric=None):
self.net = net
self.loss_fn = loss_fn
self.optim = optim
if accuracy_metric is not None:
self.measure_accuracy = accuracy_metric
else:
self.measure_accuracy = calculate_accuracy
self.train_loss = 0.
self.train_acc = 0.
self.test_loss = 0.
self.test_acc = 0.
def _set_train_stats(self, stats):
'''TODO:docs
'''
self.train_loss = stats[0]
self.train_acc = stats[1]
def _set_test_stats(self, stats):
'''TODO:docs
'''
self.test_loss = stats[0]
self.test_acc = stats[1]
def get_stats(self):
'''TODO:docs
'''
return (self.train_loss, self.train_acc,
self.test_loss, self.test_acc)
def train(self, dataloader, epoch):
'''Train the Trainer's network.
Args:
dataloader (torch.utils.data.DataLoader): An instance of a
DataLoader, which will provide access to the training data.
epoch (int): The current epoch.
Returns:
loss (float): The mean loss over the epoch.
accuracy (float): The mean accuracy over the epoch (in [0, 1]).
'''
self.net.train()
stats = self._run_epoch(dataloader, epoch)
self._set_train_stats(stats)
return stats
def eval(self, dataloader, epoch):
'''Evaluate the Trainer's network.
Args:
dataloader (torch.utils.data.DataLoader): An instance of a
DataLoader, which will provide access to the testing data.
epoch (int): The current epoch.
Returns:
loss (float): The mean loss over the epoch.
accuracy (float): The mean accuracy over the epoch (in [0, 1]).
'''
self.net.eval()
stats = self._run_epoch(dataloader, epoch)
self._set_test_stats(stats)
return stats
def _run_epoch(self, dataloader, epoch):
'''Perform a single epoch of either training or evaluation.
Args:
dataloader (torch.utils.data.DataLoader): An instance of a
DataLoader, which will provide access to the testing data.
epoch (int): The current epoch.
Returns:
loss (float): The mean loss over the epoch.
accuracy (float): The mean accuracy over the epoch (in [0, 1]).
'''
N = len(dataloader.batch_sampler)
msg = 'train' if self.net.training else 'test'
func = self.train_batch if self.net.training else self.test_batch
loss = []
acc = []
at = 0
for i, batch in enumerate(dataloader):
t = time.time()
if self.net.training:
self.update_lr(epoch * N + i + 1)
batch_loss, batch_acc = func(batch)
t = time.time() - t
if i == 0:
at = t
else:
at = at * i / (i + 1) + t / (i + 1)
loss.append(batch_loss)
acc.append(batch_acc)
print(f'\rEPOCH {epoch}: {msg} '
f'batch {i + 1:04d}/{N} '
                  f'lr[ {self.optim.param_groups[0]["lr"]:1.3e} ] '
f'[ {t:.3f} ({at:.3f}) secs ]'
                  f'{" "*10}',
end='', flush=True)
loss = np.mean(loss)
acc = np.mean(acc)
return loss, acc
def update_lr(self, i=None):
'''Update the optimizer's learning rate. Used for batch-level
learning rate scheduling. If using an epoch-level scheduler,
define and use it in the epoch loop. If the iteration number is
not provided (None) or the Trainer has no lr_schedule attribute,
this function does nothing and returns.
Args:
i (int): iteration number (starts at 1 for the first batch).
'''
if i is None or not hasattr(self, 'lr_schedule'):
return
self.lr_schedule.step(i)
def train_batch(self, batch):
'''Train the Trainer's network on a single training batch.
'''
raise NotImplementedError()
def test_batch(self, batch):
'''Test the Trainer's network on a single testing batch.
'''
raise NotImplementedError()
class ClassifierTrainer(Trainer):
'''ClassifierTrainer(net, optim, loss_fn, accuracy_metric=None)
Trainer for training a network to do image classification.
Args:
net (torch.nn.Module): An instance of a network that inherits from
torch.nn.Module.
optim (torch.optim.Optimizer): An instance of an optimizer that
inherits from torch.optim.Optimizer.
loss_fn (callable): A callable that calculates and returns a loss
value. The loss value should be a single-element Tensor.
        accuracy_metric (callable): A callable that calculates and returns
an accuracy value. Usually this will be a floating point number
in [0, 1].
'''
def train_batch(self, batch):
'''Train the Trainer's network on a single training batch.
Args:
batch (iterable): A 2-tuple of (images, labels). Images is a 4-d
Tensor of shape (BxCxHxW), and labels is a Tensor of 2 or more
dimensions (BxLx*) which matches images in the first (batch)
dimension. The exact dimensionality of labels will depend on
the application and loss function chosen, but often consists
of integer class-indexes.
Returns:
loss (float): The mean loss over the batch.
accuracy (float): The mean accuracy over the batch (in [0, 1]).
'''
self.optim.zero_grad()
imgs, labels = network.tocuda(batch)
predictions = self.net(imgs)
loss = self.loss_fn(predictions, labels)
loss.backward()
self.optim.step()
loss = loss.item()
with torch.no_grad():
accuracy = self.measure_accuracy(predictions, labels)
return loss, accuracy
@torch.no_grad()
def test_batch(self, batch):
'''Evaluate the Trainer's network on a single testing batch.
Args:
batch (iterable): A 2-tuple of (images, labels). Images is a 4-d
Tensor of shape (BxCxHxW), and labels is a Tensor of 2 or more
dimensions (BxLx*) which matches images in the first (batch)
dimension. The exact dimensionality of labels will depend on
the application and loss function chosen, but often consists
of integer class-indexes.
Returns:
loss (float): The mean loss over the batch.
accuracy (float): The mean accuracy over the batch (in [0, 1]).
'''
imgs, labels = network.tocuda(batch)
predictions = self.net(imgs)
loss = self.loss_fn(predictions, labels).item()
accuracy = self.measure_accuracy(predictions, labels)
return loss, accuracy
class AutoencoderTrainer(Trainer):
'''AutoencoderTrainer(net, optim, loss_fn)
Trainer for training an autoencoder network.
Args:
net (torch.nn.Module): An instance of a network that inherits from
torch.nn.Module.
optim (torch.optim.Optimizer): An instance of an optimizer that
inherits from torch.optim.Optimizer.
loss_fn (callable): A callable that calculates and returns a loss
value. The loss value should be a single-element Tensor.
'''
def __init__(self, net, optim, loss_fn):
super(AutoencoderTrainer, self).__init__(
net, optim, loss_fn, None)
delattr(self, 'measure_accuracy')
def train_batch(self, batch):
'''Train the Trainer's network on a single training batch.
Args:
batch (iterable): A 2-tuple of (images, labels). Images is a 4-d
Tensor of shape (BxCxHxW), and labels is a Tensor of 2 or more
dimensions (BxLx*) which matches images in the first (batch)
dimension. The exact dimensionality of labels will depend on
the application and loss function chosen, but often consists
of integer class-indexes.
Returns:
loss (float): The mean loss over the batch.
'''
self.optim.zero_grad()
imgs = network.tocuda(batch)
predictions = self.net(imgs)
loss = self.loss_fn(predictions, imgs)
loss.backward()
self.optim.step()
loss = loss.item()
return loss
@torch.no_grad()
def test_batch(self, batch):
'''Evaluate the Trainer's network on a single testing batch.
Args:
batch (iterable): A 2-tuple of (images, labels). Images is a 4-d
Tensor of shape (BxCxHxW), and labels is a Tensor of 2 or more
dimensions (BxLx*) which matches images in the first (batch)
dimension. The exact dimensionality of labels will depend on
the application and loss function chosen, but often consists
of integer class-indexes.
Returns:
loss (float): The mean loss over the batch.
'''
imgs = network.tocuda(batch)
predictions = self.net(imgs)
loss = self.loss_fn(predictions, imgs).item()
return loss
def _run_epoch(self, dataloader, epoch):
'''Perform a single epoch of either training or evaluation.
Args:
dataloader (torch.utils.data.DataLoader): An instance of a
DataLoader, which will provide access to the testing data.
epoch (int): The current epoch.
Returns:
loss (float): The mean loss over the epoch.
'''
N = int(np.ceil(len(dataloader.dataset) / dataloader.batch_size))
msg = 'train' if self.net.training else 'test'
func = self.train_batch if self.net.training else self.test_batch
loss = []
for i, batch in enumerate(dataloader):
batch_loss = func(batch)
loss.append(batch_loss)
            print(f'\rEPOCH {epoch}: {msg} batch {i:04d}/{N}{" "*10}',
end='', flush=True)
loss = np.mean(loss)
return loss
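# --- Illustrative usage sketch (hypothetical model and data) ---
# calculate_accuracy() argmaxes the predictions along the class axis and
# compares them with the integer labels; the check below is self-contained.
# The ClassifierTrainer wiring in the comments assumes a CUDA device, since
# network.tocuda() moves each batch to the GPU.
if __name__ == '__main__':
    preds = torch.tensor([[2.0, 0.1, 0.3],
                          [0.2, 1.5, 0.1],
                          [0.1, 0.2, 3.0],
                          [1.0, 0.9, 0.8]])
    labels = torch.tensor([0, 1, 2, 2])
    # Rows argmax to [0, 1, 2, 0]; three of four match the labels.
    print(calculate_accuracy(preds, labels))  # 0.75
    # Typical wiring (hypothetical names, GPU assumed):
    #   net = MyNet().cuda()
    #   optim = torch.optim.Adam(net.parameters(), lr=1e-3)
    #   trainer = ClassifierTrainer(net, optim, torch.nn.functional.cross_entropy)
    #   for epoch in range(10):
    #       trainer.train(train_loader, epoch)
    #       trainer.eval(test_loader, epoch)
    #       print(trainer.get_stats())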
|
import torch
import numpy as np
import dnnutil.network as network
import time
__all__ = ['calculate_accuracy', 'Trainer', 'ClassifierTrainer', 'AutoencoderTrainer']
def calculate_accuracy(prediction, label, axis=1):
'''calculate_accuracy(prediction, label)
Computes the mean accuracy over a batch of predictions and corresponding
ground-truth labels.
Args:
prediction (Tensor): A batch of predictions. Assumed to have shape
[batch-size, nclasses, [d0, d1, ...]].
label (LongTensor): A batch of labels. Assumed to have shape
[batch-size, [d0, d1, ...]]). The number of dimensions should be
one less than prediction.
Returns:
accuracy (Tensor): A single-element Tensor containing the percent of
correct predictions in the batch as a value between 0 and 1.
'''
return torch.eq(prediction.argmax(axis), label).float().mean().item()
class Trainer(object):
'''Trainer(net, optim, loss_fn, accuracy_metric=None)
Base class for all network trainers. Network trainer classes provide
methods to facilitate training and testing deep network models. The goal
is to encapsulate the common functionality, to reduce the boilerplate
code that needs to be repeated across projects.
Args:
net (torch.nn.Module): An instance of a network that inherits from
torch.nn.Module.
optim (torch.optim.Optimizer): An instance of an optimizer that
inherits from torch.optim.Optimizer.
loss_fn (callable): A callable that calculates and returns a loss
value. The loss value should be a single-element Tensor.
        accuracy_metric (callable): A callable that calculates and returns
an accuracy value. Usually this will be a floating point number
in [0, 1].
'''
def __init__(self, net, optim, loss_fn, accuracy_metric=None):
self.net = net
self.loss_fn = loss_fn
self.optim = optim
if accuracy_metric is not None:
self.measure_accuracy = accuracy_metric
else:
self.measure_accuracy = calculate_accuracy
self.train_loss = 0.
self.train_acc = 0.
self.test_loss = 0.
self.test_acc = 0.
def _set_train_stats(self, stats):
'''TODO:docs
'''
self.train_loss = stats[0]
self.train_acc = stats[1]
def _set_test_stats(self, stats):
'''TODO:docs
'''
self.test_loss = stats[0]
self.test_acc = stats[1]
def get_stats(self):
'''TODO:docs
'''
return (self.train_loss, self.train_acc,
self.test_loss, self.test_acc)
def train(self, dataloader, epoch):
'''Train the Trainer's network.
Args:
dataloader (torch.utils.data.DataLoader): An instance of a
DataLoader, which will provide access to the training data.
epoch (int): The current epoch.
Returns:
loss (float): The mean loss over the epoch.
accuracy (float): The mean accuracy over the epoch (in [0, 1]).
'''
self.net.train()
stats = self._run_epoch(dataloader, epoch)
self._set_train_stats(stats)
return stats
def eval(self, dataloader, epoch):
'''Evaluate the Trainer's network.
Args:
dataloader (torch.utils.data.DataLoader): An instance of a
DataLoader, which will provide access to the testing data.
epoch (int): The current epoch.
Returns:
loss (float): The mean loss over the epoch.
accuracy (float): The mean accuracy over the epoch (in [0, 1]).
'''
self.net.eval()
stats = self._run_epoch(dataloader, epoch)
self._set_test_stats(stats)
return stats
def _run_epoch(self, dataloader, epoch):
'''Perform a single epoch of either training or evaluation.
Args:
dataloader (torch.utils.data.DataLoader): An instance of a
DataLoader, which will provide access to the testing data.
epoch (int): The current epoch.
Returns:
loss (float): The mean loss over the epoch.
accuracy (float): The mean accuracy over the epoch (in [0, 1]).
'''
N = len(dataloader.batch_sampler)
msg = 'train' if self.net.training else 'test'
func = self.train_batch if self.net.training else self.test_batch
loss = []
acc = []
at = 0
for i, batch in enumerate(dataloader):
t = time.time()
if self.net.training:
self.update_lr(epoch * N + i + 1)
batch_loss, batch_acc = func(batch)
t = time.time() - t
if i == 0:
at = t
else:
at = at * i / (i + 1) + t / (i + 1)
loss.append(batch_loss)
acc.append(batch_acc)
print(f'\rEPOCH {epoch}: {msg} '
f'batch {i + 1:04d}/{N} '
f'lr[ {self.optim.param_groups[0]["lr"]:1.3e} ] '
f'[ {t:.3f} ({at:.3f}) secs ]'
f'{" "*10}',
end='', flush=True)
loss = np.mean(loss)
acc = np.mean(acc)
return loss, acc
def update_lr(self, i=None):
'''Update the optimizer's learning rate. Used for batch-level
learning rate scheduling. If using an epoch-level scheduler,
define and use it in the epoch loop. If the iteration number is
not provided (None) or the Trainer has no lr_schedule attribute,
this function does nothing and returns.
Args:
i (int): iteration number (starts at 1 for the first batch).
'''
if i is None or not hasattr(self, 'lr_schedule'):
return
self.lr_schedule.step(i)
def train_batch(self, batch):
'''Train the Trainer's network on a single training batch.
'''
raise NotImplementedError()
def test_batch(self, batch):
'''Test the Trainer's network on a single testing batch.
'''
raise NotImplementedError()
class ClassifierTrainer(Trainer):
'''ClassifierTrainer(net, optim, loss_fn, accuracy_metric=None)
Trainer for training a network to do image classification.
Args:
net (torch.nn.Module): An instance of a network that inherits from
torch.nn.Module.
optim (torch.optim.Optimizer): An instance of an optimizer that
inherits from torch.optim.Optimizer.
loss_fn (callable): A callable that calculates and returns a loss
value. The loss value should be a single-element Tensor.
        accuracy_metric (callable): A callable that calculates and returns
an accuracy value. Usually this will be a floating point number
in [0, 1].
'''
def train_batch(self, batch):
'''Train the Trainer's network on a single training batch.
Args:
batch (iterable): A 2-tuple of (images, labels). Images is a 4-d
Tensor of shape (BxCxHxW), and labels is a Tensor of 2 or more
dimensions (BxLx*) which matches images in the first (batch)
dimension. The exact dimensionality of labels will depend on
the application and loss function chosen, but often consists
of integer class-indexes.
Returns:
loss (float): The mean loss over the batch.
accuracy (float): The mean accuracy over the batch (in [0, 1]).
'''
self.optim.zero_grad()
imgs, labels = network.tocuda(batch)
predictions = self.net(imgs)
loss = self.loss_fn(predictions, labels)
loss.backward()
self.optim.step()
loss = loss.item()
with torch.no_grad():
accuracy = self.measure_accuracy(predictions, labels)
return loss, accuracy
@torch.no_grad()
def test_batch(self, batch):
'''Evaluate the Trainer's network on a single testing batch.
Args:
batch (iterable): A 2-tuple of (images, labels). Images is a 4-d
Tensor of shape (BxCxHxW), and labels is a Tensor of 2 or more
dimensions (BxLx*) which matches images in the first (batch)
dimension. The exact dimensionality of labels will depend on
the application and loss function chosen, but often consists
of integer class-indexes.
Returns:
loss (float): The mean loss over the batch.
accuracy (float): The mean accuracy over the batch (in [0, 1]).
'''
imgs, labels = network.tocuda(batch)
predictions = self.net(imgs)
loss = self.loss_fn(predictions, labels).item()
accuracy = self.measure_accuracy(predictions, labels)
return loss, accuracy
class AutoencoderTrainer(Trainer):
'''AutoencoderTrainer(net, optim, loss_fn)
Trainer for training an autoencoder network.
Args:
net (torch.nn.Module): An instance of a network that inherits from
torch.nn.Module.
optim (torch.optim.Optimizer): An instance of an optimizer that
inherits from torch.optim.Optimizer.
loss_fn (callable): A callable that calculates and returns a loss
value. The loss value should be a single-element Tensor.
'''
def __init__(self, net, optim, loss_fn):
super(AutoencoderTrainer, self).__init__(
net, optim, loss_fn, None)
delattr(self, 'measure_accuracy')
def train_batch(self, batch):
'''Train the Trainer's network on a single training batch.
Args:
batch (iterable): A 2-tuple of (images, labels). Images is a 4-d
Tensor of shape (BxCxHxW), and labels is a Tensor of 2 or more
dimensions (BxLx*) which matches images in the first (batch)
dimension. The exact dimensionality of labels will depend on
the application and loss function chosen, but often consists
of integer class-indexes.
Returns:
loss (float): The mean loss over the batch.
'''
self.optim.zero_grad()
imgs = network.tocuda(batch)
predictions = self.net(imgs)
loss = self.loss_fn(predictions, imgs)
loss.backward()
self.optim.step()
loss = loss.item()
return loss
@torch.no_grad()
def test_batch(self, batch):
'''Evaluate the Trainer's network on a single testing batch.
Args:
batch (iterable): A 2-tuple of (images, labels). Images is a 4-d
Tensor of shape (BxCxHxW), and labels is a Tensor of 2 or more
dimensions (BxLx*) which matches images in the first (batch)
dimension. The exact dimensionality of labels will depend on
the application and loss function chosen, but often consists
of integer class-indexes.
Returns:
loss (float): The mean loss over the batch.
'''
imgs = network.tocuda(batch)
predictions = self.net(imgs)
loss = self.loss_fn(predictions, imgs).item()
return loss
def _run_epoch(self, dataloader, epoch):
'''Perform a single epoch of either training or evaluation.
Args:
dataloader (torch.utils.data.DataLoader): An instance of a
DataLoader, which will provide access to the testing data.
epoch (int): The current epoch.
Returns:
loss (float): The mean loss over the epoch.
'''
N = int(np.ceil(len(dataloader.dataset) / dataloader.batch_size))
msg = 'train' if self.net.training else 'test'
func = self.train_batch if self.net.training else self.test_batch
loss = []
for i, batch in enumerate(dataloader):
batch_loss = func(batch)
loss.append(batch_loss)
print(f'\rEPOCH {epoch}: {msg} batch {i:04d}/{N}{" "*10}',
end='', flush=True)
loss = np.mean(loss)
return loss
|
"""
This script uses python to build a `.nec` file. This allows
for the use of variables and other arithmetic which is much
easier in python. For information on the cards specified by the
arguments, e.g. EX or RP, check out https://www.nec2.org/part_3/cards/
"""
from datetime import datetime as dt
from math import *
def build_nec_file(
comments,
wires,
constants,
frequency=[],
excitations=[],
rad_pattern=[],
output="output",
lims=[2, 5, 10, 20, 30, 40, 50, 60, 70, 80],
sig_figs=2,
verbose=0,
):
"""
Creates a `.nec` file. The values can contain arithmetic in it. Anything
that Python's `eval` can handle and any function in the `math` package,
so trig functions, exponentials, etc.
Parameters:
comments - The comments that are found on CM cards, added as a list
wires - The wire data found on GW cards, a list of lists where the
elements of the sublist are each parameter for the wire. Can use
            constants defined in the `constants` argument and basic arithmetic
(or any function defined in Python's `math` package).
constants - A dictionary of constants to be substituted into the nec
file. Constant names may not be such that one is found in another.
For example, you cannot have 'offset' and 'origin_offset' because
            'offset' can be found (via Python's `replace` method) in 'origin_offset'.
frequency (default []) - Defines the FR card, the frequency range and step
for calculations.
excitations (default []) - List for EX cards, cards that define excitations,
e.g. voltage sources.
rad_pattern (default []) - The RP card which defines how to calculate the
            radiation pattern.
output (default 'output') - The name of the output `.nec` file, the
extension is automatically added.
lims (default [2, 5, 10, 20, 30, 40, 50, 60, 70, 80]) - The character
number that each column ends on. For example, for the default,
we allocate 2 characters for the first argument (the card name),
3 for the next column, 5 for the third, and 10 for the rest.
sig_figs (default 2) - The number of significant figures used for the
numbers written in scientific notation (i.e. how many digits after
the decimal point).
        verbose (default 0) - If 0, will not print out anything. If 1, will print out
just info on the number of wires, file location and time taken to create
file. If 2, will print out the comments in the .nec file, and info on the
number of wires, file location and time taken to create file.
"""
# scinot_ind tells this function at which column of a row to
# start using scientific notation
def _format_rows(rows, card, scinot_ind):
for row in rows:
row_str = card
for ind, param in enumerate(row):
# Replace constants with values
for const_key, const_val in constants.items():
param = param.replace(const_key, str(const_val))
# Add to line correctly formatted
rlim = lims[ind + 1] - lims[ind]
if ind > (scinot_ind - 1):
# Change to 3-digit rounded scientific notation
val = f"{eval(param):.{sig_figs}e}"
else:
# Otherwise just evaluate, e.g. tag number
val = str(eval(param))
                # Add to the row string, pushed as far right as the column allows
row_str += f"{val.rjust(rlim):<{rlim}}"
nec_file.append(row_str)
dt_start = dt.now()
nec_file = []
# Add comments
for comment in comments:
nec_file.append(f"CM {comment}")
# Comment end
nec_file.append("CE")
# Add wires
_format_rows(rows=wires, card="GW", scinot_ind=2)
# Wire end
    nec_file.append(f"GE{(lims[1] - lims[0] - 1)*' '}0")
# Frequency
if frequency:
_format_rows(rows=[frequency], card="FR", scinot_ind=4)
# Excitations
if excitations:
_format_rows(rows=excitations, card="EX", scinot_ind=4)
    # Radiation pattern
if rad_pattern:
_format_rows(rows=[rad_pattern], card="RP", scinot_ind=8)
# File end
nec_file.append("EN\n")
# Write to new file
with open(f"{output}.nec", "w") as f:
f.write("\n".join(nec_file))
dt_end = dt.now()
if verbose:
if verbose == 2:
print("\nComments:")
for comment in comments:
print(" " * 8 + f"{comment}")
print(
f"Wrote {len(wires)} wires to {output}.nec in "
+ f"{(dt_end - dt_start).total_seconds() * 1000:.3f}ms."
)
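# --- Illustrative example (hypothetical antenna values) ---
# A minimal call for a centre-fed dipole: one GW wire of 9 segments, a voltage
# source (EX) on the middle segment, one frequency point (FR) and a far-field
# request (RP). Every entry is a string because each value is run through the
# `constants` substitution and then eval().
if __name__ == "__main__":
    build_nec_file(
        comments=["Half-wave dipole example"],
        wires=[
            # tag, segments, x1, y1, z1, x2, y2, z2, radius
            ["1", "9", "0", "0", "-halflen", "0", "0", "halflen", "0.001"],
        ],
        constants={"halflen": 0.25},
        frequency=["0", "1", "0", "0", "146.0", "0"],
        excitations=[["0", "1", "5", "0", "1.0", "0"]],
        rad_pattern=["0", "19", "37", "1000", "0", "0", "10", "10"],
        output="dipole_example",
        verbose=1,
    )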
|
"""
This script uses python to build a `.nec` file. This allows
for the use of variables and other arithmetic which is much
easier in python. For information on the cards specified by the
arguments, e.g. EX or RP, check out https://www.nec2.org/part_3/cards/
"""
from datetime import datetime as dt
from math import *
def build_nec_file(
comments,
wires,
constants,
frequency=[],
excitations=[],
rad_pattern=[],
output="output",
lims=[2, 5, 10, 20, 30, 40, 50, 60, 70, 80],
sig_figs=2,
verbose=0,
):
"""
Creates a `.nec` file. The values can contain arithmetic in it. Anything
that Python's `eval` can handle and any function in the `math` package,
so trig functions, exponentials, etc.
Parameters:
comments - The comments that are found on CM cards, added as a list
wires - The wire data found on GW cards, a list of lists where the
elements of the sublist are each parameter for the wire. Can use
            constants defined in the `constants` argument and basic arithmetic
(or any function defined in Python's `math` package).
constants - A dictionary of constants to be substituted into the nec
file. Constant names may not be such that one is found in another.
For example, you cannot have 'offset' and 'origin_offset' because
            'offset' can be found (via Python's `replace` method) in 'origin_offset'.
frequency (default []) - Defines the FR card, the frequency range and step
for calculations.
excitations (default []) - List for EX cards, cards that define excitations,
e.g. voltage sources.
rad_pattern (default []) - The RP card which defines how to calculate the
            radiation pattern.
output (default 'output') - The name of the output `.nec` file, the
extension is automatically added.
lims (default [2, 5, 10, 20, 30, 40, 50, 60, 70, 80]) - The character
number that each column ends on. For example, for the default,
we allocate 2 characters for the first argument (the card name),
3 for the next column, 5 for the third, and 10 for the rest.
sig_figs (default 2) - The number of significant figures used for the
numbers written in scientific notation (i.e. how many digits after
the decimal point).
        verbose (default 0) - If 0, will not print out anything. If 1, will print out
just info on the number of wires, file location and time taken to create
file. If 2, will print out the comments in the .nec file, and info on the
number of wires, file location and time taken to create file.
"""
# scinot_ind tells this function at which column of a row to
# start using scientific notation
def _format_rows(rows, card, scinot_ind):
for row in rows:
row_str = card
for ind, param in enumerate(row):
# Replace constants with values
for const_key, const_val in constants.items():
param = param.replace(const_key, str(const_val))
# Add to line correctly formatted
rlim = lims[ind + 1] - lims[ind]
if ind > (scinot_ind - 1):
# Change to 3-digit rounded scientific notation
val = f"{eval(param):.{sig_figs}e}"
else:
# Otherwise just evaluate, e.g. tag number
val = str(eval(param))
                # Add to the row string, pushed as far right as the column allows
row_str += f"{val.rjust(rlim):<{rlim}}"
nec_file.append(row_str)
dt_start = dt.now()
nec_file = []
# Add comments
for comment in comments:
nec_file.append(f"CM {comment}")
# Comment end
nec_file.append("CE")
# Add wires
_format_rows(rows=wires, card="GW", scinot_ind=2)
# Wire end
nec_file.append(f"GE{(lims[1] - lims[0] - 1)*' '}0")
# Frequency
if frequency:
_format_rows(rows=[frequency], card="FR", scinot_ind=4)
# Excitations
if excitations:
_format_rows(rows=excitations, card="EX", scinot_ind=4)
    # Radiation pattern
if rad_pattern:
_format_rows(rows=[rad_pattern], card="RP", scinot_ind=8)
# File end
nec_file.append("EN\n")
# Write to new file
with open(f"{output}.nec", "w") as f:
f.write("\n".join(nec_file))
dt_end = dt.now()
if verbose:
if verbose == 2:
print("\nComments:")
for comment in comments:
print(" " * 8 + f"{comment}")
print(
f"Wrote {len(wires)} wires to {output}.nec in "
+ f"{(dt_end - dt_start).total_seconds() * 1000:.3f}ms."
)
|
import random
import shutil
import os
import numpy as np
import data_loader
import audio_processing
from typing import Dict
from loguru import logger
from tqdm import tqdm
from pprint import pprint
class DataGenerator:
def __init__(self, conf: Dict, batch_size: int = 8):
assert "csv_file_path" in conf
assert "base_dir" in conf
self.conf = conf.copy()
self.batch_size = batch_size
self.examples = data_loader.data_loader(conf)
self.num_examples = len(self.examples)
self.train = {0: [], 1: []}
self.valid = {0: [], 1: []}
self.train_counts = {0: 0, 1: 0}
self.valid_counts = {0: 0, 1: 0}
self.num_train = 0
self.num_valid = 0
self.classes = [0, 1]
self.input_shapes = {
"spec": (),
"hpss": ()
}
logger.info("DataGenerator instantiated")
self.preprocess()
logger.info("Preprocessing complete")
def preprocess(self):
logger.info("Preprocessing examples")
        logger.info(f"{self.input_shapes['spec']} = Current input shape for spec")
folder = os.path.join(self.conf.get("preprocess_dir"))
if self.conf.get("reset_data"):
if os.path.isdir(folder):
shutil.rmtree(folder)
if not os.path.isdir(folder):
os.mkdir(folder)
min_level = 50 - self.conf.get("threshold")
max_level = 50 + self.conf.get("threshold")
valid_split = int(self.conf.get("valid_split") * 100)
logger.info(f"Min level {min_level}, Max level {max_level}")
for key, value in tqdm(self.examples.items()):
audio_file_name = value["audio_file_name"]
file_path = os.path.join(self.conf.get("base_dir"), f"{audio_file_name}.wav")
current_class = 1
for j, feature in enumerate(self.conf.get("features")):
current_val = int(value[feature])
current_class = -1
if current_val < min_level:
current_class = 0
elif current_val > max_level:
current_class = 1
if current_class == -1:
continue
target_file_path = os.path.join(self.conf.get("preprocess_dir"), audio_file_name)
if not os.path.isfile(f"{target_file_path}.spec.npy"):
spec, hpss = audio_processing.get_features(file_path, self.conf)
self.input_shapes["spec"] = spec.shape
self.input_shapes["hpss"] = hpss.shape
np.save(f"{target_file_path}.spec", spec)
np.save(f"{target_file_path}.hpss", hpss)
elif len(self.input_shapes["spec"]) == 0:
spec = np.load(f"{target_file_path}.spec.npy")
hpss = np.load(f"{target_file_path}.hpss.npy")
logger.info("Setting input shapes based on previous files")
logger.info(f"{spec.shape}, {hpss.shape}")
self.input_shapes["spec"] = spec.shape
self.input_shapes["hpss"] = hpss.shape
if random.randint(0, 99) < valid_split:
self.valid[current_class].append(target_file_path)
self.valid_counts[current_class] += 1
else:
self.train[current_class].append(target_file_path)
self.train_counts[current_class] += 1
self.num_train = sum(list(self.train_counts.values()))
        self.num_valid = sum(list(self.valid_counts.values()))
logger.info("Class counts in training set")
pprint(self.train_counts)
logger.info("Class counts in validation set")
pprint(self.valid_counts)
def generator(self, set_name: str):
assert set_name in ["train", "valid"], "Set name must be either train or valid"
while True:
spec_batch = np.zeros((self.batch_size,) + self.input_shapes["spec"])
hpss_batch = np.zeros((self.batch_size,) + self.input_shapes["hpss"])
y_batch = np.zeros((self.batch_size, ))
current_set = eval(f"self.{set_name}")
for i in range(0, self.batch_size):
target_class = random.choice([0, 1])
example_file = random.choice(current_set[target_class])
example_spec = np.load(f"{example_file}.spec.npy") * self.conf.get("scale_factor")
example_hpss = np.load(f"{example_file}.hpss.npy") * self.conf.get("scale_factor")
spec_batch[i] = example_spec
hpss_batch[i] = example_hpss
y_batch[i] = target_class
yield {"spec": spec_batch, "hpss": hpss_batch}, {"output": y_batch}
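# --- Illustrative configuration sketch (hypothetical paths and values) ---
# DataGenerator only reads the conf keys used above; this dict documents the
# expected shape of that configuration.
EXAMPLE_CONF = {
    "csv_file_path": "labels.csv",      # passed through to data_loader.data_loader()
    "base_dir": "audio/",               # directory holding <audio_file_name>.wav files
    "preprocess_dir": "preprocessed/",  # where the .spec.npy / .hpss.npy caches are written
    "reset_data": False,                # True wipes preprocess_dir before preprocessing
    "threshold": 10,                    # class 0 below 50 - threshold, class 1 above 50 + threshold
    "valid_split": 0.2,                 # fraction of examples routed to the validation split
    "features": ["valence"],            # label columns thresholded into the two classes
    "scale_factor": 1.0,                # multiplier applied to loaded features in generator()
}
# Typical use (requires the real CSV and audio files):
#   gen = DataGenerator(EXAMPLE_CONF, batch_size=8)
#   train_batches = gen.generator("train")
#   x, y = next(train_batches)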
|
import random
import shutil
import os
import numpy as np
import data_loader
import audio_processing
from typing import Dict
from loguru import logger
from tqdm import tqdm
from pprint import pprint
class DataGenerator:
def __init__(self, conf: Dict, batch_size: int = 8):
assert "csv_file_path" in conf
assert "base_dir" in conf
self.conf = conf.copy()
self.batch_size = batch_size
self.examples = data_loader.data_loader(conf)
self.num_examples = len(self.examples)
self.train = {0: [], 1: []}
self.valid = {0: [], 1: []}
self.train_counts = {0: 0, 1: 0}
self.valid_counts = {0: 0, 1: 0}
self.num_train = 0
self.num_valid = 0
self.classes = [0, 1]
self.input_shapes = {
"spec": (),
"hpss": ()
}
logger.info("DataGenerator instantiated")
self.preprocess()
logger.info("Preprocessing complete")
def preprocess(self):
logger.info("Preprocessing examples")
logger.info(f"{self.input_shapes['spec']} = Current input shape for spec")
folder = os.path.join(self.conf.get("preprocess_dir"))
if self.conf.get("reset_data"):
if os.path.isdir(folder):
shutil.rmtree(folder)
if not os.path.isdir(folder):
os.mkdir(folder)
min_level = 50 - self.conf.get("threshold")
max_level = 50 + self.conf.get("threshold")
valid_split = int(self.conf.get("valid_split") * 100)
logger.info(f"Min level {min_level}, Max level {max_level}")
for key, value in tqdm(self.examples.items()):
audio_file_name = value["audio_file_name"]
file_path = os.path.join(self.conf.get("base_dir"), f"{audio_file_name}.wav")
current_class = 1
for j, feature in enumerate(self.conf.get("features")):
current_val = int(value[feature])
current_class = -1
if current_val < min_level:
current_class = 0
elif current_val > max_level:
current_class = 1
if current_class == -1:
continue
target_file_path = os.path.join(self.conf.get("preprocess_dir"), audio_file_name)
if not os.path.isfile(f"{target_file_path}.spec.npy"):
spec, hpss = audio_processing.get_features(file_path, self.conf)
self.input_shapes["spec"] = spec.shape
self.input_shapes["hpss"] = hpss.shape
np.save(f"{target_file_path}.spec", spec)
np.save(f"{target_file_path}.hpss", hpss)
elif len(self.input_shapes["spec"]) == 0:
spec = np.load(f"{target_file_path}.spec.npy")
hpss = np.load(f"{target_file_path}.hpss.npy")
logger.info("Setting input shapes based on previous files")
logger.info(f"{spec.shape}, {hpss.shape}")
self.input_shapes["spec"] = spec.shape
self.input_shapes["hpss"] = hpss.shape
if random.randint(0, 99) < valid_split:
self.valid[current_class].append(target_file_path)
self.valid_counts[current_class] += 1
else:
self.train[current_class].append(target_file_path)
self.train_counts[current_class] += 1
self.num_train = sum(list(self.train_counts.values()))
        self.num_valid = sum(list(self.valid_counts.values()))
logger.info("Class counts in training set")
pprint(self.train_counts)
logger.info("Class counts in validation set")
pprint(self.valid_counts)
def generator(self, set_name: str):
assert set_name in ["train", "valid"], "Set name must be either train or valid"
while True:
spec_batch = np.zeros((self.batch_size,) + self.input_shapes["spec"])
hpss_batch = np.zeros((self.batch_size,) + self.input_shapes["hpss"])
y_batch = np.zeros((self.batch_size, ))
current_set = eval(f"self.{set_name}")
for i in range(0, self.batch_size):
target_class = random.choice([0, 1])
example_file = random.choice(current_set[target_class])
example_spec = np.load(f"{example_file}.spec.npy") * self.conf.get("scale_factor")
example_hpss = np.load(f"{example_file}.hpss.npy") * self.conf.get("scale_factor")
spec_batch[i] = example_spec
hpss_batch[i] = example_hpss
y_batch[i] = target_class
yield {"spec": spec_batch, "hpss": hpss_batch}, {"output": y_batch}
|
"""
BASIC FUNCTIONS FOR THE PROGRAM
"""
from time import sleep
# Print a separator line of special characters
def linha(tam=40):
    print(f"{'='*tam}")
# Receive and validate a name
def ler_nome(txt):
stop = True
while stop:
stop = False
nome = input(txt).strip()
lista_nome = nome.split()
if len(lista_nome) == 0:
print("ERRO! Você digitou um nome vazio...")
sleep(1)
stop = True
else:
for valor in lista_nome:
                # Check whether the name contains non-alphabetic content
if not valor.isalpha():
print("ERRO! Você digitou um nome inválido...")
sleep(1)
stop = True
nome = " ".join(lista_nome)
return nome
# Receive and validate an integer
def ler_inteiro(txt=""):
    # If the prompt text is empty, use a default message
if txt == "":
txt = "Digite o valor de um número inteiro"
while True:
try:
inteiro = int(input(txt))
except (KeyboardInterrupt):
print("ERRO! Entrada de dados interrompida pelo usuário!")
inteiro = 0
break
except(ValueError):
print("ERRO! Você digitou um valor inteiro inválido...")
sleep(1)
        except: # Other errors
print("ERRO! O programa teve um erro durante a leitura...")
sleep(1)
else:
break
return inteiro
# Receive and validate an age
def ler_idade(txt):
if txt == "":
txt = "Digite o valor da idade"
while True:
idade = ler_inteiro(txt)
if idade < 0:
            print("ERRO! Você digitou um valor negativo...")
sleep(1)
else:
break
return idade
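# --- Illustrative usage (interactive: the helpers above read from stdin) ---
if __name__ == "__main__":
    linha()
    nome = ler_nome("Name: ")
    idade = ler_idade("Age: ")
    linha()
    print(nome, idade)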
|
"""
BASIC FUNCTIONS FOR THE PROGRAM
"""
from time import sleep
# Print a separator line of special characters
def linha(tam=40):
print(f"{'='*tam}")
# Receive and validate a name
def ler_nome(txt):
stop = True
while stop:
stop = False
nome = input(txt).strip()
lista_nome = nome.split()
if len(lista_nome) == 0:
print("ERRO! Você digitou um nome vazio...")
sleep(1)
stop = True
else:
for valor in lista_nome:
                # Check whether the name contains non-alphabetic content
if not valor.isalpha():
print("ERRO! Você digitou um nome inválido...")
sleep(1)
stop = True
nome = " ".join(lista_nome)
return nome
# Receive and validate an integer
def ler_inteiro(txt=""):
    # If the prompt text is empty, use a default message
if txt == "":
txt = "Digite o valor de um número inteiro"
while True:
try:
inteiro = int(input(txt))
except (KeyboardInterrupt):
print("ERRO! Entrada de dados interrompida pelo usuário!")
inteiro = 0
break
except(ValueError):
print("ERRO! Você digitou um valor inteiro inválido...")
sleep(1)
        except: # Other errors
print("ERRO! O programa teve um erro durante a leitura...")
sleep(1)
else:
break
return inteiro
# Receive and validate an age
def ler_idade(txt):
if txt == "":
txt = "Digite o valor da idade"
while True:
idade = ler_inteiro(txt)
if idade < 0:
            print("ERRO! Você digitou um valor negativo...")
sleep(1)
else:
break
return idade
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager, suppress
from copy import copy, deepcopy
import numpy as np
import torch
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.core.memory import ModelSummary
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.core.step_result import Result
from pytorch_lightning.plugins import ParallelPlugin
from pytorch_lightning.trainer.states import RunningStage, TrainerState
from pytorch_lightning.trainer.supporters import Accumulator, TensorRunningAccum
from pytorch_lightning.utilities import _TPU_AVAILABLE, AMPType, DeviceType, parsing
from pytorch_lightning.utilities.distributed import rank_zero_info, rank_zero_warn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.memory import recursive_detach
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.parsing import AttributeDict
from pytorch_lightning.utilities.warnings import WarningCache
class TrainLoop:
def __init__(self, trainer, multiple_trainloader_mode):
self.trainer = trainer
self.early_stopping_accumulator = None
self.checkpoint_accumulator = None
self.accumulated_loss = None
self.warning_cache = WarningCache()
self._teardown_already_run = False
self.running_loss = TensorRunningAccum(window_length=20)
self.automatic_optimization = True
self._curr_step_result = None
self._cur_grad_norm_dict = None
self._multiple_trainloader_mode = multiple_trainloader_mode
self._skip_backward = False
self.trainer._multiple_trainloader_mode = multiple_trainloader_mode
def on_trainer_init(
self,
max_epochs,
min_epochs,
max_steps,
min_steps,
num_sanity_val_steps,
automatic_optimization,
weights_summary,
):
self.trainer.global_step = 0
self.trainer.current_epoch = 0
self.trainer.interrupted = False
self.trainer.should_stop = False
self.trainer._state = TrainerState.INITIALIZING
self.trainer.total_batch_idx = 0
self.trainer.batch_idx = 0
self.trainer.num_training_batches = 0
self.trainer.train_dataloader = None
self.automatic_optimization = automatic_optimization
        # If neither max_epochs nor max_steps is set, then use existing default of max_epochs = 1000
self.trainer.max_epochs = 1000 if (max_epochs is None and max_steps is None) else max_epochs
        # If neither min_epochs nor min_steps is set, then use existing default of min_epochs = 1
self.trainer.min_epochs = 1 if (min_epochs is None and min_steps is None) else min_epochs
self.trainer.max_steps = max_steps
self.trainer.min_steps = min_steps
if num_sanity_val_steps == -1:
self.trainer.num_sanity_val_steps = float("inf")
else:
self.trainer.num_sanity_val_steps = num_sanity_val_steps
self.trainer.weights_summary = weights_summary
if weights_summary is not None and weights_summary not in ModelSummary.MODES:
raise MisconfigurationException(
                f"`weights_summary` can be None, {', '.join(ModelSummary.MODES)}, got {weights_summary}"
)
@property
def num_optimizers(self):
num_optimizers = len(self.get_optimizers_iterable())
return num_optimizers
def should_skip_training(self):
should_by_max_steps = self.trainer.max_steps is not None and self.trainer.global_step >= self.trainer.max_steps
should_by_epoch = self.trainer.max_epochs is not None and self.trainer.current_epoch >= self.trainer.max_epochs
return should_by_max_steps or should_by_epoch or self.trainer.num_training_batches == 0
def on_train_start(self):
# hook
self.trainer.call_hook("on_train_start")
# provide rank to profiler
self.trainer.profile_connector.on_train_start(self.trainer)
def setup_fit(self, model, train_dataloader, val_dataloaders, datamodule):
# clean hparams
if hasattr(model, "hparams"):
parsing.clean_namespace(model.hparams)
# links data to the trainer
self.trainer.data_connector.attach_data(model, train_dataloader, val_dataloaders, datamodule)
# check that model is configured correctly
self.trainer.config_validator.verify_loop_configurations(model)
# attach model log function to callback
self.trainer.callback_connector.attach_model_logging_functions(model)
def on_train_end(self):
if self._teardown_already_run:
return
self._teardown_already_run = True
# trigger checkpoint check. need to temporarily decrease the global step to avoid saving duplicates
# when a checkpoint was saved at the last step
self.trainer.global_step -= 1
self.check_checkpoint_callback(should_update=True, is_last=True)
self.trainer.global_step += 1
# hook
self.trainer.call_hook("on_train_end")
# todo: TPU 8 cores hangs in flush with TensorBoard. Might do for all loggers.
        # It might be related to xla tensors blocked when moving to the cpu
# kill loggers
if self.trainer.logger is not None and self.trainer.training_type_plugin.should_finalize:
self.trainer.logger.finalize("success")
# summarize profile results
if self.trainer.global_rank == 0:
self.trainer.profiler.describe()
# give accelerators a chance to finish
self.trainer.accelerator_backend.on_train_end()
# clear mem
if self.trainer._device_type == DeviceType.GPU:
model = self.trainer.get_model()
model.cpu()
torch.cuda.empty_cache()
def check_checkpoint_callback(self, should_update, is_last=False):
# TODO bake this logic into the ModelCheckpoint callback
if should_update and self.trainer.checkpoint_connector.has_trained:
callbacks = self.trainer.checkpoint_callbacks
if is_last and any(cb.save_last for cb in callbacks):
rank_zero_info("Saving latest checkpoint...")
model = self.trainer.get_model()
for cb in callbacks:
cb.on_validation_end(self.trainer, model)
def check_early_stopping_callback(self, should_update):
# TODO bake this logic into the EarlyStopping callback
if should_update and self.trainer.checkpoint_connector.has_trained:
callbacks = [c for c in self.trainer.callbacks if isinstance(c, EarlyStopping)]
model = self.trainer.get_model()
for cb in callbacks:
cb.on_validation_end(self.trainer, model)
def on_train_epoch_start(self, epoch):
# update training progress in trainer
self.trainer.current_epoch = epoch
model = self.trainer.get_model()
# reset train dataloader
if epoch != 0 and self.trainer.reload_dataloaders_every_epoch:
self.trainer.reset_train_dataloader(model)
# todo: specify the possible exception
with suppress(Exception):
# set seed for distributed sampler (enables shuffling for each epoch)
self.trainer.train_dataloader.sampler.set_epoch(epoch)
        # change gradient accumulation according to the accumulation_scheduler
self.trainer.accumulation_scheduler.on_epoch_start(self.trainer, self.trainer.get_model())
# stores accumulated grad fractions per batch
self.accumulated_loss = TensorRunningAccum(window_length=self.trainer.accumulate_grad_batches)
# structured result accumulators for callbacks
self.early_stopping_accumulator = Accumulator()
self.checkpoint_accumulator = Accumulator()
# hook
self.trainer.call_hook("on_epoch_start")
self.trainer.call_hook("on_train_epoch_start")
def on_train_batch_end(self, epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx):
# hook
self.trainer.call_hook('on_train_batch_end', batch_end_outputs, batch, batch_idx, dataloader_idx)
self.trainer.call_hook('on_batch_end')
# figure out what to track for epoch end
self.track_epoch_end_reduce_metrics(epoch_output, batch_end_outputs)
# reset batch logger internals
self.trainer.logger_connector.on_train_batch_end()
def reset_train_val_dataloaders(self, model):
if self.trainer.train_dataloader is None or not self.trainer.reload_dataloaders_every_epoch:
self.trainer.reset_train_dataloader(model)
if self.trainer.val_dataloaders is None and not self.trainer.reload_dataloaders_every_epoch:
self.trainer.reset_val_dataloader(model)
def track_epoch_end_reduce_metrics(self, epoch_output, batch_end_outputs):
# track the outputs to reduce at the end of the epoch
for opt_idx, opt_outputs in enumerate(batch_end_outputs):
sample_output = opt_outputs[-1]
# decide if we need to reduce at the end of the epoch automatically
auto_reduce_tng_result = isinstance(sample_output, Result) and sample_output.should_reduce_on_epoch_end
hook_overridden = (
is_overridden("training_epoch_end", model=self.trainer.get_model())
or is_overridden("on_train_epoch_end", model=self.trainer.get_model())
)
# only track when a) it needs to be autoreduced OR b) the user wants to manually reduce on epoch end
if not (hook_overridden or auto_reduce_tng_result):
continue
# with 1 step (no tbptt) don't use a sequence at epoch end
if isinstance(opt_outputs, list) and len(opt_outputs) == 1 and not isinstance(opt_outputs[0], Result):
opt_outputs = opt_outputs[0]
epoch_output[opt_idx].append(opt_outputs)
def get_optimizers_iterable(self):
"""
Generates an iterable with (idx, optimizer) for each optimizer.
"""
if not self.trainer.optimizer_frequencies:
# call training_step once per optimizer
return list(enumerate(self.trainer.optimizers))
optimizer_freq_cumsum = np.cumsum(self.trainer.optimizer_frequencies)
optimizers_loop_length = optimizer_freq_cumsum[-1]
current_place_in_loop = self.trainer.total_batch_idx % optimizers_loop_length
        # find optimizer index by looking for the first {item > current_place} in the cumsum list
opt_idx = np.argmax(optimizer_freq_cumsum > current_place_in_loop)
return [[opt_idx, self.trainer.optimizers[opt_idx]]]
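    # Worked example of the frequency-based selection above (illustrative
    # values): with optimizer_frequencies = [2, 1] the cumulative sums are
    # [2, 3], so total_batch_idx % 3 in {0, 1} selects optimizer 0 and
    # total_batch_idx % 3 == 2 selects optimizer 1, i.e. optimizer 0 is used
    # twice as often as optimizer 1.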
def on_after_backward(self, training_step_output, batch_idx, untouched_loss):
is_result_obj = isinstance(training_step_output, Result)
if is_result_obj:
training_step_output.detach()
else:
training_step_output.batch_loss = training_step_output.batch_loss.detach()
# insert after step hook
self.trainer.call_hook("on_after_backward")
# when in dev debugging track the losses
self.trainer.dev_debugger.track_train_loss_history(batch_idx, untouched_loss.detach())
def _check_training_step_output(self, training_step_output):
if isinstance(training_step_output, torch.Tensor) and not self.automatic_optimization:
if training_step_output.grad_fn is None:
# TODO: Find why - RuntimeError: Expected to mark a variable ready only once ...
raise MisconfigurationException("In manual optimization, `training_step` should not return a Tensor")
def training_step(self, split_batch, batch_idx, opt_idx, hiddens):
# give the PL module a result for logging
model_ref = self.trainer.get_model()
with self.trainer.profiler.profile("model_forward"):
args = self.build_train_args(split_batch, batch_idx, opt_idx, hiddens)
# manually capture logged metrics
model_ref._current_fx_name = 'training_step'
model_ref._results = Result()
with self.trainer.profiler.profile("training_step"):
training_step_output = self.trainer.accelerator_backend.training_step(args)
self.trainer.accelerator_backend.post_training_step()
self.trainer.logger_connector.cache_logged_metrics()
self._check_training_step_output(training_step_output)
training_step_output = self.trainer.call_hook("training_step_end", training_step_output)
training_step_output_for_epoch_end, training_step_output = self._process_training_step_output(
training_step_output, split_batch
)
is_result_obj = isinstance(training_step_output, Result)
if training_step_output_for_epoch_end is None:
return None
# enable empty loss when using manual opt
closure_loss = None
untouched_loss = None
if self.trainer.train_loop.automatic_optimization:
# accumulate loss
# (if accumulate_grad_batches = 1 no effect)
if is_result_obj:
closure_loss = training_step_output.minimize
else:
closure_loss = training_step_output.batch_loss
closure_loss = closure_loss / self.trainer.accumulate_grad_batches
# the loss will get scaled for amp. avoid any modifications to it
untouched_loss = closure_loss.detach().clone()
# result
result = AttributeDict(
closure_loss=closure_loss,
loss=untouched_loss,
training_step_output=training_step_output,
training_step_output_for_epoch_end=training_step_output_for_epoch_end,
hiddens=training_step_output.hiddens,
)
return result
def _process_training_step_output(self, training_step_output, split_batch):
training_step_output_for_epoch_end = training_step_output
        # allow training_step to return None
if training_step_output_for_epoch_end is None:
return None, None
# -----------------------------------------
# process result return (DEPRECATE in 1.0)
# -----------------------------------------
if isinstance(training_step_output, Result):
training_step_output_for_epoch_end = self._process_result(training_step_output, split_batch)
return training_step_output_for_epoch_end, training_step_output
# -----------------------------------------
# process hybrid (1.0)
# -----------------------------------------
# no need for these checks in 1.0.0
# TODO: remove checks in 1.0.0
is_tensor = isinstance(training_step_output_for_epoch_end, torch.Tensor)
is_1_0_output = is_tensor or ("log" not in training_step_output and "progress_bar" not in training_step_output)
if is_1_0_output:
return self._process_training_step_output_1_0(training_step_output, split_batch)
# -----------------------------------------
# process old dict (deprecate 1.0)
# -----------------------------------------
training_step_output = self.trainer.process_dict_result(training_step_output, train=True)
training_step_output = AttributeDict(
batch_loss=training_step_output[0],
pbar_on_batch_end=training_step_output[1],
log_metrics=training_step_output[2],
callback_metrics=training_step_output[3],
hiddens=training_step_output[4],
)
# if the user decides to finally reduce things in epoch_end, save raw output without graphs
if isinstance(training_step_output_for_epoch_end, torch.Tensor):
training_step_output_for_epoch_end = training_step_output_for_epoch_end.detach()
else:
training_step_output_for_epoch_end = recursive_detach(training_step_output_for_epoch_end)
return training_step_output_for_epoch_end, training_step_output
def _process_training_step_output_1_0(self, training_step_output, split_batch):
result = self.trainer.get_model()._results
loss = None
hiddens = None
# handle dict return
if isinstance(training_step_output, dict):
loss = training_step_output.pop("loss", None)
hiddens = training_step_output.pop("hiddens", None)
result["extra"] = training_step_output
# handle scalar return
elif isinstance(training_step_output, torch.Tensor):
loss = training_step_output
result["extra"] = {}
# map to results under the hood
result.minimize = loss
result.hiddens = hiddens
# track batch for manual reduction with result
result.track_batch_size(len(split_batch))
# track metrics without grads for epoch reduction
training_step_output_for_epoch_end = copy(result)
training_step_output_for_epoch_end.detach()
if self.trainer.move_metrics_to_cpu:
training_step_output_for_epoch_end.cpu()
# what flows back into the system
training_step_output = result
return training_step_output_for_epoch_end, training_step_output
def _process_result(self, training_step_output, split_batch):
training_step_output.track_batch_size(len(split_batch))
m = """
TrainResult and EvalResult were deprecated in 0.9.1 and support will drop in 1.0.0.
Use self.log and .write from the LightningModule to log metrics and write predictions.
training_step can now only return a scalar (for the loss) or a dictionary with anything you want.
Option 1:
return loss
Option 2:
return {'loss': loss, 'anything_else': ...}
Option 3:
return {'loss': loss, 'hiddens': hiddens, 'anything_else': ...}
"""
rank_zero_warn(m)
training_step_output_for_epoch_end = copy(training_step_output)
training_step_output_for_epoch_end.detach()
return training_step_output_for_epoch_end
def optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure):
model_ref = self.trainer.get_model()
is_lbfgs = isinstance(optimizer, torch.optim.LBFGS)
using_native_amp = self.trainer.amp_backend == AMPType.NATIVE
# native amp + lbfgs is a no go right now
if using_native_amp and is_lbfgs:
raise MisconfigurationException(
'native PyTorch amp and lbfgs are not compatible.'
' To request, please file a Github issue in PyTorch and tag @mcarilli'
)
# wraps into LightningOptimizer only for running step
optimizer = LightningOptimizer._to_lightning_optimizer(optimizer, self.trainer, opt_idx)
# model hook
model_ref.optimizer_step(
self.trainer.current_epoch,
batch_idx,
optimizer,
opt_idx,
train_step_and_backward_closure,
on_tpu=self.trainer._device_type == DeviceType.TPU and _TPU_AVAILABLE,
using_native_amp=using_native_amp,
using_lbfgs=is_lbfgs,
)
def on_before_zero_grad(self, optimizer):
self.trainer.call_hook('on_before_zero_grad', optimizer)
def optimizer_zero_grad(self, batch_idx, optimizer, opt_idx):
self.trainer.accelerator_backend.optimizer_zero_grad(self.trainer.current_epoch, batch_idx, optimizer, opt_idx)
def track_and_norm_grad(self, optimizer):
# track gradient norms
grad_norm_dic = self._track_gradient_norm()
# clip gradients
self.trainer.accelerator_backend.clip_gradients(optimizer, self.trainer.gradient_clip_val)
self._cur_grad_norm_dict = grad_norm_dic
def _track_gradient_norm(self):
grad_norm_dict = {}
if (self.trainer.global_step + 1) % self.trainer.log_every_n_steps == 0:
if float(self.trainer.track_grad_norm) > 0:
model = self.trainer.get_model()
grad_norm_dict = model.grad_norm(self.trainer.track_grad_norm)
return grad_norm_dict
def process_hiddens(self, opt_closure_result):
hiddens = opt_closure_result.hiddens
if isinstance(opt_closure_result.training_step_output, Result):
opt_closure_result.training_step_output_for_epoch_end.drop_hiddens()
return hiddens
def tbptt_split_batch(self, batch):
splits = [batch]
if self.trainer.truncated_bptt_steps is not None:
model_ref = self.trainer.get_model()
with self.trainer.profiler.profile("tbptt_split_batch"):
splits = model_ref.tbptt_split_batch(batch, self.trainer.truncated_bptt_steps)
return splits
def run_training_epoch(self):
# modify dataloader if needed (ddp, etc...)
train_dataloader = self.trainer.accelerator_backend.process_dataloader(self.trainer.train_dataloader)
# track epoch output
epoch_output = [[] for _ in range(self.num_optimizers)]
train_dataloader = self.trainer.data_connector.get_profiled_train_dataloader(train_dataloader)
dataloader_idx = 0
should_check_val = False
for batch_idx, (batch, is_last_batch) in train_dataloader:
self.trainer.batch_idx = batch_idx
# ------------------------------------
# TRAINING_STEP + TRAINING_STEP_END
# ------------------------------------
with self.trainer.profiler.profile("run_training_batch"):
batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)
# when returning -1 from train_step, we end epoch early
if batch_output.signal == -1:
break
batch_end_outputs = self.process_train_step_outputs(
batch_output.training_step_output_for_epoch_end,
self.early_stopping_accumulator,
self.checkpoint_accumulator,
)
# hook
# TODO: add outputs to batches
self.on_train_batch_end(epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx)
# -----------------------------------------
# SAVE METRICS TO LOGGERS
# -----------------------------------------
self.trainer.logger_connector.log_train_step_metrics(batch_output)
# -----------------------------------------
# VALIDATE IF NEEDED + CHECKPOINT CALLBACK
# -----------------------------------------
should_check_val = self.should_check_val_fx(batch_idx, is_last_batch)
if should_check_val:
self.trainer.run_evaluation()
# reset stage to train
self.trainer._set_wide_running_stage(RunningStage.TRAINING)
# -----------------------------------------
# SAVE LOGGERS (ie: Tensorboard, etc...)
# -----------------------------------------
self.save_loggers_on_train_batch_end()
# update LR schedulers
monitor_metrics = deepcopy(self.trainer.logger_connector.callback_metrics)
self.update_train_loop_lr_schedulers(monitor_metrics=monitor_metrics)
self.trainer.checkpoint_connector.has_trained = True
# max steps reached, end training
if (
self.trainer.max_steps is not None and self.trainer.max_steps == self.trainer.global_step + 1
and self._accumulated_batches_reached()
):
break
# end epoch early
# stop when the flag is changed or we've gone past the amount
# requested in the batches
if self.trainer.should_stop:
break
self.trainer.total_batch_idx += 1
# stop epoch if we limited the number of training batches
if self._num_training_batches_reached(is_last_batch):
break
# progress global step according to grads progress
self.increment_accumulated_grad_global_step()
# epoch end hook
self.run_on_epoch_end_hook(epoch_output)
# log epoch metrics
self.trainer.logger_connector.log_train_epoch_end_metrics(
epoch_output, self.checkpoint_accumulator, self.early_stopping_accumulator, self.num_optimizers
)
should_check_val = self.should_check_val_fx(batch_idx, is_last_batch, on_epoch=True)
if should_check_val:
self.trainer.run_evaluation(on_epoch=True)
# reset stage to train
self.trainer._set_wide_running_stage(RunningStage.TRAINING)
should_skip_eval = self.trainer.evaluation_loop.should_skip_evaluation(self.trainer.num_val_batches)
should_train_only = self.trainer.disable_validation or should_skip_eval
if should_train_only:
# update epoch level lr_schedulers
self.trainer.optimizer_connector.update_learning_rates(interval='epoch')
self.check_checkpoint_callback(True)
self.check_early_stopping_callback(True)
# increment the global step once
# progress global step according to grads progress
self.increment_accumulated_grad_global_step()
def run_training_batch(self, batch, batch_idx, dataloader_idx):
# track grad norms
grad_norm_dic = {}
# bookkeeping
self.trainer.hiddens = None
# track all outputs across time and num of optimizers
batch_outputs = [[] for _ in range(len(self.get_optimizers_iterable()))]
if batch is None:
return AttributeDict(signal=0, grad_norm_dic=grad_norm_dic)
# hook
response = self.trainer.call_hook("on_batch_start")
if response == -1:
return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)
# hook
response = self.trainer.call_hook("on_train_batch_start", batch, batch_idx, dataloader_idx)
if response == -1:
return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)
# lightning module hook
splits = self.tbptt_split_batch(batch)
for split_idx, split_batch in enumerate(splits):
# create an iterable for optimizers and loop over them
for opt_idx, optimizer in self.prepare_optimizers():
# toggle model params + set info to logger_connector
self.run_train_split_start(split_idx, split_batch, opt_idx, optimizer)
if self.should_accumulate():
# For gradient accumulation
# -------------------
# calculate loss (train step + train step end)
# -------------------
                    # automatic_optimization=True: perform ddp sync only when performing optimizer_step
# automatic_optimization=False: don't block synchronization here
with self.block_ddp_sync_behaviour():
self.training_step_and_backward(
split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens
)
batch_outputs = self._process_closure_result(
batch_outputs=batch_outputs,
opt_idx=opt_idx,
)
# ------------------------------
# BACKWARD PASS
# ------------------------------
# gradient update with accumulated gradients
else:
if self.automatic_optimization:
def train_step_and_backward_closure():
result = self.training_step_and_backward(
split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens
)
return None if result is None else result.loss
# optimizer step
self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
else:
self._curr_step_result = self.training_step(
split_batch, batch_idx, opt_idx, self.trainer.hiddens
)
if self._curr_step_result is None:
# user decided to skip optimization
# make sure to zero grad.
continue
batch_outputs = self._process_closure_result(
batch_outputs=batch_outputs,
opt_idx=opt_idx,
)
                    # todo: Properly aggregate grad_norm across opt_idx and split_idx
grad_norm_dic = self._cur_grad_norm_dict
self._cur_grad_norm_dict = None
# update running loss + reset accumulated loss
self.update_running_loss()
result = AttributeDict(
signal=0,
grad_norm_dic=grad_norm_dic,
training_step_output_for_epoch_end=batch_outputs,
)
return result
@contextmanager
def block_ddp_sync_behaviour(self, should_block_sync: bool = False):
"""
automatic_optimization = True
Blocks ddp sync gradients behaviour on backwards pass.
This is useful for skipping sync when accumulating gradients, reducing communication overhead
automatic_optimization = False
do not block ddp gradient sync when using manual optimization
as gradients are needed within the training step
Returns:
context manager with sync behaviour off
"""
if (
isinstance(self.trainer.training_type_plugin, ParallelPlugin)
and (self.automatic_optimization or should_block_sync)
):
with self.trainer.training_type_plugin.block_backward_sync():
yield None
else:
yield None
def _process_closure_result(self, batch_outputs: list, opt_idx: int) -> list:
opt_closure_result = self._curr_step_result
if opt_closure_result is not None:
# cache metrics
self.trainer.logger_connector.cache_training_step_metrics(opt_closure_result)
# track hiddens
self.trainer.hiddens = self.process_hiddens(opt_closure_result)
# check if loss or model weights are nan
if self.trainer.terminate_on_nan:
self.trainer.detect_nan_tensors(opt_closure_result.loss)
# track all the outputs across all steps
batch_opt_idx = opt_idx if len(batch_outputs) > 1 else 0
batch_outputs[batch_opt_idx].append(opt_closure_result.training_step_output_for_epoch_end)
if self.automatic_optimization:
# track total loss for logging (avoid mem leaks)
self.accumulated_loss.append(opt_closure_result.loss)
self._curr_step_result = None
return batch_outputs
def training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens):
"""
wrap the forward step in a closure so second order methods work
"""
with self.trainer.profiler.profile("training_step_and_backward"):
# lightning module hook
result = self.training_step(split_batch, batch_idx, opt_idx, hiddens)
self._curr_step_result = result
if result is None:
if self.automatic_optimization:
                    self.warning_cache.warn("training_step returned None. If this was on purpose, ignore this warning...")
return None
if not self._skip_backward and self.trainer.train_loop.automatic_optimization:
# backward pass
with self.trainer.profiler.profile("model_backward"):
self.backward(result, optimizer, opt_idx)
# hook - call this hook only
                # when gradients have finished accumulating
if not self.should_accumulate():
self.on_after_backward(result.training_step_output, batch_idx, result.loss)
# check if loss or model weights are nan
if self.trainer.terminate_on_nan:
self.trainer.detect_nan_tensors(result.loss)
if len(self.trainer.optimizers) > 1:
# revert back to previous state
self.trainer.get_model().untoggle_optimizer(opt_idx)
return result
def backward(self, result, optimizer, opt_idx, *args, **kwargs):
self.trainer.dev_debugger.track_event("backward_call")
should_accumulate = self.should_accumulate()
# backward can be called manually in the training loop
if isinstance(result, torch.Tensor):
self.trainer.accelerator_backend.backward(result, optimizer, opt_idx, should_accumulate, *args, **kwargs)
else:
result.closure_loss = self.trainer.accelerator_backend.backward(
result.closure_loss, optimizer, opt_idx, should_accumulate, *args, **kwargs
)
if not self.should_accumulate():
# track gradients
self.track_and_norm_grad(optimizer=optimizer)
def update_train_loop_lr_schedulers(self, monitor_metrics=None):
num_accumulated_batches_reached = self._accumulated_batches_reached()
num_training_batches_reached = self._num_training_batches_reached()
if num_accumulated_batches_reached or num_training_batches_reached:
# update lr
self.trainer.optimizer_connector.update_learning_rates(interval="step", monitor_metrics=monitor_metrics)
def run_on_epoch_end_hook(self, epoch_output):
# inform logger the batch loop has finished
self.trainer.logger_connector.on_train_epoch_end()
self.trainer.call_hook('on_train_epoch_end', epoch_output)
self.trainer.call_hook('on_epoch_end')
def increment_accumulated_grad_global_step(self):
num_accumulated_batches_reached = self._accumulated_batches_reached()
num_training_batches_reached = self._num_training_batches_reached()
# progress global step according to grads progress
if num_accumulated_batches_reached or num_training_batches_reached:
self.trainer.global_step += 1
def _accumulated_batches_reached(self):
return (self.trainer.batch_idx + 1) % self.trainer.accumulate_grad_batches == 0
def _num_training_batches_reached(self, is_last_batch=False):
return (self.trainer.batch_idx + 1) == self.trainer.num_training_batches or is_last_batch
def should_accumulate(self):
# checks if backward or backward + optimizer step (via closure)
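        # e.g. accumulate_grad_batches=4: batch_idx 0-2 -> True (only accumulate gradients),
        # batch_idx 3 or the last batch of the epoch -> False (backward + optimizer step)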
accumulation_done = self._accumulated_batches_reached()
is_final_batch = self._num_training_batches_reached()
return not (accumulation_done or is_final_batch)
def should_check_val_fx(self, batch_idx, is_last_batch, on_epoch=False):
# decide if we should run validation
is_val_check_batch = (batch_idx + 1) % self.trainer.val_check_batch == 0
is_val_check_epoch = (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch == 0
can_check_val = self.trainer.enable_validation and is_val_check_epoch
is_last_batch_for_infinite_dataset = is_last_batch and self.trainer.val_check_batch == float("inf")
epoch_end_val_check = self.trainer.val_check_batch == self.trainer.num_training_batches
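        # val_check_batch == num_training_batches means validation is scheduled exactly at the
        # epoch boundary, so the mid-epoch branch below skips it and the on_epoch branch runs it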
should_check_val = ((is_val_check_batch and epoch_end_val_check) or self.trainer.should_stop
or is_last_batch_for_infinite_dataset
) if on_epoch else (is_val_check_batch and not epoch_end_val_check)
return should_check_val and can_check_val
def build_train_args(self, batch, batch_idx, opt_idx, hiddens):
        # allow training_step to omit the opt_idx argument when only one optimizer is configured
args = [batch, batch_idx]
if len(self.trainer.optimizers) > 1:
if self.trainer.has_arg("training_step", "optimizer_idx"):
args.append(opt_idx)
else:
num_opts = len(self.trainer.optimizers)
raise ValueError(
f"Your LightningModule defines {num_opts} optimizers but "
f'training_step is missing the "optimizer_idx" argument.'
)
# pass hiddens if using tbptt
if self.trainer.truncated_bptt_steps is not None:
args.append(hiddens)
return args
def save_loggers_on_train_batch_end(self):
# when loggers should save to disk
should_flush_logs = self.trainer.logger_connector.should_flush_logs
if should_flush_logs and self.trainer.is_global_zero and self.trainer.logger is not None:
self.trainer.logger.save()
def process_train_step_outputs(self, all_train_step_outputs, early_stopping_accumulator, checkpoint_accumulator):
"""
Figure out what needs to be tracked/logged at the end of the epoch
"""
# the training step outputs a list per optimizer. The list contains the outputs at each time step
# when no TBPTT is used, then the list has 1 item per batch
# when TBPTT IS used, then the list has n items (1 per time step)
batch_end_outputs = []
for optimizer_idx_outputs in all_train_step_outputs:
# extract one representative sample from each time step (1 if no tbptt) and 0th optimizer
if len(optimizer_idx_outputs) == 0:
continue
sample_output = optimizer_idx_outputs[-1]
# pull out callback info if available (ie: Results object)
if isinstance(sample_output, dict) and "early_stop_on" in sample_output:
early_stopping_accumulator.accumulate(sample_output["early_stop_on"])
if isinstance(sample_output, dict) and "checkpoint_on" in sample_output:
checkpoint_accumulator.accumulate(sample_output["checkpoint_on"])
batch_end_outputs.append(optimizer_idx_outputs)
return batch_end_outputs
def prepare_optimizers(self):
        # in manual optimization the user steps all optimizers inside training_step,
        # so the loop runs only once (with the first optimizer)
optimizers = self.get_optimizers_iterable()
if not self.automatic_optimization:
optimizers = [optimizers[0]]
return optimizers
def run_train_split_start(self, split_idx, split_batch, opt_idx, optimizer):
# set split_idx to trainer for tracking
self.trainer.split_idx = split_idx
# make sure only the gradients of the current optimizer's parameters are calculated
# in the training step to prevent dangling gradients in multiple-optimizer setup.
if self.automatic_optimization and len(self.trainer.optimizers) > 1:
model = self.trainer.get_model()
model.toggle_optimizer(optimizer, opt_idx)
        # used to track metrics internally
self.trainer.logger_connector.on_train_split_start(split_idx, opt_idx, split_batch)
def update_running_loss(self):
accumulated_loss = self.accumulated_loss.mean()
if accumulated_loss is not None:
# calculate running loss for display
self.running_loss.append(self.accumulated_loss.mean() * self.trainer.accumulate_grad_batches)
# reset for next set of accumulated grads
self.accumulated_loss.reset()
|
from typing import List
from aiogram.types import KeyboardButton, ReplyKeyboardMarkup
from keyboards import emojis
from repository import Repository
PART_NAMES: List[str] = ["head", "story", "essence", "proofs", "claims", "additions"]
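# ordered sections of a claim; each one maps to a keyboard button below and is marked
# with a check mark once the user has filled it in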
def get_claim_parts_kb(user_id: int) -> ReplyKeyboardMarkup:
parts_status: dict = get_claim_parts_status(user_id)
claim_parts_kb = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
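    # button labels are in Russian: шапка = header, фабула = narrative of events, суть нарушения =
    # essence of the violation, доказательства = evidence, требования = demands,
    # приложения = attachments, "к шаблонам" = back to templates, "получить" = get the document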
claim_parts_kb\
.add(KeyboardButton(f"{emojis.top_hat} шапка {emojis.check_mark if parts_status['head'] is True else ''}")) \
.add(KeyboardButton(f"{emojis.speech_balloon} фабула {emojis.check_mark if parts_status['story'] is True else ''}")) \
.add(KeyboardButton(f"{emojis.key} суть нарушения {emojis.check_mark if parts_status['essence'] is True else ''}")) \
.add(KeyboardButton(f"{emojis.page_with_curl} доказательства {emojis.check_mark if parts_status['proofs'] is True else ''}")) \
.add(KeyboardButton(f"{emojis.index_pointing_up} требования {emojis.check_mark if parts_status['claims'] is True else ''}")) \
.add(KeyboardButton(f"{emojis.card_index_dividers} приложения {emojis.check_mark if parts_status['additions'] is True else ''}"))
claim_parts_kb.row(*[KeyboardButton(f"{emojis.left_arrow} к шаблонам"),
KeyboardButton(f"{emojis.inbox_tray} получить")])
return claim_parts_kb
def get_claim_parts_status(user_id: int) -> dict:
repository: Repository = Repository()
claim_data: dict = repository.get_claim_data(user_id)
if "claim_data" not in claim_data.keys():
return {pn: False for pn in PART_NAMES}
parts_status: dict = {}
for part_name in PART_NAMES:
if part_name in claim_data["claim_data"].keys():
parts_status.update(**{part_name: True})
else:
parts_status.update(**{part_name: False})
return parts_status
|
import email
import jwt
import datetime
from models.users import User
from bson.objectid import ObjectId
from utils.email_util import sent_email
from flask import jsonify, make_response
from special_variables import _secret_key
from utils.token_util import token_required
from flask_bcrypt import generate_password_hash, check_password_hash
def sign_up_controller(doc):
try:
user = User.objects(email = doc.get('email', None)).first()
if user:
return make_response(jsonify({'msg':'user already exists'}), 400)
if not (doc and doc.get('email', None)):
return make_response(jsonify({'msg':'email and password are required'}), 400)
token = jwt.encode({'email': doc.get('email'), 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes = 30)}, _secret_key, 'HS256')
subject = 'Registration Token'
return sent_email(subject, [doc.get('email')], token), 200
except Exception as e:
return make_response(jsonify({'msg': e.args }), 400)
@token_required
def registration_controller(data, doc):
try:
user = User.objects(email = data.get('email', None)).first()
if user:
return make_response(jsonify({'msg':'user already exists'}), 400)
if not (doc and doc.get('password', None)):
return make_response(jsonify({'msg':'password is required'}), 400)
user = User(email = data.get('email'), password = generate_password_hash(doc.get('password', None)), created_at = datetime.datetime.now(), updated_at = datetime.datetime.now())
user.save()
return make_response(jsonify({"msg": "user created successfully"}), 201)
except Exception as e:
return make_response(jsonify({'msg': e.args }), 400)
def get_users_controller():
try:
users = [{'email': user.email} for user in User.objects]
if not users:
return make_response(jsonify({'msg': "users list is empty"}), 404)
return make_response(jsonify({'users': users}), 200)
except Exception as e:
return make_response(jsonify({'msg': e.args }), 400)
def get_user_controller(id):
try:
user = User.objects(id = ObjectId(id)).first()
if not user:
return make_response(jsonify({"msg": f"user not found, with id: {id}"}), 404)
return make_response(jsonify({'email': user.email}), 200)
except Exception as e:
return make_response(jsonify({'msg':e.args }), 400)
@token_required
def delete_user_controller(data, id, doc):
try:
user = User.objects(id = ObjectId(id)).first()
if not user:
return make_response(jsonify({"msg": f"user not found, with id: {id}"}), 404)
if not (doc and doc.get('email', None) and doc.get('password', None)):
return make_response(jsonify({'msg':'email and password are required'}), 400)
if not (user.email == doc.get('email') and check_password_hash(user.password[2:-1], doc['password'])):
return make_response(jsonify({'msg':'wrong email or password'}), 400)
user.delete()
return make_response(jsonify({"msg": f"user deleted successfully, with id: {id}"}), 204)
except Exception as e:
return make_response(jsonify({'msg':e.args}), 400)
def user_login_controller(doc):
try:
user = User.objects(email = doc.get('email', None)).first()
if not (user and doc.get('password', None)):
return make_response(jsonify({"msg": f"user not exists or incorrect password", "required fields": ['email', 'password'] }), 404)
if user.password[0] != '$':
password = user.password.split("'")[1]
else:
password = user.password
if not check_password_hash(password, doc['password']):
return make_response(jsonify({"msg": "password is incorrect"}))
token = jwt.encode({'email': user.email, 'exp': datetime.datetime.utcnow() + datetime.timedelta(hours=24)}, _secret_key, 'HS256')
return make_response(jsonify({"msg": f"LoggedIn successfully", "token": token}), 200)
except Exception as e:
return make_response(jsonify({'msg':f'{e.args} or invalid data'}), 400)
def forget_password_controller(doc):
try:
email = doc.get('email', None)
user = User.objects(email = email).first()
if not user:
return make_response(jsonify({'msg':f'user not found, with email {email}' } ), 404)
token = jwt.encode({'email': user.email, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes = 20)}, _secret_key, 'HS256')
subject = 'Forget Password Token'
return sent_email(subject, [email], token)
except Exception as e:
return make_response(jsonify({'msg': 'invalid data'}), 400)
@token_required
def reset_password_controller(data, doc):
try:
new_password = doc.get('new_password', None)
if not new_password:
return make_response(jsonify({'msg': 'new password is required'}), 400)
user = User.objects(email = data['email']).first()
if not user:
return make_response(jsonify({"msg": f"user not found, with email: {data["email"]}"}), 404)
user.update(email = user['email'], password = str(generate_password_hash(new_password)), updated_at = datetime.datetime.now())
subject = 'Password reset successful'
body = f'your password has been reset successfully, your new password is: {new_password}'
return sent_email(subject, [user.email], body)
except Exception as e:
return make_response(jsonify({'msg':e.args, 'status': 500}))
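# Hedged wiring sketch (the Flask app object and the URL paths are assumptions,
# not part of this module); each controller receives the parsed JSON body as `doc`:
#
#   from flask import Flask, request
#   app = Flask(__name__)
#
#   @app.route("/signup", methods=["POST"])
#   def signup():
#       return sign_up_controller(request.get_json())
#
#   @app.route("/login", methods=["POST"])
#   def login():
#       return user_login_controller(request.get_json())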
|
import email
import jwt
import datetime
from models.users import User
from bson.objectid import ObjectId
from utils.email_util import sent_email
from flask import jsonify, make_response
from special_variables import _secret_key
from utils.token_util import token_required
from flask_bcrypt import generate_password_hash, check_password_hash
def sign_up_controller(doc):
try:
user = User.objects(email = doc.get('email', None)).first()
if user:
return make_response(jsonify({'msg':'user already exists'}), 400)
if not (doc and doc.get('email', None)):
return make_response(jsonify({'msg':'email and password are required'}), 400)
token = jwt.encode({'email': doc.get('email'), 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes = 30)}, _secret_key, 'HS256')
subject = 'Registration Token'
return sent_email(subject, [doc.get('email')], token), 200
except Exception as e:
return make_response(jsonify({'msg': e.args }), 400)
@token_required
def registration_controller(data, doc):
try:
user = User.objects(email = data.get('email', None)).first()
if user:
return make_response(jsonify({'msg':'user already exists'}), 400)
if not (doc and doc.get('password', None)):
return make_response(jsonify({'msg':'password is required'}), 400)
user = User(email = data.get('email'), password = generate_password_hash(doc.get('password', None)), created_at = datetime.datetime.now(), updated_at = datetime.datetime.now())
user.save()
return make_response(jsonify({"msg": "user created successfully"}), 201)
except Exception as e:
return make_response(jsonify({'msg': e.args }), 400)
def get_users_controller():
try:
users = [{'email': user.email} for user in User.objects]
if not users:
return make_response(jsonify({'msg': "users list is empty"}), 404)
return make_response(jsonify({'users': users}), 200)
except Exception as e:
return make_response(jsonify({'msg': e.args }), 400)
def get_user_controller(id):
try:
user = User.objects(id = ObjectId(id)).first()
if not user:
return make_response(jsonify({"msg": f"user not found, with id: {id}"}), 404)
return make_response(jsonify({'email': user.email}), 200)
except Exception as e:
return make_response(jsonify({'msg':e.args }), 400)
@token_required
def delete_user_controller(data, id, doc):
try:
user = User.objects(id = ObjectId(id)).first()
if not user:
return make_response(jsonify({"msg": f"user not found, with id: {id}"}), 404)
if not (doc and doc.get('email', None) and doc.get('password', None)):
return make_response(jsonify({'msg':'email and password are required'}), 400)
if not (user.email == doc.get('email') and check_password_hash(user.password[2:-1], doc['password'])):
return make_response(jsonify({'msg':'wrong email or password'}), 400)
user.delete()
return make_response(jsonify({"msg": f"user deleted successfully, with id: {id}"}), 204)
except Exception as e:
return make_response(jsonify({'msg':e.args}), 400)
def user_login_controller(doc):
try:
user = User.objects(email = doc.get('email', None)).first()
if not (user and doc.get('password', None)):
return make_response(jsonify({"msg": f"user not exists or incorrect password", "required fields": ['email', 'password'] }), 404)
if user.password[0] != '$':
password = user.password.split("'")[1]
else:
password = user.password
if not check_password_hash(password, doc['password']):
return make_response(jsonify({"msg": "password is incorrect"}))
token = jwt.encode({'email': user.email, 'exp': datetime.datetime.utcnow() + datetime.timedelta(hours=24)}, _secret_key, 'HS256')
return make_response(jsonify({"msg": f"LoggedIn successfully", "token": token}), 200)
except Exception as e:
return make_response(jsonify({'msg':f'{e.args} or invalid data'}), 400)
def forget_password_controller(doc):
try:
email = doc.get('email', None)
user = User.objects(email = email).first()
if not user:
return make_response(jsonify({'msg':f'user not found, with email {email}' } ), 404)
token = jwt.encode({'email': user.email, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes = 20)}, _secret_key, 'HS256')
subject = 'Forget Password Token'
return sent_email(subject, [email], token)
except Exception as e:
return make_response(jsonify({'msg': 'invalid data'}), 400)
@token_required
def reset_password_controller(data, doc):
try:
new_password = doc.get('new_password', None)
if not new_password:
return make_response(jsonify({'msg': 'new password is required'}), 400)
user = User.objects(email = data['email']).first()
if not user:
return make_response(jsonify({"msg": f"user not found, with email: {data['email']}"}), 404)
user.update(email = user['email'], password = str(generate_password_hash(new_password)), updated_at = datetime.datetime.now())
subject = 'Password reset successful'
body = f'your password has been reset successfully, your new password is: {new_password}'
return sent_email(subject, [user.email], body)
except Exception as e:
return make_response(jsonify({'msg':e.args, 'status': 500}))
|
import datetime
import hashlib
import time
from collections import namedtuple, OrderedDict
from copy import copy
from itertools import chain
import csv
import gevent
from .exception import StopUser, CatchResponseError
import logging
console_logger = logging.getLogger("locust.stats_logger")
STATS_NAME_WIDTH = 60
STATS_TYPE_WIDTH = 8
"""Default interval for how frequently results are written to console."""
CONSOLE_STATS_INTERVAL_SEC = 2
"""Default interval for how frequently results are written to history."""
HISTORY_STATS_INTERVAL_SEC = 5
"""Default interval for how frequently CSV files are written if this option is configured."""
CSV_STATS_INTERVAL_SEC = 1
CSV_STATS_FLUSH_INTERVAL_SEC = 10
"""
Default window size/resolution - in seconds - when calculating the current
response time percentile
"""
CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW = 10
CachedResponseTimes = namedtuple("CachedResponseTimes", ["response_times", "num_requests"])
PERCENTILES_TO_REPORT = [0.50, 0.66, 0.75, 0.80, 0.90, 0.95, 0.98, 0.99, 0.999, 0.9999, 1.0]
class RequestStatsAdditionError(Exception):
pass
def get_readable_percentiles(percentile_list):
"""
Converts a list of percentiles from 0-1 fraction to 0%-100% view for using in console & csv reporting
:param percentile_list: The list of percentiles in range 0-1
:return: The list of string representation for each percentile in 0%-100% view
"""
return [
f"{int(percentile * 100) if (percentile * 100).is_integer() else round(100 * percentile, 6)}%"
for percentile in percentile_list
]
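# Worked example (values follow from the expression above):
#   get_readable_percentiles([0.50, 0.95]) -> ["50%", "95%"]
# Non-integral percentages keep their decimals, e.g. 0.999 -> "99.9%".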
def calculate_response_time_percentile(response_times, num_requests, percent):
"""
    Get the response time within which a certain percentage of the requests
    finished. Arguments:
    response_times: A StatsEntry.response_times dict
    num_requests: Number of requests made (could be derived from response_times,
but we save some CPU cycles by using the value which we already store)
percent: The percentile we want to calculate. Specified in range: 0.0 - 1.0
"""
num_of_request = int((num_requests * percent))
processed_count = 0
for response_time in sorted(response_times.keys(), reverse=True):
processed_count += response_times[response_time]
if num_requests - processed_count <= num_of_request:
return response_time
# if all response times were None
return 0
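# Worked example (values follow from the loop above): with 10 requests
# distributed as {100: 8, 200: 1, 400: 1},
#   calculate_response_time_percentile({100: 8, 200: 1, 400: 1}, 10, 0.95) == 400
#   calculate_response_time_percentile({100: 8, 200: 1, 400: 1}, 10, 0.50) == 100
# i.e. walking from the slowest bucket down, it returns the first response time
# once roughly (1 - percent) of the requests have been counted.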
def calculate_response_time_average(response_times, num_requests):
"""
    Calculate the average response time from a response_times dict. Arguments:
    response_times: A StatsEntry.response_times dict
    num_requests: Number of requests made (accepted for signature symmetry with the
    other calculate_response_time_* helpers; the effective count is re-derived from
    the dict while summing)
"""
num_of_request = int(num_requests)
sum_val = 0
processed_count = 0
for response_time in sorted(response_times.keys(), reverse=True):
processed_count += response_times[response_time]
sum_val += response_time * response_times[response_time]
num_of_request = processed_count
if num_of_request > 0:
return int(sum_val / float(num_of_request))
else:
return 0
def calculate_response_time_max(response_times, num_requests):
"""
    Calculate the maximum response time found in a response_times dict. Arguments:
    response_times: A StatsEntry.response_times dict
    num_requests: Number of requests made (unused here; accepted for signature
    symmetry with the other calculate_response_time_* helpers)
"""
num_of_request = int(num_requests)
max_val = 0
processed_count = 0
for response_time in sorted(response_times.keys(), reverse=True):
processed_count += response_times[response_time]
if response_time > max_val:
max_val = response_time
if max_val is None:
return None
return int(max_val)
def calculate_response_time_min(response_times, num_requests):
"""
    Calculate the minimum response time found in a response_times dict. Arguments:
    response_times: A StatsEntry.response_times dict
    num_requests: Number of requests made (unused here; accepted for signature
    symmetry with the other calculate_response_time_* helpers)
"""
num_of_request = int(num_requests)
min_val = None
processed_count = 0
for response_time in sorted(response_times.keys(), reverse=True):
processed_count += response_times[response_time]
if min_val is None:
min_val = response_time
elif response_time < min_val:
min_val = response_time
if min_val is None:
return None
return int(min_val)
def diff_response_time_dicts(latest, old):
"""
Returns the delta between two {response_times:request_count} dicts.
Used together with the response_times cache to get the response times for the
last X seconds, which in turn is used to calculate the current response time
percentiles.
"""
new = {}
for t in latest:
diff = latest[t] - old.get(t, 0)
if diff:
new[t] = diff
return new
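# Worked example: diff_response_time_dicts({100: 5, 200: 3}, {100: 2})
# returns {100: 3, 200: 3}; buckets whose delta is zero are dropped.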
class RequestStats:
"""
Class that holds the request statistics.
"""
def __init__(self, use_response_times_cache=True):
"""
:param use_response_times_cache: The value of use_response_times_cache will be set for each StatsEntry()
                                         when they are created. Setting it to False saves some memory and CPU
cycles which we can do on Worker nodes where the response_times_cache
is not needed.
"""
self.use_response_times_cache = use_response_times_cache
self.entries = {}
self.errors = {}
self.total = StatsEntry(self, "Aggregated", None, use_response_times_cache=self.use_response_times_cache)
self.history = []
@property
def num_requests(self):
return self.total.num_requests
@property
def num_none_requests(self):
return self.total.num_none_requests
@property
def num_failures(self):
return self.total.num_failures
@property
def last_request_timestamp(self):
return self.total.last_request_timestamp
@property
def start_time(self):
return self.total.start_time
def log_request(self, method, name, response_time, content_length):
self.total.log(response_time, content_length)
self.get(name, method).log(response_time, content_length)
def log_error(self, method, name, error):
self.total.log_error(error)
self.get(name, method).log_error(error)
# store error in errors dict
key = StatsError.create_key(method, name, error)
entry = self.errors.get(key)
if not entry:
entry = StatsError(method, name, error)
self.errors[key] = entry
entry.occurred()
def get(self, name, method):
"""
Retrieve a StatsEntry instance by name and method
"""
entry = self.entries.get((name, method))
if not entry:
entry = StatsEntry(self, name, method, use_response_times_cache=self.use_response_times_cache)
self.entries[(name, method)] = entry
return entry
def reset_all(self):
"""
Go through all stats entries and reset them to zero
"""
self.total.reset()
self.errors = {}
for r in self.entries.values():
r.reset()
self.history = []
def clear_all(self):
"""
Remove all stats entries and errors
"""
self.total = StatsEntry(self, "Aggregated", None, use_response_times_cache=self.use_response_times_cache)
self.entries = {}
self.errors = {}
self.history = []
def serialize_stats(self):
return [
self.entries[key].get_stripped_report()
for key in self.entries.keys()
if not (self.entries[key].num_requests == 0 and self.entries[key].num_failures == 0)
]
def serialize_errors(self):
return dict([(k, e.to_dict()) for k, e in self.errors.items()])
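# Minimal usage sketch (illustrative only; values follow from the methods above):
#   stats = RequestStats()
#   stats.log_request("GET", "/home", 120, 512)   # method, name, response_time, content_length
#   stats.log_error("GET", "/home", ZeroDivisionError("boom"))
#   stats.num_requests               # 1
#   stats.num_failures               # 1
#   stats.total.avg_response_time    # 120.0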
class StatsEntry:
"""
Represents a single stats entry (name and method)
"""
name = None
""" Name (URL) of this stats entry """
method = None
""" Method (GET, POST, PUT, etc.) """
num_requests = None
""" The number of requests made """
num_none_requests = None
""" The number of requests made with a None response time (typically async requests) """
num_failures = None
""" Number of failed request """
total_response_time = None
""" Total sum of the response times """
min_response_time = None
""" Minimum response time """
max_response_time = None
""" Maximum response time """
num_reqs_per_sec = None
""" A {second => request_count} dict that holds the number of requests made per second """
num_fail_per_sec = None
""" A (second => failure_count) dict that hold the number of failures per second """
response_times = None
"""
A {response_time => count} dict that holds the response time distribution of all
the requests.
The keys (the response time in ms) are rounded to store 1, 2, ... 9, 10, 20. .. 90,
100, 200 .. 900, 1000, 2000 ... 9000, in order to save memory.
This dict is used to calculate the median and percentile response times.
"""
use_response_times_cache = False
"""
If set to True, the copy of the response_time dict will be stored in response_times_cache
every second, and kept for 20 seconds (by default, will be CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW + 10).
We can use this dict to calculate the *current* median response time, as well as other response
time percentiles.
"""
response_times_cache = None
"""
If use_response_times_cache is set to True, this will be a {timestamp => CachedResponseTimes()}
OrderedDict that holds a copy of the response_times dict for each of the last 20 seconds.
"""
total_content_length = None
""" The sum of the content length of all the requests for this entry """
start_time = None
""" Time of the first request for this entry """
last_request_timestamp = None
""" Time of the last request for this entry """
def __init__(self, stats, name, method, use_response_times_cache=False):
self.stats = stats
self.name = name
self.method = method
self.use_response_times_cache = use_response_times_cache
self.reset()
def reset(self):
self.start_time = time.time()
self.num_requests = 0
self.num_none_requests = 0
self.num_failures = 0
self.total_response_time = 0
self.response_times = {}
self.min_response_time = None
self.max_response_time = 0
self.last_request_timestamp = None
self.num_reqs_per_sec = {}
self.num_fail_per_sec = {}
self.total_content_length = 0
if self.use_response_times_cache:
self.response_times_cache = OrderedDict()
self._cache_response_times(int(time.time()))
def log(self, response_time, content_length):
# get the time
current_time = time.time()
t = int(current_time)
if self.use_response_times_cache and self.last_request_timestamp and t > int(self.last_request_timestamp):
# see if we shall make a copy of the response_times dict and store in the cache
self._cache_response_times(t - 1)
self.num_requests += 1
self._log_time_of_request(current_time)
self._log_response_time(response_time)
# increase total content-length
self.total_content_length += content_length
def _log_time_of_request(self, current_time):
t = int(current_time)
self.num_reqs_per_sec[t] = self.num_reqs_per_sec.setdefault(t, 0) + 1
self.last_request_timestamp = current_time
def _log_response_time(self, response_time):
if response_time is None:
self.num_none_requests += 1
return
self.total_response_time += response_time
if self.min_response_time is None:
self.min_response_time = response_time
self.min_response_time = min(self.min_response_time, response_time)
self.max_response_time = max(self.max_response_time, response_time)
        # to avoid too much data that has to be transferred to the master node when
# running in distributed mode, we save the response time rounded in a dict
# so that 147 becomes 150, 3432 becomes 3400 and 58760 becomes 59000
if response_time < 100:
rounded_response_time = round(response_time)
elif response_time < 1000:
rounded_response_time = round(response_time, -1)
elif response_time < 10000:
rounded_response_time = round(response_time, -2)
else:
rounded_response_time = round(response_time, -3)
# increase request count for the rounded key in response time dict
self.response_times.setdefault(rounded_response_time, 0)
self.response_times[rounded_response_time] += 1
def log_error(self, error):
self.num_failures += 1
t = int(time.time())
self.num_fail_per_sec[t] = self.num_fail_per_sec.setdefault(t, 0) + 1
@property
def fail_ratio(self):
try:
return float(self.num_failures) / self.num_requests
except ZeroDivisionError:
if self.num_failures > 0:
return 1.0
else:
return 0.0
@property
def avg_response_time(self):
try:
return float(self.total_response_time) / (self.num_requests - self.num_none_requests)
except ZeroDivisionError:
return 0
@property
def median_response_time(self):
if not self.response_times:
return 0
median = median_from_dict(self.num_requests - self.num_none_requests, self.response_times) or 0
# Since we only use two digits of precision when calculating the median response time
# while still using the exact values for min and max response times, the following checks
# makes sure that we don't report a median > max or median < min when a StatsEntry only
# have one (or very few) really slow requests
if median > self.max_response_time:
median = self.max_response_time
elif median < self.min_response_time:
median = self.min_response_time
return median
@property
def current_rps(self):
if self.stats.last_request_timestamp is None:
return 0
slice_start_time = max(int(self.stats.last_request_timestamp) - 12, int(self.stats.start_time or 0))
reqs = [
self.num_reqs_per_sec.get(t, 0) for t in range(slice_start_time, int(self.stats.last_request_timestamp) - 2)
]
return avg(reqs)
@property
def current_fail_per_sec(self):
if self.stats.last_request_timestamp is None:
return 0
slice_start_time = max(int(self.stats.last_request_timestamp) - 12, int(self.stats.start_time or 0))
reqs = [
self.num_fail_per_sec.get(t, 0) for t in range(slice_start_time, int(self.stats.last_request_timestamp) - 2)
]
return avg(reqs)
@property
def total_rps(self):
if not self.stats.last_request_timestamp or not self.stats.start_time:
return 0.0
try:
return self.num_requests / (self.stats.last_request_timestamp - self.stats.start_time)
except ZeroDivisionError:
return 0.0
@property
def total_fail_per_sec(self):
if not self.stats.last_request_timestamp or not self.stats.start_time:
return 0.0
try:
return self.num_failures / (self.stats.last_request_timestamp - self.stats.start_time)
except ZeroDivisionError:
return 0.0
@property
def avg_content_length(self):
try:
return self.total_content_length / self.num_requests
except ZeroDivisionError:
return 0
def extend(self, other):
"""
Extend the data from the current StatsEntry with the stats from another
StatsEntry instance.
"""
# save the old last_request_timestamp, to see if we should store a new copy
# of the response times in the response times cache
old_last_request_timestamp = self.last_request_timestamp
if self.last_request_timestamp is not None and other.last_request_timestamp is not None:
self.last_request_timestamp = max(self.last_request_timestamp, other.last_request_timestamp)
elif other.last_request_timestamp is not None:
self.last_request_timestamp = other.last_request_timestamp
self.start_time = min(self.start_time, other.start_time)
self.num_requests = self.num_requests + other.num_requests
self.num_none_requests = self.num_none_requests + other.num_none_requests
self.num_failures = self.num_failures + other.num_failures
self.total_response_time = self.total_response_time + other.total_response_time
self.max_response_time = max(self.max_response_time, other.max_response_time)
if self.min_response_time is not None and other.min_response_time is not None:
self.min_response_time = min(self.min_response_time, other.min_response_time)
elif other.min_response_time is not None:
# this means self.min_response_time is None, so we can safely replace it
self.min_response_time = other.min_response_time
self.total_content_length = self.total_content_length + other.total_content_length
for key in other.response_times:
self.response_times[key] = self.response_times.get(key, 0) + other.response_times[key]
for key in other.num_reqs_per_sec:
self.num_reqs_per_sec[key] = self.num_reqs_per_sec.get(key, 0) + other.num_reqs_per_sec[key]
for key in other.num_fail_per_sec:
self.num_fail_per_sec[key] = self.num_fail_per_sec.get(key, 0) + other.num_fail_per_sec[key]
if self.use_response_times_cache:
# If we've entered a new second, we'll cache the response times. Note that there
            # might still be reports from other worker nodes - containing requests for the same
            # time periods - that haven't been received/accounted for yet. This will cause the cache to
# lag behind a second or two, but since StatsEntry.current_response_time_percentile()
# (which is what the response times cache is used for) uses an approximation of the
# last 10 seconds anyway, it should be fine to ignore this.
last_time = self.last_request_timestamp and int(self.last_request_timestamp) or None
if last_time and last_time > (old_last_request_timestamp and int(old_last_request_timestamp) or 0):
self._cache_response_times(last_time)
def serialize(self):
return {
"name": self.name,
"method": self.method,
"last_request_timestamp": self.last_request_timestamp,
"start_time": self.start_time,
"num_requests": self.num_requests,
"num_none_requests": self.num_none_requests,
"num_failures": self.num_failures,
"total_response_time": self.total_response_time,
"max_response_time": self.max_response_time,
"min_response_time": self.min_response_time,
"total_content_length": self.total_content_length,
"response_times": self.response_times,
"num_reqs_per_sec": self.num_reqs_per_sec,
"num_fail_per_sec": self.num_fail_per_sec,
}
@classmethod
def unserialize(cls, data):
obj = cls(None, data["name"], data["method"])
for key in [
"last_request_timestamp",
"start_time",
"num_requests",
"num_none_requests",
"num_failures",
"total_response_time",
"max_response_time",
"min_response_time",
"total_content_length",
"response_times",
"num_reqs_per_sec",
"num_fail_per_sec",
]:
setattr(obj, key, data[key])
return obj
def get_stripped_report(self):
"""
Return the serialized version of this StatsEntry, and then clear the current stats.
"""
report = self.serialize()
self.reset()
return report
def to_string(self, current=True):
"""
Return the stats as a string suitable for console output. If current is True, it'll show
the RPS and failure rate for the last 10 seconds. If it's false, it'll show the total stats
for the whole run.
"""
if current:
rps = self.current_rps
fail_per_sec = self.current_fail_per_sec
else:
rps = self.total_rps
fail_per_sec = self.total_fail_per_sec
return (" %-" + str(STATS_NAME_WIDTH) + "s %7d %12s | %7d %7d %7d %7d | %7.2f %7.2f") % (
(self.method and self.method + " " or "") + self.name,
self.num_requests,
"%d(%.2f%%)" % (self.num_failures, self.fail_ratio * 100),
self.avg_response_time,
self.min_response_time or 0,
self.max_response_time,
self.median_response_time or 0,
rps or 0,
fail_per_sec or 0,
)
def __str__(self):
return self.to_string(current=True)
def get_response_time_percentile(self, percent):
"""
        Get the response time within which a certain percentage of the requests
        finished.
Percent specified in range: 0.0 - 1.0
"""
return calculate_response_time_percentile(self.response_times, self.num_requests, percent)
def get_current_response_time_average(self):
"""
        Calculate the *current* average response time. We use a sliding
window of (approximately) the last 10 seconds (specified by CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW)
when calculating this.
"""
if not self.use_response_times_cache:
raise ValueError("StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile")
# First, we want to determine which of the cached response_times dicts we should
# use to get response_times for approximately 10 seconds ago.
t = int(time.time())
        # Since we can't be sure that the cache contains an entry for every second,
        # we'll construct a list of timestamps which we consider acceptable keys to be used
# when trying to fetch the cached response_times. We construct this list in such a way
# that it's ordered by preference by starting to add t-10, then t-11, t-9, t-12, t-8,
# and so on
acceptable_timestamps = []
for i in range(9):
acceptable_timestamps.append(t-CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW-i)
acceptable_timestamps.append(t-CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW+i)
cached = None
for ts in acceptable_timestamps:
if ts in self.response_times_cache:
cached = self.response_times_cache[ts]
break
if cached:
            # If we found an acceptable cached response_times dict, we'll calculate a new response
            # times dict of the last 10 seconds (approximately) by diffing it with the current
            # total response times. Then we'll use that to calculate the average response time
            # for that timeframe
return calculate_response_time_average(
diff_response_time_dicts(self.response_times, cached.response_times),
self.num_requests - cached.num_requests,
)
def get_current_response_time_max(self):
"""
        Calculate the *current* maximum response time. We use a sliding
window of (approximately) the last 10 seconds (specified by CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW)
when calculating this.
"""
if not self.use_response_times_cache:
raise ValueError("StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile")
# First, we want to determine which of the cached response_times dicts we should
# use to get response_times for approximately 10 seconds ago.
t = int(time.time())
        # Since we can't be sure that the cache contains an entry for every second,
        # we'll construct a list of timestamps which we consider acceptable keys to be used
# when trying to fetch the cached response_times. We construct this list in such a way
# that it's ordered by preference by starting to add t-10, then t-11, t-9, t-12, t-8,
# and so on
acceptable_timestamps = []
for i in range(9):
acceptable_timestamps.append(t-CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW-i)
acceptable_timestamps.append(t-CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW+i)
cached = None
for ts in acceptable_timestamps:
if ts in self.response_times_cache:
cached = self.response_times_cache[ts]
break
if cached:
            # If we found an acceptable cached response_times dict, we'll calculate a new response
            # times dict of the last 10 seconds (approximately) by diffing it with the current
            # total response times. Then we'll use that to calculate the maximum response time
            # for that timeframe
return calculate_response_time_max(
diff_response_time_dicts(self.response_times, cached.response_times),
self.num_requests - cached.num_requests,
)
def get_current_response_time_min(self):
"""
        Calculate the *current* minimum response time. We use a sliding
window of (approximately) the last 10 seconds (specified by CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW)
when calculating this.
"""
if not self.use_response_times_cache:
raise ValueError("StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile")
# First, we want to determine which of the cached response_times dicts we should
# use to get response_times for approximately 10 seconds ago.
t = int(time.time())
        # Since we can't be sure that the cache contains an entry for every second,
        # we'll construct a list of timestamps which we consider acceptable keys to be used
# when trying to fetch the cached response_times. We construct this list in such a way
# that it's ordered by preference by starting to add t-10, then t-11, t-9, t-12, t-8,
# and so on
acceptable_timestamps = []
for i in range(9):
acceptable_timestamps.append(t - CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW - i)
acceptable_timestamps.append(t - CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW + i)
cached = None
for ts in acceptable_timestamps:
if ts in self.response_times_cache:
cached = self.response_times_cache[ts]
break
if cached:
            # If we found an acceptable cached response_times dict, we'll calculate a new response
            # times dict of the last 10 seconds (approximately) by diffing it with the current
            # total response times. Then we'll use that to calculate the minimum response time
            # for that timeframe
return calculate_response_time_min(
diff_response_time_dicts(self.response_times, cached.response_times),
self.num_requests - cached.num_requests,
)
def get_current_response_time_percentile(self, percent):
"""
Calculate the *current* response time for a certain percentile. We use a sliding
window of (approximately) the last 10 seconds (specified by CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW)
when calculating this.
"""
if not self.use_response_times_cache:
raise ValueError(
"StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile"
)
# First, we want to determine which of the cached response_times dicts we should
# use to get response_times for approximately 10 seconds ago.
t = int(time.time())
        # Since we can't be sure that the cache contains an entry for every second,
        # we'll construct a list of timestamps which we consider acceptable keys to be used
# when trying to fetch the cached response_times. We construct this list in such a way
# that it's ordered by preference by starting to add t-10, then t-11, t-9, t-12, t-8,
# and so on
acceptable_timestamps = []
acceptable_timestamps.append(t - CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW)
for i in range(1, 9):
acceptable_timestamps.append(t - CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW - i)
acceptable_timestamps.append(t - CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW + i)
cached = None
for ts in acceptable_timestamps:
if ts in self.response_times_cache:
cached = self.response_times_cache[ts]
break
if cached:
            # If we found an acceptable cached response_times dict, we'll calculate a new response
# times dict of the last 10 seconds (approximately) by diffing it with the current
# total response times. Then we'll use that to calculate a response time percentile
# for that timeframe
return calculate_response_time_percentile(
diff_response_time_dicts(self.response_times, cached.response_times),
self.num_requests - cached.num_requests,
percent,
)
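    # Example of the lookup order above: with t = 1000 and a 10 second window the
    # acceptable_timestamps list becomes [990, 989, 991, 988, 992, ...], i.e. the
    # snapshot closest to "10 seconds ago" is preferred, trying slightly older
    # timestamps before slightly newer ones.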
def percentile(self):
if not self.num_requests:
raise ValueError("Can't calculate percentile on url with no successful requests")
tpl = f" %-{str(STATS_TYPE_WIDTH)}s %-{str(STATS_NAME_WIDTH)}s %8d {" ".join(["%6d"] * len(PERCENTILES_TO_REPORT))}"
return tpl % (
(self.method, self.name)
+ tuple([self.get_response_time_percentile(p) for p in PERCENTILES_TO_REPORT])
+ (self.num_requests,)
)
def _cache_response_times(self, t):
self.response_times_cache[t] = CachedResponseTimes(
response_times=copy(self.response_times),
num_requests=self.num_requests,
)
# We'll use a cache size of CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW + 10 since - in the extreme case -
# we might still use response times (from the cache) for t-CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW-10
# to calculate the current response time percentile, if we're missing cached values for the subsequent
# 20 seconds
cache_size = CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW + 10
if len(self.response_times_cache) > cache_size:
# only keep the latest 20 response_times dicts
for i in range(len(self.response_times_cache) - cache_size):
self.response_times_cache.popitem(last=False)
class StatsError:
def __init__(self, method, name, error, occurrences=0):
self.method = method
self.name = name
self.error = error
self.occurrences = occurrences
@classmethod
def parse_error(cls, error):
string_error = repr(error)
target = "object at 0x"
target_index = string_error.find(target)
if target_index < 0:
return string_error
start = target_index + len(target) - 2
end = string_error.find(">", start)
if end < 0:
return string_error
hex_address = string_error[start:end]
return string_error.replace(hex_address, "0x....")
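    # Example: if repr(error) is "<MyError object at 0x7ff3a2b1c4d0>", parse_error
    # returns "<MyError object at 0x....>", so the same error raised by different
    # object instances is aggregated under a single key by create_key below.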
@classmethod
def create_key(cls, method, name, error):
key = "%s.%s.%r" % (method, name, StatsError.parse_error(error))
return hashlib.md5(key.encode("utf-8")).hexdigest()
def occurred(self):
self.occurrences += 1
def to_name(self):
error = self.error
if isinstance(error, CatchResponseError):
# standalone
unwrapped_error = error.args[0]
        elif isinstance(error, str) and error.startswith("CatchResponseError("):
# distributed
length = len("CatchResponseError(")
unwrapped_error = error[length:-1]
else:
# standalone, unwrapped exception
unwrapped_error = repr(error)
return "%s %s: %s" % (self.method, self.name, unwrapped_error)
def to_dict(self):
return {
"method": self.method,
"name": self.name,
"error": StatsError.parse_error(self.error),
"occurrences": self.occurrences,
}
@classmethod
def from_dict(cls, data):
return cls(data["method"], data["name"], data["error"], data["occurrences"])
def avg(values):
return sum(values, 0.0) / max(len(values), 1)
def median_from_dict(total, count):
"""
total is the number of requests made
count is a dict {response_time: count}
"""
pos = (total - 1) / 2
for k in sorted(count.keys()):
if pos < count[k]:
return k
pos -= count[k]
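# Worked example: median_from_dict(3, {1: 2, 5: 1}) == 1
# pos starts at (3 - 1) / 2 = 1.0; the first bucket (response time 1) already
# holds 2 observations, so 1.0 < 2 and 1 is returned as the median.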
def setup_distributed_stats_event_listeners(events, stats):
def on_report_to_master(client_id, data):
data["stats"] = stats.serialize_stats()
data["stats_total"] = stats.total.get_stripped_report()
data["errors"] = stats.serialize_errors()
stats.errors = {}
def on_worker_report(client_id, data):
for stats_data in data["stats"]:
entry = StatsEntry.unserialize(stats_data)
request_key = (entry.name, entry.method)
            if request_key not in stats.entries:
stats.entries[request_key] = StatsEntry(stats, entry.name, entry.method, use_response_times_cache=True)
stats.entries[request_key].extend(entry)
for error_key, error in data["errors"].items():
if error_key not in stats.errors:
stats.errors[error_key] = StatsError.from_dict(error)
else:
stats.errors[error_key].occurrences += error["occurrences"]
stats.total.extend(StatsEntry.unserialize(data["stats_total"]))
events.report_to_master.add_listener(on_report_to_master)
events.worker_report.add_listener(on_worker_report)
def print_stats(stats, current=True):
console_logger.info(
(" %-" + str(STATS_NAME_WIDTH) + "s %7s %12s | %7s %7s %7s %7s | %7s %7s")
% ("Name", "# reqs", "# fails", "Avg", "Min", "Max", "Median", "req/s", "failures/s")
)
console_logger.info("-" * (80 + STATS_NAME_WIDTH))
for key in sorted(stats.entries.keys()):
r = stats.entries[key]
console_logger.info(r.to_string(current=current))
console_logger.info("-" * (80 + STATS_NAME_WIDTH))
console_logger.info(stats.total.to_string(current=current))
console_logger.info("")
def print_percentile_stats(stats):
console_logger.info("Response time percentiles (approximated)")
headers = ("Type", "Name") + tuple(get_readable_percentiles(PERCENTILES_TO_REPORT)) + ("# reqs",)
console_logger.info(
(
f" %-{str(STATS_TYPE_WIDTH)}s %-{str(STATS_NAME_WIDTH)}s %8s "
f"{" ".join(["%6s"] * len(PERCENTILES_TO_REPORT))}"
)
% headers
)
separator = (
        f'{"-" * STATS_TYPE_WIDTH}|{"-" * STATS_NAME_WIDTH}|{"-" * 9}|{("-" * 6 + "|") * len(PERCENTILES_TO_REPORT)}'
)
console_logger.info(separator)
for key in sorted(stats.entries.keys()):
r = stats.entries[key]
if r.response_times:
console_logger.info(r.percentile())
console_logger.info(separator)
if stats.total.response_times:
console_logger.info(stats.total.percentile())
console_logger.info("")
def print_error_report(stats):
if not len(stats.errors):
return
console_logger.info("Error report")
console_logger.info(" %-18s %-100s" % ("# occurrences", "Error"))
console_logger.info("-" * (80 + STATS_NAME_WIDTH))
for error in stats.errors.values():
console_logger.info(" %-18i %-100s" % (error.occurrences, error.to_name()))
console_logger.info("-" * (80 + STATS_NAME_WIDTH))
console_logger.info("")
def stats_printer(stats):
def stats_printer_func():
while True:
print_stats(stats)
gevent.sleep(CONSOLE_STATS_INTERVAL_SEC)
return stats_printer_func
def sort_stats(stats):
return [stats[key] for key in sorted(stats.keys())]
def stats_history(runner):
"""Save current stats info to history for charts of report."""
while True:
stats = runner.stats
if not stats.total.use_response_times_cache:
break
r = {
"time": datetime.datetime.now().strftime("%H:%M:%S"),
"current_rps": stats.total.current_rps or 0,
"current_fail_per_sec": stats.total.current_fail_per_sec or 0,
"response_time_percentile_95": stats.total.get_current_response_time_percentile(0.95) or 0,
"response_time_percentile_50": stats.total.get_current_response_time_percentile(0.5) or 0,
"user_count": runner.user_count or 0,
}
stats.history.append(r)
gevent.sleep(HISTORY_STATS_INTERVAL_SEC)
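# Hedged note (the wiring is assumed, not shown in this module): stats_printer()
# and stats_history() are meant to run as background greenlets, e.g.
#   gevent.spawn(stats_printer(runner.stats))
#   gevent.spawn(stats_history, runner)
# where `runner` is assumed to expose the .stats and .user_count attributes used above.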
class StatsCSV:
"""Write statistics to csv_writer stream."""
def __init__(self, environment, percentiles_to_report):
super().__init__()
self.environment = environment
self.percentiles_to_report = percentiles_to_report
self.percentiles_na = ["N/A"] * len(self.percentiles_to_report)
self.requests_csv_columns = [
"Type",
"Name",
"Request Count",
"Failure Count",
"Median Response Time",
"Average Response Time",
"Min Response Time",
"Max Response Time",
"Average Content Size",
"Requests/s",
"Failures/s",
] + get_readable_percentiles(self.percentiles_to_report)
self.failures_columns = [
"Method",
"Name",
"Error",
"Occurrences",
]
self.exceptions_columns = [
"Count",
"Message",
"Traceback",
"Nodes",
]
def _percentile_fields(self, stats_entry):
return (
[int(stats_entry.get_response_time_percentile(x) or 0) for x in self.percentiles_to_report]
if stats_entry.num_requests
else self.percentiles_na
)
def requests_csv(self, csv_writer):
"""Write requests csv with header and data rows."""
csv_writer.writerow(self.requests_csv_columns)
self._requests_data_rows(csv_writer)
def _requests_data_rows(self, csv_writer):
"""Write requests csv data row, excluding header."""
stats = self.environment.stats
for stats_entry in chain(sort_stats(stats.entries), [stats.total]):
csv_writer.writerow(
chain(
[
stats_entry.method,
stats_entry.name,
stats_entry.num_requests,
stats_entry.num_failures,
stats_entry.median_response_time,
stats_entry.avg_response_time,
stats_entry.min_response_time or 0,
stats_entry.max_response_time,
stats_entry.avg_content_length,
stats_entry.total_rps,
stats_entry.total_fail_per_sec,
],
self._percentile_fields(stats_entry),
)
)
def failures_csv(self, csv_writer):
csv_writer.writerow(self.failures_columns)
self._failures_data_rows(csv_writer)
def _failures_data_rows(self, csv_writer):
for stats_error in sort_stats(self.environment.stats.errors):
csv_writer.writerow(
[
stats_error.method,
stats_error.name,
stats_error.error,
stats_error.occurrences,
]
)
def exceptions_csv(self, csv_writer):
csv_writer.writerow(self.exceptions_columns)
self._exceptions_data_rows(csv_writer)
def _exceptions_data_rows(self, csv_writer):
for exc in self.environment.runner.exceptions.values():
csv_writer.writerow(
[
exc["count"],
exc["msg"],
exc["traceback"],
", ".join(exc["nodes"])
]
)
class StatsCSVFileWriter(StatsCSV):
"""Write statistics to to CSV files"""
def __init__(self, environment, percentiles_to_report, base_filepath, full_history=False):
super().__init__(environment, percentiles_to_report)
self.base_filepath = base_filepath
self.full_history = full_history
self.requests_csv_filehandle = open(self.base_filepath + "_stats.csv", "w")
self.requests_csv_writer = csv.writer(self.requests_csv_filehandle)
self.stats_history_csv_filehandle = open(self.stats_history_file_name(), "w")
self.stats_history_csv_writer = csv.writer(self.stats_history_csv_filehandle)
self.failures_csv_filehandle = open(self.base_filepath + "_failures.csv", "w")
self.failures_csv_writer = csv.writer(self.failures_csv_filehandle)
self.failures_csv_data_start = 0
self.exceptions_csv_filehandle = open(self.base_filepath + "_exceptions.csv", "w")
self.exceptions_csv_writer = csv.writer(self.exceptions_csv_filehandle)
self.exceptions_csv_data_start = 0
self.stats_history_csv_columns = [
"Timestamp",
"User Count",
"Type",
"Name",
"Requests/s",
"Failures/s",
*get_readable_percentiles(self.percentiles_to_report),
"Total Request Count",
"Total Failure Count",
"Total Median Response Time",
"Total Average Response Time",
"Total Min Response Time",
"Total Max Response Time",
"Total Average Content Size",
]
def __call__(self):
self.stats_writer()
def stats_writer(self):
"""Writes all the csv files for the locust run."""
# Write header row for all files and save position for non-append files
self.requests_csv_writer.writerow(self.requests_csv_columns)
requests_csv_data_start = self.requests_csv_filehandle.tell()
self.stats_history_csv_writer.writerow(self.stats_history_csv_columns)
self.failures_csv_writer.writerow(self.failures_columns)
self.failures_csv_data_start = self.failures_csv_filehandle.tell()
self.exceptions_csv_writer.writerow(self.exceptions_columns)
self.exceptions_csv_data_start = self.exceptions_csv_filehandle.tell()
        # Continuously write data rows for all files
last_flush_time = 0
while True:
now = time.time()
self.requests_csv_filehandle.seek(requests_csv_data_start)
self._requests_data_rows(self.requests_csv_writer)
self.requests_csv_filehandle.truncate()
self._stats_history_data_rows(self.stats_history_csv_writer, now)
self.failures_csv_filehandle.seek(self.failures_csv_data_start)
self._failures_data_rows(self.failures_csv_writer)
self.failures_csv_filehandle.truncate()
            self.exceptions_csv_filehandle.seek(self.exceptions_csv_data_start)
self._exceptions_data_rows(self.exceptions_csv_writer)
self.exceptions_csv_filehandle.truncate()
if now - last_flush_time > CSV_STATS_FLUSH_INTERVAL_SEC:
self.requests_flush()
self.stats_history_flush()
self.failures_flush()
self.exceptions_flush()
last_flush_time = now
gevent.sleep(CSV_STATS_INTERVAL_SEC)
def _stats_history_data_rows(self, csv_writer, now):
"""
Write CSV rows with the *current* stats. By default only includes the
        Aggregated stats entry, but if self.full_history is set to True, a row for each entry
        will be included.
        Note that this method differs from the other methods as it appends time-stamped data to the file, whereas the other methods overwrite the data.
"""
stats = self.environment.stats
timestamp = int(now)
stats_entries = []
if self.full_history:
stats_entries = sort_stats(stats.entries)
for stats_entry in chain(stats_entries, [stats.total]):
csv_writer.writerow(
chain(
(
timestamp,
self.environment.runner.user_count,
stats_entry.method or "",
stats_entry.name,
f"{stats_entry.current_rps:2f}",
f"{stats_entry.current_fail_per_sec:2f}",
),
self._percentile_fields(stats_entry),
(
stats_entry.num_requests,
stats_entry.num_failures,
stats_entry.median_response_time,
stats_entry.avg_response_time,
stats_entry.min_response_time or 0,
stats_entry.max_response_time,
stats_entry.avg_content_length,
),
)
)
def requests_flush(self):
self.requests_csv_filehandle.flush()
def stats_history_flush(self):
self.stats_history_csv_filehandle.flush()
def failures_flush(self):
self.failures_csv_filehandle.flush()
def exceptions_flush(self):
self.exceptions_csv_filehandle.flush()
def close_files(self):
self.requests_csv_filehandle.close()
self.stats_history_csv_filehandle.close()
self.failures_csv_filehandle.close()
self.exceptions_csv_filehandle.close()
def stats_history_file_name(self):
return self.base_filepath + "_stats_history.csv"
|
import datetime
import hashlib
import time
from collections import namedtuple, OrderedDict
from copy import copy
from itertools import chain
import csv
import gevent
from .exception import StopUser, CatchResponseError
import logging
console_logger = logging.getLogger("locust.stats_logger")
STATS_NAME_WIDTH = 60
STATS_TYPE_WIDTH = 8
"""Default interval for how frequently results are written to console."""
CONSOLE_STATS_INTERVAL_SEC = 2
"""Default interval for how frequently results are written to history."""
HISTORY_STATS_INTERVAL_SEC = 5
"""Default interval for how frequently CSV files are written if this option is configured."""
CSV_STATS_INTERVAL_SEC = 1
CSV_STATS_FLUSH_INTERVAL_SEC = 10
"""
Default window size/resolution - in seconds - when calculating the current
response time percentile
"""
CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW = 10
CachedResponseTimes = namedtuple("CachedResponseTimes", ["response_times", "num_requests"])
PERCENTILES_TO_REPORT = [0.50, 0.66, 0.75, 0.80, 0.90, 0.95, 0.98, 0.99, 0.999, 0.9999, 1.0]
class RequestStatsAdditionError(Exception):
pass
def get_readable_percentiles(percentile_list):
"""
Converts a list of percentiles from 0-1 fraction to 0%-100% view for using in console & csv reporting
:param percentile_list: The list of percentiles in range 0-1
:return: The list of string representation for each percentile in 0%-100% view
"""
return [
f"{int(percentile * 100) if (percentile * 100).is_integer() else round(100 * percentile, 6)}%"
for percentile in percentile_list
]
def calculate_response_time_percentile(response_times, num_requests, percent):
"""
    Get the response time within which a certain percentage of the requests
    finished. Arguments:
    response_times: A StatsEntry.response_times dict
    num_requests: Number of requests made (could be derived from response_times,
but we save some CPU cycles by using the value which we already store)
percent: The percentile we want to calculate. Specified in range: 0.0 - 1.0
"""
num_of_request = int((num_requests * percent))
processed_count = 0
for response_time in sorted(response_times.keys(), reverse=True):
processed_count += response_times[response_time]
if num_requests - processed_count <= num_of_request:
return response_time
# if all response times were None
return 0
def calculate_response_time_average(response_times, num_requests):
"""
    Calculate the average response time from a response_times dict. Arguments:
    response_times: A StatsEntry.response_times dict
    num_requests: Number of requests made (accepted for signature symmetry with the
    other calculate_response_time_* helpers; the effective count is re-derived from
    the dict while summing)
"""
num_of_request = int(num_requests)
sum_val = 0
processed_count = 0
for response_time in sorted(response_times.keys(), reverse=True):
processed_count += response_times[response_time]
sum_val += response_time * response_times[response_time]
num_of_request = processed_count
if num_of_request > 0:
return int(sum_val / float(num_of_request))
else:
return 0
def calculate_response_time_max(response_times, num_requests):
"""
    Calculate the maximum response time found in a response_times dict. Arguments:
    response_times: A StatsEntry.response_times dict
    num_requests: Number of requests made (unused here; accepted for signature
    symmetry with the other calculate_response_time_* helpers)
"""
num_of_request = int(num_requests)
max_val = 0
processed_count = 0
for response_time in sorted(response_times.keys(), reverse=True):
processed_count += response_times[response_time]
if response_time > max_val:
max_val = response_time
if max_val is None:
return None
return int(max_val)
def calculate_response_time_min(response_times, num_requests):
"""
    Calculate the minimum response time found in a response_times dict. Arguments:
    response_times: A StatsEntry.response_times dict
    num_requests: Number of requests made (unused here; accepted for signature
    symmetry with the other calculate_response_time_* helpers)
"""
num_of_request = int(num_requests)
min_val = None
processed_count = 0
for response_time in sorted(response_times.keys(), reverse=True):
processed_count += response_times[response_time]
if min_val is None:
min_val = response_time
elif response_time < min_val:
min_val = response_time
if min_val is None:
return None
return int(min_val)
def diff_response_time_dicts(latest, old):
"""
Returns the delta between two {response_times:request_count} dicts.
Used together with the response_times cache to get the response times for the
last X seconds, which in turn is used to calculate the current response time
percentiles.
"""
new = {}
for t in latest:
diff = latest[t] - old.get(t, 0)
if diff:
new[t] = diff
return new
class RequestStats:
"""
Class that holds the request statistics.
"""
def __init__(self, use_response_times_cache=True):
"""
:param use_response_times_cache: The value of use_response_times_cache will be set for each StatsEntry()
                                         when they are created. Setting it to False saves some memory and CPU
cycles which we can do on Worker nodes where the response_times_cache
is not needed.
"""
self.use_response_times_cache = use_response_times_cache
self.entries = {}
self.errors = {}
self.total = StatsEntry(self, "Aggregated", None, use_response_times_cache=self.use_response_times_cache)
self.history = []
@property
def num_requests(self):
return self.total.num_requests
@property
def num_none_requests(self):
return self.total.num_none_requests
@property
def num_failures(self):
return self.total.num_failures
@property
def last_request_timestamp(self):
return self.total.last_request_timestamp
@property
def start_time(self):
return self.total.start_time
def log_request(self, method, name, response_time, content_length):
self.total.log(response_time, content_length)
self.get(name, method).log(response_time, content_length)
def log_error(self, method, name, error):
self.total.log_error(error)
self.get(name, method).log_error(error)
# store error in errors dict
key = StatsError.create_key(method, name, error)
entry = self.errors.get(key)
if not entry:
entry = StatsError(method, name, error)
self.errors[key] = entry
entry.occurred()
def get(self, name, method):
"""
Retrieve a StatsEntry instance by name and method
"""
entry = self.entries.get((name, method))
if not entry:
entry = StatsEntry(self, name, method, use_response_times_cache=self.use_response_times_cache)
self.entries[(name, method)] = entry
return entry
def reset_all(self):
"""
Go through all stats entries and reset them to zero
"""
self.total.reset()
self.errors = {}
for r in self.entries.values():
r.reset()
self.history = []
def clear_all(self):
"""
Remove all stats entries and errors
"""
self.total = StatsEntry(self, "Aggregated", None, use_response_times_cache=self.use_response_times_cache)
self.entries = {}
self.errors = {}
self.history = []
def serialize_stats(self):
return [
self.entries[key].get_stripped_report()
for key in self.entries.keys()
if not (self.entries[key].num_requests == 0 and self.entries[key].num_failures == 0)
]
def serialize_errors(self):
return dict([(k, e.to_dict()) for k, e in self.errors.items()])
class StatsEntry:
"""
Represents a single stats entry (name and method)
"""
name = None
""" Name (URL) of this stats entry """
method = None
""" Method (GET, POST, PUT, etc.) """
num_requests = None
""" The number of requests made """
num_none_requests = None
""" The number of requests made with a None response time (typically async requests) """
num_failures = None
""" Number of failed request """
total_response_time = None
""" Total sum of the response times """
min_response_time = None
""" Minimum response time """
max_response_time = None
""" Maximum response time """
num_reqs_per_sec = None
""" A {second => request_count} dict that holds the number of requests made per second """
num_fail_per_sec = None
""" A (second => failure_count) dict that hold the number of failures per second """
response_times = None
"""
A {response_time => count} dict that holds the response time distribution of all
the requests.
The keys (the response time in ms) are rounded to store 1, 2, ... 9, 10, 20, ... 90,
100, 200, ... 900, 1000, 2000, ... 9000, in order to save memory.
This dict is used to calculate the median and percentile response times.
"""
use_response_times_cache = False
"""
If set to True, a copy of the response_times dict will be stored in response_times_cache
every second, and kept for 20 seconds (CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW + 10 by default).
We can use this dict to calculate the *current* median response time, as well as other response
time percentiles.
"""
response_times_cache = None
"""
If use_response_times_cache is set to True, this will be a {timestamp => CachedResponseTimes()}
OrderedDict that holds a copy of the response_times dict for each of the last 20 seconds.
"""
total_content_length = None
""" The sum of the content length of all the requests for this entry """
start_time = None
""" Time of the first request for this entry """
last_request_timestamp = None
""" Time of the last request for this entry """
def __init__(self, stats, name, method, use_response_times_cache=False):
self.stats = stats
self.name = name
self.method = method
self.use_response_times_cache = use_response_times_cache
self.reset()
def reset(self):
self.start_time = time.time()
self.num_requests = 0
self.num_none_requests = 0
self.num_failures = 0
self.total_response_time = 0
self.response_times = {}
self.min_response_time = None
self.max_response_time = 0
self.last_request_timestamp = None
self.num_reqs_per_sec = {}
self.num_fail_per_sec = {}
self.total_content_length = 0
if self.use_response_times_cache:
self.response_times_cache = OrderedDict()
self._cache_response_times(int(time.time()))
def log(self, response_time, content_length):
# get the time
current_time = time.time()
t = int(current_time)
if self.use_response_times_cache and self.last_request_timestamp and t > int(self.last_request_timestamp):
# see if we shall make a copy of the response_times dict and store in the cache
self._cache_response_times(t - 1)
self.num_requests += 1
self._log_time_of_request(current_time)
self._log_response_time(response_time)
# increase total content-length
self.total_content_length += content_length
def _log_time_of_request(self, current_time):
t = int(current_time)
self.num_reqs_per_sec[t] = self.num_reqs_per_sec.setdefault(t, 0) + 1
self.last_request_timestamp = current_time
def _log_response_time(self, response_time):
if response_time is None:
self.num_none_requests += 1
return
self.total_response_time += response_time
if self.min_response_time is None:
self.min_response_time = response_time
self.min_response_time = min(self.min_response_time, response_time)
self.max_response_time = max(self.max_response_time, response_time)
# to avoid too much data that has to be transferred to the master node when
# running in distributed mode, we save the response time rounded in a dict
# so that 147 becomes 150, 3432 becomes 3400 and 58760 becomes 59000
if response_time < 100:
rounded_response_time = round(response_time)
elif response_time < 1000:
rounded_response_time = round(response_time, -1)
elif response_time < 10000:
rounded_response_time = round(response_time, -2)
else:
rounded_response_time = round(response_time, -3)
# increase request count for the rounded key in response time dict
self.response_times.setdefault(rounded_response_time, 0)
self.response_times[rounded_response_time] += 1
def log_error(self, error):
self.num_failures += 1
t = int(time.time())
self.num_fail_per_sec[t] = self.num_fail_per_sec.setdefault(t, 0) + 1
@property
def fail_ratio(self):
try:
return float(self.num_failures) / self.num_requests
except ZeroDivisionError:
if self.num_failures > 0:
return 1.0
else:
return 0.0
@property
def avg_response_time(self):
try:
return float(self.total_response_time) / (self.num_requests - self.num_none_requests)
except ZeroDivisionError:
return 0
@property
def median_response_time(self):
if not self.response_times:
return 0
median = median_from_dict(self.num_requests - self.num_none_requests, self.response_times) or 0
# Since we only use two digits of precision when calculating the median response time
# while still using the exact values for min and max response times, the following checks
# make sure that we don't report a median > max or median < min when a StatsEntry only
# has one (or very few) really slow requests
if median > self.max_response_time:
median = self.max_response_time
elif median < self.min_response_time:
median = self.min_response_time
return median
@property
def current_rps(self):
if self.stats.last_request_timestamp is None:
return 0
slice_start_time = max(int(self.stats.last_request_timestamp) - 12, int(self.stats.start_time or 0))
reqs = [
self.num_reqs_per_sec.get(t, 0) for t in range(slice_start_time, int(self.stats.last_request_timestamp) - 2)
]
return avg(reqs)
@property
def current_fail_per_sec(self):
if self.stats.last_request_timestamp is None:
return 0
slice_start_time = max(int(self.stats.last_request_timestamp) - 12, int(self.stats.start_time or 0))
reqs = [
self.num_fail_per_sec.get(t, 0) for t in range(slice_start_time, int(self.stats.last_request_timestamp) - 2)
]
return avg(reqs)
@property
def total_rps(self):
if not self.stats.last_request_timestamp or not self.stats.start_time:
return 0.0
try:
return self.num_requests / (self.stats.last_request_timestamp - self.stats.start_time)
except ZeroDivisionError:
return 0.0
@property
def total_fail_per_sec(self):
if not self.stats.last_request_timestamp or not self.stats.start_time:
return 0.0
try:
return self.num_failures / (self.stats.last_request_timestamp - self.stats.start_time)
except ZeroDivisionError:
return 0.0
@property
def avg_content_length(self):
try:
return self.total_content_length / self.num_requests
except ZeroDivisionError:
return 0
def extend(self, other):
"""
Extend the data from the current StatsEntry with the stats from another
StatsEntry instance.
"""
# save the old last_request_timestamp, to see if we should store a new copy
# of the response times in the response times cache
old_last_request_timestamp = self.last_request_timestamp
if self.last_request_timestamp is not None and other.last_request_timestamp is not None:
self.last_request_timestamp = max(self.last_request_timestamp, other.last_request_timestamp)
elif other.last_request_timestamp is not None:
self.last_request_timestamp = other.last_request_timestamp
self.start_time = min(self.start_time, other.start_time)
self.num_requests = self.num_requests + other.num_requests
self.num_none_requests = self.num_none_requests + other.num_none_requests
self.num_failures = self.num_failures + other.num_failures
self.total_response_time = self.total_response_time + other.total_response_time
self.max_response_time = max(self.max_response_time, other.max_response_time)
if self.min_response_time is not None and other.min_response_time is not None:
self.min_response_time = min(self.min_response_time, other.min_response_time)
elif other.min_response_time is not None:
# this means self.min_response_time is None, so we can safely replace it
self.min_response_time = other.min_response_time
self.total_content_length = self.total_content_length + other.total_content_length
for key in other.response_times:
self.response_times[key] = self.response_times.get(key, 0) + other.response_times[key]
for key in other.num_reqs_per_sec:
self.num_reqs_per_sec[key] = self.num_reqs_per_sec.get(key, 0) + other.num_reqs_per_sec[key]
for key in other.num_fail_per_sec:
self.num_fail_per_sec[key] = self.num_fail_per_sec.get(key, 0) + other.num_fail_per_sec[key]
if self.use_response_times_cache:
# If we've entered a new second, we'll cache the response times. Note that there
# might still be reports from other worker nodes - that contain requests for the same
# time periods - that haven't been received/accounted for yet. This will cause the cache to
# lag behind a second or two, but since StatsEntry.current_response_time_percentile()
# (which is what the response times cache is used for) uses an approximation of the
# last 10 seconds anyway, it should be fine to ignore this.
last_time = self.last_request_timestamp and int(self.last_request_timestamp) or None
if last_time and last_time > (old_last_request_timestamp and int(old_last_request_timestamp) or 0):
self._cache_response_times(last_time)
def serialize(self):
return {
"name": self.name,
"method": self.method,
"last_request_timestamp": self.last_request_timestamp,
"start_time": self.start_time,
"num_requests": self.num_requests,
"num_none_requests": self.num_none_requests,
"num_failures": self.num_failures,
"total_response_time": self.total_response_time,
"max_response_time": self.max_response_time,
"min_response_time": self.min_response_time,
"total_content_length": self.total_content_length,
"response_times": self.response_times,
"num_reqs_per_sec": self.num_reqs_per_sec,
"num_fail_per_sec": self.num_fail_per_sec,
}
@classmethod
def unserialize(cls, data):
obj = cls(None, data["name"], data["method"])
for key in [
"last_request_timestamp",
"start_time",
"num_requests",
"num_none_requests",
"num_failures",
"total_response_time",
"max_response_time",
"min_response_time",
"total_content_length",
"response_times",
"num_reqs_per_sec",
"num_fail_per_sec",
]:
setattr(obj, key, data[key])
return obj
def get_stripped_report(self):
"""
Return the serialized version of this StatsEntry, and then clear the current stats.
"""
report = self.serialize()
self.reset()
return report
def to_string(self, current=True):
"""
Return the stats as a string suitable for console output. If current is True, it'll show
the RPS and failure rate for the last 10 seconds. If it's false, it'll show the total stats
for the whole run.
"""
if current:
rps = self.current_rps
fail_per_sec = self.current_fail_per_sec
else:
rps = self.total_rps
fail_per_sec = self.total_fail_per_sec
return (" %-" + str(STATS_NAME_WIDTH) + "s %7d %12s | %7d %7d %7d %7d | %7.2f %7.2f") % (
(self.method and self.method + " " or "") + self.name,
self.num_requests,
"%d(%.2f%%)" % (self.num_failures, self.fail_ratio * 100),
self.avg_response_time,
self.min_response_time or 0,
self.max_response_time,
self.median_response_time or 0,
rps or 0,
fail_per_sec or 0,
)
def __str__(self):
return self.to_string(current=True)
def get_response_time_percentile(self, percent):
"""
Get the response time that a certain number of percent of the requests
finished within.
Percent specified in range: 0.0 - 1.0
"""
return calculate_response_time_percentile(self.response_times, self.num_requests, percent)
def get_current_response_time_average(self):
"""
Calculate the *current* average response time. We use a sliding
window of (approximately) the last 10 seconds (specified by CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW)
when calculating this.
"""
if not self.use_response_times_cache:
raise ValueError("StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile")
# First, we want to determine which of the cached response_times dicts we should
# use to get response_times for approximately 10 seconds ago.
t = int(time.time())
# Since we can't be sure that the cache contains an entry for every second,
# we'll construct a list of timestamps which we consider acceptable keys to be used
# when trying to fetch the cached response_times. We construct this list in such a way
# that it's ordered by preference by starting to add t-10, then t-11, t-9, t-12, t-8,
# and so on
acceptable_timestamps = []
acceptable_timestamps.append(t - CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW)
for i in range(1, 9):
    acceptable_timestamps.append(t - CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW - i)
    acceptable_timestamps.append(t - CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW + i)
cached = None
for ts in acceptable_timestamps:
if ts in self.response_times_cache:
cached = self.response_times_cache[ts]
break
if cached:
# If we found an acceptable cached response_times dict, we'll calculate a new response
# times dict of the last 10 seconds (approximately) by diffing it with the current
# total response times. Then we'll use that to calculate the average response time
# for that timeframe
return calculate_response_time_average(
diff_response_time_dicts(self.response_times, cached.response_times),
self.num_requests - cached.num_requests,
)
def get_current_response_time_max(self):
"""
Calculate the *current* maximum response time. We use a sliding
window of (approximately) the last 10 seconds (specified by CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW)
when calculating this.
"""
if not self.use_response_times_cache:
raise ValueError("StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile")
# First, we want to determine which of the cached response_times dicts we should
# use to get response_times for approximately 10 seconds ago.
t = int(time.time())
# Since we can't be sure that the cache contains an entry for every second,
# we'll construct a list of timestamps which we consider acceptable keys to be used
# when trying to fetch the cached response_times. We construct this list in such a way
# that it's ordered by preference by starting to add t-10, then t-11, t-9, t-12, t-8,
# and so on
acceptable_timestamps = []
acceptable_timestamps.append(t - CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW)
for i in range(1, 9):
    acceptable_timestamps.append(t - CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW - i)
    acceptable_timestamps.append(t - CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW + i)
cached = None
for ts in acceptable_timestamps:
if ts in self.response_times_cache:
cached = self.response_times_cache[ts]
break
if cached:
# If we found an acceptable cached response_times dict, we'll calculate a new response
# times dict of the last 10 seconds (approximately) by diffing it with the current
# total response times. Then we'll use that to calculate the maximum response time
# for that timeframe
return calculate_response_time_max(
diff_response_time_dicts(self.response_times, cached.response_times),
self.num_requests - cached.num_requests,
)
def get_current_response_time_min(self):
"""
Calculate the *current* minimum response time. We use a sliding
window of (approximately) the last 10 seconds (specified by CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW)
when calculating this.
"""
if not self.use_response_times_cache:
raise ValueError("StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile")
# First, we want to determine which of the cached response_times dicts we should
# use to get response_times for approximately 10 seconds ago.
t = int(time.time())
# Since we can't be sure that the cache contains an entry for every second,
# we'll construct a list of timestamps which we consider acceptable keys to be used
# when trying to fetch the cached response_times. We construct this list in such a way
# that it's ordered by preference by starting to add t-10, then t-11, t-9, t-12, t-8,
# and so on
acceptable_timestamps = []
acceptable_timestamps.append(t - CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW)
for i in range(1, 9):
    acceptable_timestamps.append(t - CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW - i)
    acceptable_timestamps.append(t - CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW + i)
cached = None
for ts in acceptable_timestamps:
if ts in self.response_times_cache:
cached = self.response_times_cache[ts]
break
if cached:
# If we found an acceptable cached response_times dict, we'll calculate a new response
# times dict of the last 10 seconds (approximately) by diffing it with the current
# total response times. Then we'll use that to calculate the minimum response time
# for that timeframe
return calculate_response_time_min(
diff_response_time_dicts(self.response_times, cached.response_times),
self.num_requests - cached.num_requests,
)
def get_current_response_time_percentile(self, percent):
"""
Calculate the *current* response time for a certain percentile. We use a sliding
window of (approximately) the last 10 seconds (specified by CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW)
when calculating this.
"""
if not self.use_response_times_cache:
raise ValueError(
"StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile"
)
# First, we want to determine which of the cached response_times dicts we should
# use to get response_times for approximately 10 seconds ago.
t = int(time.time())
# Since we can't be sure that the cache contains an entry for every second,
# we'll construct a list of timestamps which we consider acceptable keys to be used
# when trying to fetch the cached response_times. We construct this list in such a way
# that it's ordered by preference by starting to add t-10, then t-11, t-9, t-12, t-8,
# and so on
acceptable_timestamps = []
acceptable_timestamps.append(t - CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW)
for i in range(1, 9):
acceptable_timestamps.append(t - CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW - i)
acceptable_timestamps.append(t - CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW + i)
cached = None
for ts in acceptable_timestamps:
if ts in self.response_times_cache:
cached = self.response_times_cache[ts]
break
if cached:
# If we found an acceptable cached response_times dict, we'll calculate a new response
# times dict of the last 10 seconds (approximately) by diffing it with the current
# total response times. Then we'll use that to calculate a response time percentile
# for that timeframe
return calculate_response_time_percentile(
diff_response_time_dicts(self.response_times, cached.response_times),
self.num_requests - cached.num_requests,
percent,
)
def percentile(self):
if not self.num_requests:
raise ValueError("Can't calculate percentile on url with no successful requests")
tpl = f" %-{str(STATS_TYPE_WIDTH)}s %-{str(STATS_NAME_WIDTH)}s %8d {' '.join(['%6d'] * len(PERCENTILES_TO_REPORT))}"
return tpl % (
(self.method or "", self.name)
+ tuple([self.get_response_time_percentile(p) for p in PERCENTILES_TO_REPORT])
+ (self.num_requests,)
)
def _cache_response_times(self, t):
self.response_times_cache[t] = CachedResponseTimes(
response_times=copy(self.response_times),
num_requests=self.num_requests,
)
# We'll use a cache size of CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW + 10 since - in the extreme case -
# we might still use response times (from the cache) for t-CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW-10
# to calculate the current response time percentile, if we're missing cached values for the subsequent
# 20 seconds
cache_size = CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW + 10
if len(self.response_times_cache) > cache_size:
# only keep the latest 20 response_times dicts
for i in range(len(self.response_times_cache) - cache_size):
self.response_times_cache.popitem(last=False)
class StatsError:
def __init__(self, method, name, error, occurrences=0):
self.method = method
self.name = name
self.error = error
self.occurrences = occurrences
@classmethod
def parse_error(cls, error):
string_error = repr(error)
target = "object at 0x"
target_index = string_error.find(target)
if target_index < 0:
return string_error
start = target_index + len(target) - 2
end = string_error.find(">", start)
if end < 0:
return string_error
hex_address = string_error[start:end]
return string_error.replace(hex_address, "0x....")
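# Illustrative example (not part of the original module): an error whose repr() is
#   "<CatchResponseError object at 0x7fd2a3b4c5d0>"
# is normalised to
#   "<CatchResponseError object at 0x....>"
# so occurrences that differ only by memory address end up sharing the same key.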
@classmethod
def create_key(cls, method, name, error):
key = "%s.%s.%r" % (method, name, StatsError.parse_error(error))
return hashlib.md5(key.encode("utf-8")).hexdigest()
def occurred(self):
self.occurrences += 1
def to_name(self):
error = self.error
if isinstance(error, CatchResponseError):
# standalone
unwrapped_error = error.args[0]
elif isinstance(error, str) and error.startswith("CatchResponseError("):
# distributed
length = len("CatchResponseError(")
unwrapped_error = error[length:-1]
else:
# standalone, unwrapped exception
unwrapped_error = repr(error)
return "%s %s: %s" % (self.method, self.name, unwrapped_error)
def to_dict(self):
return {
"method": self.method,
"name": self.name,
"error": StatsError.parse_error(self.error),
"occurrences": self.occurrences,
}
@classmethod
def from_dict(cls, data):
return cls(data["method"], data["name"], data["error"], data["occurrences"])
def avg(values):
return sum(values, 0.0) / max(len(values), 1)
def median_from_dict(total, count):
"""
total is the number of requests made
count is a dict {response_time: count}
"""
pos = (total - 1) / 2
for k in sorted(count.keys()):
if pos < count[k]:
return k
pos -= count[k]
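# Illustrative example (not part of the original module; values are made up):
#   median_from_dict(5, {100: 2, 200: 2, 300: 1})  ->  200
# position (5 - 1) / 2 = 2.0 falls in the 200 ms bucket once the 100 ms bucket is consumed.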
def setup_distributed_stats_event_listeners(events, stats):
def on_report_to_master(client_id, data):
data["stats"] = stats.serialize_stats()
data["stats_total"] = stats.total.get_stripped_report()
data["errors"] = stats.serialize_errors()
stats.errors = {}
def on_worker_report(client_id, data):
for stats_data in data["stats"]:
entry = StatsEntry.unserialize(stats_data)
request_key = (entry.name, entry.method)
if request_key not in stats.entries:
stats.entries[request_key] = StatsEntry(stats, entry.name, entry.method, use_response_times_cache=True)
stats.entries[request_key].extend(entry)
for error_key, error in data["errors"].items():
if error_key not in stats.errors:
stats.errors[error_key] = StatsError.from_dict(error)
else:
stats.errors[error_key].occurrences += error["occurrences"]
stats.total.extend(StatsEntry.unserialize(data["stats_total"]))
events.report_to_master.add_listener(on_report_to_master)
events.worker_report.add_listener(on_worker_report)
def print_stats(stats, current=True):
console_logger.info(
(" %-" + str(STATS_NAME_WIDTH) + "s %7s %12s | %7s %7s %7s %7s | %7s %7s")
% ("Name", "# reqs", "# fails", "Avg", "Min", "Max", "Median", "req/s", "failures/s")
)
console_logger.info("-" * (80 + STATS_NAME_WIDTH))
for key in sorted(stats.entries.keys()):
r = stats.entries[key]
console_logger.info(r.to_string(current=current))
console_logger.info("-" * (80 + STATS_NAME_WIDTH))
console_logger.info(stats.total.to_string(current=current))
console_logger.info("")
def print_percentile_stats(stats):
console_logger.info("Response time percentiles (approximated)")
headers = ("Type", "Name") + tuple(get_readable_percentiles(PERCENTILES_TO_REPORT)) + ("# reqs",)
console_logger.info(
(
f" %-{str(STATS_TYPE_WIDTH)}s %-{str(STATS_NAME_WIDTH)}s %8s "
f"{' '.join(['%6s'] * len(PERCENTILES_TO_REPORT))}"
)
% headers
)
separator = (
f'{"-" * STATS_TYPE_WIDTH}|{"-" * STATS_NAME_WIDTH}|{"-" * 9}|{("-" * 6 + "|") * len(PERCENTILES_TO_REPORT)}'
)
console_logger.info(separator)
for key in sorted(stats.entries.keys()):
r = stats.entries[key]
if r.response_times:
console_logger.info(r.percentile())
console_logger.info(separator)
if stats.total.response_times:
console_logger.info(stats.total.percentile())
console_logger.info("")
def print_error_report(stats):
if not len(stats.errors):
return
console_logger.info("Error report")
console_logger.info(" %-18s %-100s" % ("# occurrences", "Error"))
console_logger.info("-" * (80 + STATS_NAME_WIDTH))
for error in stats.errors.values():
console_logger.info(" %-18i %-100s" % (error.occurrences, error.to_name()))
console_logger.info("-" * (80 + STATS_NAME_WIDTH))
console_logger.info("")
def stats_printer(stats):
def stats_printer_func():
while True:
print_stats(stats)
gevent.sleep(CONSOLE_STATS_INTERVAL_SEC)
return stats_printer_func
def sort_stats(stats):
return [stats[key] for key in sorted(stats.keys())]
def stats_history(runner):
"""Save current stats info to history for charts of report."""
while True:
stats = runner.stats
if not stats.total.use_response_times_cache:
break
r = {
"time": datetime.datetime.now().strftime("%H:%M:%S"),
"current_rps": stats.total.current_rps or 0,
"current_fail_per_sec": stats.total.current_fail_per_sec or 0,
"response_time_percentile_95": stats.total.get_current_response_time_percentile(0.95) or 0,
"response_time_percentile_50": stats.total.get_current_response_time_percentile(0.5) or 0,
"user_count": runner.user_count or 0,
}
stats.history.append(r)
gevent.sleep(HISTORY_STATS_INTERVAL_SEC)
class StatsCSV:
"""Write statistics to csv_writer stream."""
def __init__(self, environment, percentiles_to_report):
super().__init__()
self.environment = environment
self.percentiles_to_report = percentiles_to_report
self.percentiles_na = ["N/A"] * len(self.percentiles_to_report)
self.requests_csv_columns = [
"Type",
"Name",
"Request Count",
"Failure Count",
"Median Response Time",
"Average Response Time",
"Min Response Time",
"Max Response Time",
"Average Content Size",
"Requests/s",
"Failures/s",
] + get_readable_percentiles(self.percentiles_to_report)
self.failures_columns = [
"Method",
"Name",
"Error",
"Occurrences",
]
self.exceptions_columns = [
"Count",
"Message",
"Traceback",
"Nodes",
]
def _percentile_fields(self, stats_entry):
return (
[int(stats_entry.get_response_time_percentile(x) or 0) for x in self.percentiles_to_report]
if stats_entry.num_requests
else self.percentiles_na
)
def requests_csv(self, csv_writer):
"""Write requests csv with header and data rows."""
csv_writer.writerow(self.requests_csv_columns)
self._requests_data_rows(csv_writer)
def _requests_data_rows(self, csv_writer):
"""Write requests csv data row, excluding header."""
stats = self.environment.stats
for stats_entry in chain(sort_stats(stats.entries), [stats.total]):
csv_writer.writerow(
chain(
[
stats_entry.method,
stats_entry.name,
stats_entry.num_requests,
stats_entry.num_failures,
stats_entry.median_response_time,
stats_entry.avg_response_time,
stats_entry.min_response_time or 0,
stats_entry.max_response_time,
stats_entry.avg_content_length,
stats_entry.total_rps,
stats_entry.total_fail_per_sec,
],
self._percentile_fields(stats_entry),
)
)
def failures_csv(self, csv_writer):
csv_writer.writerow(self.failures_columns)
self._failures_data_rows(csv_writer)
def _failures_data_rows(self, csv_writer):
for stats_error in sort_stats(self.environment.stats.errors):
csv_writer.writerow(
[
stats_error.method,
stats_error.name,
stats_error.error,
stats_error.occurrences,
]
)
def exceptions_csv(self, csv_writer):
csv_writer.writerow(self.exceptions_columns)
self._exceptions_data_rows(csv_writer)
def _exceptions_data_rows(self, csv_writer):
for exc in self.environment.runner.exceptions.values():
csv_writer.writerow(
[
exc["count"],
exc["msg"],
exc["traceback"],
", ".join(exc["nodes"])
]
)
class StatsCSVFileWriter(StatsCSV):
"""Write statistics to to CSV files"""
def __init__(self, environment, percentiles_to_report, base_filepath, full_history=False):
super().__init__(environment, percentiles_to_report)
self.base_filepath = base_filepath
self.full_history = full_history
self.requests_csv_filehandle = open(self.base_filepath + "_stats.csv", "w")
self.requests_csv_writer = csv.writer(self.requests_csv_filehandle)
self.stats_history_csv_filehandle = open(self.stats_history_file_name(), "w")
self.stats_history_csv_writer = csv.writer(self.stats_history_csv_filehandle)
self.failures_csv_filehandle = open(self.base_filepath + "_failures.csv", "w")
self.failures_csv_writer = csv.writer(self.failures_csv_filehandle)
self.failures_csv_data_start = 0
self.exceptions_csv_filehandle = open(self.base_filepath + "_exceptions.csv", "w")
self.exceptions_csv_writer = csv.writer(self.exceptions_csv_filehandle)
self.exceptions_csv_data_start = 0
self.stats_history_csv_columns = [
"Timestamp",
"User Count",
"Type",
"Name",
"Requests/s",
"Failures/s",
*get_readable_percentiles(self.percentiles_to_report),
"Total Request Count",
"Total Failure Count",
"Total Median Response Time",
"Total Average Response Time",
"Total Min Response Time",
"Total Max Response Time",
"Total Average Content Size",
]
def __call__(self):
self.stats_writer()
def stats_writer(self):
"""Writes all the csv files for the locust run."""
# Write header row for all files and save position for non-append files
self.requests_csv_writer.writerow(self.requests_csv_columns)
requests_csv_data_start = self.requests_csv_filehandle.tell()
self.stats_history_csv_writer.writerow(self.stats_history_csv_columns)
self.failures_csv_writer.writerow(self.failures_columns)
self.failures_csv_data_start = self.failures_csv_filehandle.tell()
self.exceptions_csv_writer.writerow(self.exceptions_columns)
self.exceptions_csv_data_start = self.exceptions_csv_filehandle.tell()
# Continuously write data rows for all files
last_flush_time = 0
while True:
now = time.time()
self.requests_csv_filehandle.seek(requests_csv_data_start)
self._requests_data_rows(self.requests_csv_writer)
self.requests_csv_filehandle.truncate()
self._stats_history_data_rows(self.stats_history_csv_writer, now)
self.failures_csv_filehandle.seek(self.failures_csv_data_start)
self._failures_data_rows(self.failures_csv_writer)
self.failures_csv_filehandle.truncate()
self.exceptions_csv_filehandle.seek(self.exceptions_csv_data_start)
self._exceptions_data_rows(self.exceptions_csv_writer)
self.exceptions_csv_filehandle.truncate()
if now - last_flush_time > CSV_STATS_FLUSH_INTERVAL_SEC:
self.requests_flush()
self.stats_history_flush()
self.failures_flush()
self.exceptions_flush()
last_flush_time = now
gevent.sleep(CSV_STATS_INTERVAL_SEC)
def _stats_history_data_rows(self, csv_writer, now):
"""
Write CSV rows with the *current* stats. By default only includes the
Aggregated stats entry, but if self.full_history is set to True, a row for each entry
will be included.
Note that this method differs from the other methods as it appends time-stamped data to
the file, whereas the other methods overwrite the data.
"""
stats = self.environment.stats
timestamp = int(now)
stats_entries = []
if self.full_history:
stats_entries = sort_stats(stats.entries)
for stats_entry in chain(stats_entries, [stats.total]):
csv_writer.writerow(
chain(
(
timestamp,
self.environment.runner.user_count,
stats_entry.method or "",
stats_entry.name,
f"{stats_entry.current_rps:2f}",
f"{stats_entry.current_fail_per_sec:2f}",
),
self._percentile_fields(stats_entry),
(
stats_entry.num_requests,
stats_entry.num_failures,
stats_entry.median_response_time,
stats_entry.avg_response_time,
stats_entry.min_response_time or 0,
stats_entry.max_response_time,
stats_entry.avg_content_length,
),
)
)
def requests_flush(self):
self.requests_csv_filehandle.flush()
def stats_history_flush(self):
self.stats_history_csv_filehandle.flush()
def failures_flush(self):
self.failures_csv_filehandle.flush()
def exceptions_flush(self):
self.exceptions_csv_filehandle.flush()
def close_files(self):
self.requests_csv_filehandle.close()
self.stats_history_csv_filehandle.close()
self.failures_csv_filehandle.close()
self.exceptions_csv_filehandle.close()
def stats_history_file_name(self):
return self.base_filepath + "_stats_history.csv"
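# Illustrative usage sketch (not part of the original module; `env` is assumed to be a
# locust Environment and PERCENTILES_TO_REPORT the module-level percentile list):
#   writer = StatsCSVFileWriter(env, PERCENTILES_TO_REPORT, "example_run", full_history=True)
#   greenlet = gevent.spawn(writer)   # __call__ runs stats_writer() until killed
#   ...                               # run the load test
#   gevent.kill(greenlet)
#   writer.close_files()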
|
"""Map Sentinel-1 data products to xarray.
References:
- Sentinel-1 document library: https://sentinels.copernicus.eu/web/sentinel/user-guides/sentinel-1-sar/document-library
- Sentinel-1 Product Specification v3.9 07 May 2021 S1-RS-MDA-52-7441-3-9 documenting IPF 3.40
https://sentinel.esa.int/documents/247904/1877131/S1-RS-MDA-52-7441-3-9-2_Sentinel-1ProductSpecification.pdf
- Sentinel-1 Product Specification v3.7 27 February 2020 S1-RS-MDA-52-7441 documenting IPF 3.30
https://sentinel.esa.int/documents/247904/1877131/Sentinel-1-Product-Specification
"""
import contextlib
import os
import typing as T
import warnings
import fsspec
import numpy as np
import pandas as pd
import xarray as xr
from . import conventions, esa_safe
SPEED_OF_LIGHT = 299_792_458 # m / s
ONE_SECOND = np.timedelta64(1, "s")
def get_fs_path(
urlpath_or_path: esa_safe.PathType,
fs: T.Optional[fsspec.AbstractFileSystem] = None,
storage_options: T.Optional[T.Dict[str, T.Any]] = None,
) -> T.Tuple[fsspec.AbstractFileSystem, str]:
if fs is not None and storage_options is not None:
raise TypeError("only one of 'fs' and 'storage_options' can be not None")
if fs is None:
fs, _, paths = fsspec.get_fs_token_paths(
urlpath_or_path, storage_options=storage_options
)
if len(paths) == 0:
raise ValueError(f"file or object not found {urlpath_or_path!r}")
elif len(paths) > 1:
raise ValueError(f"multiple files or objects found {urlpath_or_path!r}")
path = paths[0]
else:
path = str(urlpath_or_path)
return fs, path
def normalise_group(group: T.Optional[str]) -> T.Tuple[str, T.Optional[int]]:
if group is None:
group = ""
if group.startswith("/"):
group = group[1:]
burst_index = None
parent_group, _, last_name = group.rpartition("/")
if parent_group.count("/") == 1 and last_name.isnumeric():
burst_index = int(last_name)
group = parent_group
return group, burst_index
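# Illustrative examples (not part of the original module):
#   normalise_group(None)        -> ("", None)
#   normalise_group("/IW1/VV")   -> ("IW1/VV", None)
#   normalise_group("IW1/VV/3")  -> ("IW1/VV", 3)   # trailing numeric part is the burst index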
def open_calibration_dataset(calibration: esa_safe.PathType) -> xr.Dataset:
calibration_vectors = esa_safe.parse_tag_as_list(
calibration, ".//calibrationVector", "calibration"
)
azimuth_time_list = []
pixel_list = []
line_list = []
sigmaNought_list = []
betaNought_list = []
gamma_list = []
dn_list = []
for vector in calibration_vectors:
azimuth_time_list.append(vector["azimuthTime"])
line_list.append(vector["line"])
pixel = np.fromstring(vector["pixel"]["$"], dtype=int, sep=" ") # type: ignore
pixel_list.append(pixel)
sigmaNought = np.fromstring(vector["sigmaNought"]["$"], dtype=np.float32, sep=" ") # type: ignore
sigmaNought_list.append(sigmaNought)
betaNought = np.fromstring(vector["betaNought"]["$"], dtype=np.float32, sep=" ") # type: ignore
betaNought_list.append(betaNought)
gamma = np.fromstring(vector["gamma"]["$"], dtype=np.float32, sep=" ") # type: ignore
gamma_list.append(gamma)
dn = np.fromstring(vector["dn"]["$"], dtype=np.float32, sep=" ") # type: ignore
dn_list.append(dn)
pixel = np.array(pixel_list)
if not np.allclose(pixel, pixel[0]):
raise ValueError(
"Unable to organise calibration vectors in a regular line-pixel grid"
)
data_vars = {
"azimuth_time": ("line", [np.datetime64(dt) for dt in azimuth_time_list]),
"sigmaNought": (("line", "pixel"), sigmaNought_list),
"betaNought": (("line", "pixel"), betaNought_list),
"gamma": (("line", "pixel"), gamma_list),
"dn": (("line", "pixel"), dn_list),
}
coords = {"line": line_list, "pixel": pixel_list[0]}
return xr.Dataset(data_vars=data_vars, coords=coords)
def open_noise_range_dataset(noise: esa_safe.PathType) -> xr.Dataset:
noise_vectors = esa_safe.parse_tag_as_list(noise, ".//noiseRangeVector", "noise")
azimuth_time_list = []
pixel_list = []
line_list = []
noiseRangeLut_list = []
for vector in noise_vectors:
azimuth_time_list.append(vector["azimuthTime"])
line_list.append(vector["line"])
pixel = np.fromstring(vector["pixel"]["$"], dtype=int, sep=" ") # type: ignore
pixel_list.append(pixel)
noiseRangeLut = np.fromstring(vector["noiseRangeLut"]["$"], dtype=np.float32, sep=" ") # type: ignore
noiseRangeLut_list.append(noiseRangeLut)
pixel = np.array(pixel_list)
if not np.allclose(pixel, pixel[0]):
raise ValueError(
"Unable to organise noise vectors in a regular line-pixel grid"
)
data_vars = {
"azimuth_time": ("line", [np.datetime64(dt) for dt in azimuth_time_list]),
"noiseRangeLut": (("line", "pixel"), noiseRangeLut_list),
}
coords = {"line": line_list, "pixel": pixel_list[0]}
return xr.Dataset(data_vars=data_vars, coords=coords)
def open_noise_azimuth_dataset(noise: esa_safe.PathType) -> xr.Dataset:
noise_vectors = esa_safe.parse_tag_as_list(noise, ".//noiseAzimuthVector", "noise")
first_range_sample = []
line_list = []
noiseAzimuthLut_list = []
for vector in noise_vectors:
first_range_sample.append(vector["firstRangeSample"])
line = np.fromstring(vector["line"]["$"], dtype=int, sep=" ") # type: ignore
line_list.append(line)
noiseAzimuthLut = np.fromstring(vector["noiseAzimuthLut"]["$"], dtype=np.float32, sep=" ") # type: ignore
noiseAzimuthLut_list.append(noiseAzimuthLut)
# BROKEN: GRDs have line and noiseAzimuthLut of different size, we take the first one
data_vars = {}
coords = {}
if first_range_sample:
data_vars["noiseAzimuthLut"] = ("line", noiseAzimuthLut_list[0])
coords["line"] = line_list[0]
return xr.Dataset(data_vars=data_vars, coords=coords)
def open_coordinate_conversion_dataset(
annotation_path: esa_safe.PathType,
) -> xr.Dataset:
coordinate_conversion = esa_safe.parse_tag_as_list(
annotation_path, ".//coordinateConversionList/coordinateConversion"
)
gr0 = []
sr0 = []
azimuth_time = []
slant_range_time = []
srgrCoefficients: T.List[T.List[float]] = []
grsrCoefficients: T.List[T.List[float]] = []
for values in coordinate_conversion:
sr0.append(values["sr0"])
gr0.append(values["gr0"])
azimuth_time.append(values["azimuthTime"])
slant_range_time.append(values["slantRangeTime"])
srgrCoefficients.append(
[float(v) for v in values["srgrCoefficients"]["$"].split()]
)
grsrCoefficients.append(
[float(v) for v in values["grsrCoefficients"]["$"].split()]
)
coords: T.Dict[str, T.Any] = {}
data_vars: T.Dict[str, T.Any] = {}
if srgrCoefficients:
coords["azimuth_time"] = [np.datetime64(dt) for dt in azimuth_time]
coords["degree"] = list(range(len(srgrCoefficients[0])))
data_vars["gr0"] = ("azimuth_time", gr0)
data_vars["sr0"] = ("azimuth_time", sr0)
data_vars["slant_range_time"] = ("azimuth_time", slant_range_time)
data_vars["srgrCoefficients"] = (("azimuth_time", "degree"), srgrCoefficients)
data_vars["grsrCoefficients"] = (("azimuth_time", "degree"), grsrCoefficients)
return xr.Dataset(data_vars=data_vars, coords=coords)
def open_gcp_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
geolocation_grid_points = esa_safe.parse_tag_as_list(
annotation, ".//geolocationGridPoint"
)
azimuth_time = []
slant_range_time = []
line_set = set()
pixel_set = set()
for ggp in geolocation_grid_points:
if ggp["line"] not in line_set:
azimuth_time.append(np.datetime64(ggp["azimuthTime"]))
line_set.add(ggp["line"])
if ggp["pixel"] not in pixel_set:
slant_range_time.append(ggp["slantRangeTime"])
pixel_set.add(ggp["pixel"])
shape = (len(azimuth_time), len(slant_range_time))
dims = ("azimuth_time", "slant_range_time")
data_vars = {
"latitude": (dims, np.full(shape, np.nan)),
"longitude": (dims, np.full(shape, np.nan)),
"height": (dims, np.full(shape, np.nan)),
"incidenceAngle": (dims, np.full(shape, np.nan)),
"elevationAngle": (dims, np.full(shape, np.nan)),
}
line = sorted(line_set)
pixel = sorted(pixel_set)
for ggp in geolocation_grid_points:
for var in data_vars:
j = line.index(ggp["line"])
i = pixel.index(ggp["pixel"])
data_vars[var][1][j, i] = ggp[var]
ds = xr.Dataset(
data_vars=data_vars,
coords={
"azimuth_time": [np.datetime64(dt) for dt in azimuth_time],
"slant_range_time": slant_range_time,
"line": ("azimuth_time", line),
"pixel": ("slant_range_time", pixel),
},
)
return ds
def open_attitude_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
attitudes = esa_safe.parse_tag_as_list(annotation, ".//attitude")
variables = ["q0", "q1", "q2", "q3", "wx", "wy", "wz", "pitch", "roll", "yaw"]
azimuth_time: T.List[T.Any] = []
data_vars: T.Dict[str, T.Any] = {var: ("azimuth_time", []) for var in variables}
for attitude in attitudes:
azimuth_time.append(attitude["time"])
for var in variables:
data_vars[var][1].append(attitude[var])
ds = xr.Dataset(
data_vars=data_vars,
coords={"azimuth_time": [np.datetime64(dt) for dt in azimuth_time]},
)
return ds
def open_orbit_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
orbits = esa_safe.parse_tag_as_list(annotation, ".//orbit")
reference_system = orbits[0]["frame"]
variables = ["position", "velocity"]
data: T.Dict[str, T.List[T.Any]] = {var: [[], [], []] for var in variables}
azimuth_time: T.List[T.Any] = []
for orbit in orbits:
azimuth_time.append(orbit["time"])
data["position"][0].append(orbit["position"]["x"])
data["position"][1].append(orbit["position"]["y"])
data["position"][2].append(orbit["position"]["z"])
data["velocity"][0].append(orbit["velocity"]["x"])
data["velocity"][1].append(orbit["velocity"]["y"])
data["velocity"][2].append(orbit["velocity"]["z"])
if orbit["frame"] != reference_system:
warnings.warn(
"reference_system is not consistent in all the state vectors. "
)
reference_system = None
position = xr.Variable(data=data["position"], dims=("axis", "azimuth_time")) # type: ignore
velocity = xr.Variable(data=data["velocity"], dims=("axis", "azimuth_time")) # type: ignore
attrs = {}
if reference_system is not None:
attrs.update({"reference_system": reference_system})
ds = xr.Dataset(
data_vars={"position": position, "velocity": velocity},
attrs=attrs,
coords={
"azimuth_time": [np.datetime64(dt) for dt in azimuth_time],
"axis": [0, 1, 2],
},
)
return ds
def open_dc_estimate_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
dc_estimates = esa_safe.parse_tag_as_list(annotation, ".//dcEstimate")
azimuth_time = []
t0 = []
data_dc_poly = []
for dc_estimate in dc_estimates:
azimuth_time.append(dc_estimate["azimuthTime"])
t0.append(dc_estimate["t0"])
data_dc_poly.append(
[float(c) for c in dc_estimate["dataDcPolynomial"]["$"].split()]
)
ds = xr.Dataset(
data_vars={
"t0": ("azimuth_time", t0),
"data_dc_polynomial": (("azimuth_time", "degree"), data_dc_poly),
},
coords={
"azimuth_time": [np.datetime64(at) for at in azimuth_time],
"degree": list(range(len(data_dc_poly[0]))),
},
)
return ds
def open_azimuth_fm_rate_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
azimuth_fm_rates = esa_safe.parse_tag_as_list(annotation, ".//azimuthFmRate")
azimuth_time = []
t0 = []
azimuth_fm_rate_poly = []
for azimuth_fm_rate in azimuth_fm_rates:
azimuth_time.append(azimuth_fm_rate["azimuthTime"])
t0.append(azimuth_fm_rate["t0"])
azimuth_fm_rate_poly.append(
[float(c) for c in azimuth_fm_rate["azimuthFmRatePolynomial"]["$"].split()]
)
ds = xr.Dataset(
data_vars={
"t0": ("azimuth_time", t0),
"azimuth_fm_rate_polynomial": (
("azimuth_time", "degree"),
azimuth_fm_rate_poly,
),
},
coords={
"azimuth_time": [np.datetime64(at) for at in azimuth_time],
"degree": list(range(len(azimuth_fm_rate_poly[0]))),
},
)
return ds
def find_available_groups(
product_files: T.Dict[str, T.Tuple[str, str, str, str, str]],
product_path: str,
check_files_exist: bool = False,
fs: fsspec.AbstractFileSystem = fsspec.filesystem("file"),
) -> T.Dict[str, T.List[str]]:
groups: T.Dict[str, T.List[str]] = {}
for path, (type, _, swath, polarization, _) in product_files.items():
swath_pol_group = f"{swath}/{polarization}".upper()
abspath = os.path.join(product_path, os.path.normpath(path))
if check_files_exist:
if not fs.exists(abspath):
continue
if type == "s1Level1ProductSchema":
groups[swath.upper()] = [""]
groups[swath_pol_group] = [abspath] + groups.get(swath_pol_group, [])
for metadata_group in [
"orbit",
"attitude",
"azimuth_fm_rate",
"dc_estimate",
"gcp",
"coordinate_conversion",
]:
groups[f"{swath_pol_group}/{metadata_group}"] = [abspath]
elif type == "s1Level1CalibrationSchema":
groups[f"{swath_pol_group}/calibration"] = [abspath]
elif type == "s1Level1NoiseSchema":
groups[f"{swath_pol_group}/noise_range"] = [abspath]
groups[f"{swath_pol_group}/noise_azimuth"] = [abspath]
elif type == "s1Level1MeasurementSchema":
groups[swath_pol_group] = [abspath] + groups.get(swath_pol_group, [])
return groups
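# Illustrative shape of the returned mapping (hypothetical content, not real paths):
#   "IW1"                -> [""]
#   "IW1/VV"             -> [measurement and annotation absolute paths]
#   "IW1/VV/orbit"       -> [annotation absolute path]   (same for attitude, gcp, ...)
#   "IW1/VV/calibration" -> [calibration absolute path]
#   "IW1/VV/noise_range" -> [noise absolute path]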
def open_pol_dataset(
measurement: esa_safe.PathOrFileType,
annotation: esa_safe.PathOrFileType,
fs: T.Optional[fsspec.AbstractFileSystem] = None,
) -> xr.Dataset:
product_information = esa_safe.parse_tag(annotation, ".//productInformation")
image_information = esa_safe.parse_tag(annotation, ".//imageInformation")
swath_timing = esa_safe.parse_tag(annotation, ".//swathTiming")
incidence_angle_mid_swath = image_information["incidenceAngleMidSwath"]
number_of_samples = image_information["numberOfSamples"]
first_slant_range_time = image_information["slantRangeTime"]
slant_range_time_interval = 1 / product_information["rangeSamplingRate"]
number_of_lines = image_information["numberOfLines"]
first_azimuth_time = image_information["productFirstLineUtcTime"]
azimuth_time_interval = image_information["azimuthTimeInterval"]
number_of_bursts = swath_timing["burstList"]["@count"]
range_pixel_spacing = image_information["rangePixelSpacing"]
anx_datetime = image_information["ascendingNodeTime"]
attrs = {
"sar:center_frequency": product_information["radarFrequency"] / 10 ** 9,
"sar:pixel_spacing_azimuth": image_information["azimuthPixelSpacing"],
"sar:pixel_spacing_range": range_pixel_spaxing,
"azimuth_time_interval": azimuth_time_interval,
"slant_range_time_interval": slant_range_time_interval,
"incidence_angle_mid_swath": incidence_angle_mid_swath,
"sat:anx_datetime": anx_datetime + "Z",
}
encoding = {}
swap_dims = {}
chunks: T.Union[None, T.Dict[str, int]] = None
azimuth_time = pd.date_range(
start=first_azimuth_time,
periods=number_of_lines,
freq=pd.Timedelta(azimuth_time_interval, "s"),
).values
if number_of_bursts == 0:
swap_dims = {"line": "azimuth_time", "pixel": "slant_range_time"}
else:
lines_per_burst = swath_timing["linesPerBurst"]
attrs.update(
{
"azimuth_steering_rate": product_information["azimuthSteeringRate"],
"number_of_bursts": number_of_bursts,
"lines_per_burst": lines_per_burst,
}
)
for burst_index, burst in enumerate(swath_timing["burstList"]["burst"]):
first_azimuth_time_burst = burst["azimuthTime"]
azimuth_time_burst = pd.date_range(
start=first_azimuth_time_burst,
periods=lines_per_burst,
freq=pd.Timedelta(azimuth_time_interval, "s"),
)
azimuth_time[
lines_per_burst * burst_index : lines_per_burst * (burst_index + 1)
] = azimuth_time_burst
# chunk at burst boundaries if dask is present
try:
import dask # noqa
encoding["preferred_chunks"] = {"line": lines_per_burst}
chunks = {}
except ModuleNotFoundError:
pass
coords = {
"pixel": np.arange(0, number_of_samples, dtype=int),
"line": np.arange(0, number_of_lines, dtype=int),
"azimuth_time": ("line", azimuth_time),
}
if product_information["projection"] == "Slant Range":
slant_range_time = np.linspace(
first_slant_range_time,
first_slant_range_time
+ slant_range_time_interval * (number_of_samples - 1),
number_of_samples,
)
coords["slant_range_time"] = ("pixel", slant_range_time)
elif product_information["projection"] == "Ground Range":
ground_range = np.linspace(
0,
range_pixel_spacing * (number_of_samples - 1),
number_of_samples,
)
coords["ground_range"] = ("pixel", ground_range)
swap_dims = {"line": "azimuth_time", "pixel": "ground_range"}
else:
raise ValueError(f"unknown projection {product_information["projection"]}")
# temporary ugly work-around to get fsspec support with rasterio >= 1.3a3
# the try block uses fsspec if rasterio >= 1.3a3 is installed
# the except block falls back to standard file based rasterio
# the with is needed to avoid polluting stderr when the try block fails
with contextlib.redirect_stderr(open("/dev/null", "w")):
try:
arr = xr.open_dataarray(fs.open(measurement), engine="rasterio", chunks=chunks) # type: ignore
except AttributeError:
arr = xr.open_dataarray(measurement, engine="rasterio") # type: ignore
arr = arr.squeeze("band").drop_vars(["band", "spatial_ref"])
arr = arr.rename({"y": "line", "x": "pixel"})
arr = arr.assign_coords(coords)
arr = arr.swap_dims(swap_dims)
arr.attrs.update(attrs)
arr.encoding.update(encoding)
return xr.Dataset(attrs=attrs, data_vars={"measurement": arr})
def find_bursts_index(
pol_dataset: xr.Dataset,
azimuth_anx_time: float,
use_center: bool = False,
) -> int:
lines_per_burst = pol_dataset.attrs["lines_per_burst"]
anx_datetime = np.datetime64(pol_dataset.attrs["sat:anx_datetime"].replace("Z", ""))
azimuth_anx_time = pd.Timedelta(azimuth_anx_time, unit="s")
if use_center:
azimuth_anx_time_center = (
pol_dataset.azimuth_time[lines_per_burst // 2 :: lines_per_burst]
- anx_datetime
)
distance = abs(azimuth_anx_time_center - azimuth_anx_time)
else:
azimuth_anx_time_first_line = (
pol_dataset.azimuth_time[::lines_per_burst] - anx_datetime
)
distance = abs(azimuth_anx_time_first_line - azimuth_anx_time)
return distance.argmin().item() # type: ignore
def crop_burst_dataset(
pol_dataset: xr.Dataset,
burst_index: T.Optional[int] = None,
azimuth_anx_time: T.Optional[float] = None,
use_center: bool = False,
) -> xr.Dataset:
if (burst_index is not None) and (azimuth_anx_time is not None):
raise TypeError(
"only one keyword between 'burst_index' and 'azimuth_anx_time' must be defined"
)
if burst_index is None:
if azimuth_anx_time is not None:
burst_index = find_bursts_index(
pol_dataset, azimuth_anx_time, use_center=use_center
)
else:
raise TypeError(
"one keyword between 'burst_index' and 'azimuth_anx_time' must be defined"
)
if burst_index < 0 or burst_index >= pol_dataset.attrs["number_of_bursts"]:
raise IndexError(f"burst_index={burst_index} out of bounds")
lines_per_burst = pol_dataset.attrs["lines_per_burst"]
ds = pol_dataset.sel(
line=slice(
lines_per_burst * burst_index, lines_per_burst * (burst_index + 1) - 1
)
)
anx_datetime = np.datetime64(pol_dataset.attrs["sat:anx_datetime"].replace("Z", ""))
burst_azimuth_anx_times = ds.azimuth_time - anx_datetime
ds.attrs["azimuth_anx_time"] = burst_azimuth_anx_times.values[0] / ONE_SECOND
ds = ds.swap_dims({"line": "azimuth_time", "pixel": "slant_range_time"})
ds.attrs["burst_index"] = burst_index
return ds
def mosaic_slc_iw(slc_iw_image: xr.Dataset, crop: int = 90) -> xr.Dataset:
bursts = []
for i in range(slc_iw_image.attrs["number_of_bursts"]):
burst = crop_burst_dataset(slc_iw_image, burst_index=i)
bursts.append(burst.isel(azimuth_time=slice(crop, -crop)))
return xr.concat(bursts, dim="azimuth_time")
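# Illustrative usage sketch (not part of the original module; the SAFE path is hypothetical):
#   ds = open_sentinel1_dataset("S1B_IW_SLC__1SDV_...SAFE", group="IW1/VV")
#   burst = crop_burst_dataset(ds, burst_index=2)   # indexed by azimuth_time / slant_range_time
#   swath = mosaic_slc_iw(ds, crop=90)              # bursts concatenated along azimuth_time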
def calibrate_amplitude(
digital_number: xr.DataArray, calibration_lut: xr.DataArray
) -> xr.DataArray:
calibration = calibration_lut.interp(
line=digital_number.line,
pixel=digital_number.pixel,
).astype(np.float32)
amplitude = digital_number / calibration
amplitude.attrs.update(digital_number.attrs)
try:
lut_name = calibration_lut.attrs["long_name"].partition("calibration LUT")[0]
amplitude.attrs["long_name"] = f"amplitude for {lut_name}"
amplitude.attrs["units"] = calibration.attrs["units"]
except KeyError:
pass
return amplitude
def calibrate_intensity(
digital_number: xr.DataArray,
calibration_lut: xr.DataArray,
as_db: bool = False,
min_db: T.Optional[float] = -40.0,
) -> xr.DataArray:
amplitude = calibrate_amplitude(digital_number, calibration_lut)
intensity = abs(amplitude) ** 2
if as_db:
intensity = 10.0 * np.log10(intensity)
if min_db is not None:
intensity = np.maximum(intensity, min_db)
intensity.attrs.update(amplitude.attrs)
intensity.attrs["units"] = "dB"
else:
intensity.attrs.update(amplitude.attrs)
intensity.attrs["units"] = "m2 m-2"
try:
lut_name = amplitude.attrs["long_name"].partition("amplitude for ")[2]
intensity.attrs["long_name"] = lut_name
except KeyError:
pass
return intensity
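# Illustrative usage sketch (not part of the original module): `measurement` is assumed to be
# the "IW1/VV" group and `cal` the matching "IW1/VV/calibration" group of the same product:
#   beta0 = calibrate_intensity(measurement.measurement, cal.betaNought, as_db=True)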
def slant_range_time_to_ground_range(
azimuth_time: xr.DataArray,
slant_range_time: xr.DataArray,
coordinate_conversion: xr.DataArray,
) -> xr.DataArray:
slant_range = SPEED_OF_LIGHT / 2.0 * slant_range_time
cc = coordinate_conversion.interp(azimuth_time=azimuth_time)
x = slant_range - cc.sr0
ground_range = (cc.srgrCoefficients * x ** cc.degree).sum("degree")
return ground_range # type: ignore
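# Note on the conversion above: slant range is first obtained as c / 2 * slant_range_time,
# then the annotated SRGR polynomial is evaluated as
#   ground_range = sum_k srgrCoefficients[k] * (slant_range - sr0) ** k
# with the coefficients interpolated at the requested azimuth_time.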
def assign_slant_range_time_coord(
measurement: xr.Dataset, coordinate_conversion: xr.Dataset
) -> xr.Dataset:
x = measurement.ground_range - coordinate_conversion.gr0
slant_range = (
coordinate_conversion.grsrCoefficients * x ** coordinate_conversion.degree
).sum(dim="degree")
slant_range_coord = slant_range.interp(
azimuth_time=measurement.azimuth_time, ground_range=measurement.ground_range
).data
slant_range_time = 2 / SPEED_OF_LIGHT * slant_range_coord
measurement = measurement.assign_coords(
slant_range_time=(("azimuth_time", "ground_range"), slant_range_time)
) # type: ignore
return measurement
def build_burst_id(lat: float, lon: float, relative_orbit: int) -> str:
lat = int(round(lat * 10))
lon = int(round(lon * 10))
n_or_s = "N" if lat >= 0 else "S"
e_or_w = "E" if lon >= 0 else "W"
burst_id = f"R{relative_orbit:03}" f"-{n_or_s}{lat:03}" f"-{e_or_w}{lon:04}"
return burst_id
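# Illustrative example (not part of the original module):
#   build_burst_id(lat=45.67, lon=12.34, relative_orbit=22)  ->  "R022-N457-E0123"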
def compute_burst_centres(
gcp: xr.Dataset,
) -> T.Tuple[T.List[float], T.List[float]]:
gcp_rolling = gcp.rolling(azimuth_time=2, min_periods=1)
gc_az_win = gcp_rolling.construct(azimuth_time="az_win")
centre = gc_az_win.mean(["az_win", "slant_range_time"])
centre = centre.isel(azimuth_time=slice(1, None))
return centre.latitude.values.tolist(), centre.longitude.values.tolist()
METADATA_OPENERS = {
"orbit": open_orbit_dataset,
"attitude": open_attitude_dataset,
"azimuth_fm_rate": open_azimuth_fm_rate_dataset,
"dc_estimate": open_dc_estimate_dataset,
"gcp": open_gcp_dataset,
"coordinate_conversion": open_coordinate_conversion_dataset,
"calibration": open_calibration_dataset,
"noise_range": open_noise_range_dataset,
"noise_azimuth": open_noise_azimuth_dataset,
}
def do_override_product_files(
template: str, product_files: T.Dict[str, T.Tuple[str, str, str, str, str]]
) -> T.Dict[str, T.Tuple[str, str, str, str, str]]:
overridden_product_files = {}
for path, description in product_files.items():
type, prefix, swath, polarization, date = description
ext = os.path.splitext(path)[1]
dirname = os.path.dirname(path)
overridden_path = template.format(**locals())
overridden_product_files[overridden_path] = description
return overridden_product_files
def open_sentinel1_dataset(
product_urlpath: esa_safe.PathType,
*,
drop_variables: T.Optional[T.Tuple[str]] = None,
group: T.Optional[str] = None,
fs: T.Optional[fsspec.AbstractFileSystem] = None,
storage_options: T.Optional[T.Dict[str, T.Any]] = None,
check_files_exist: bool = False,
override_product_files: T.Optional[str] = None,
) -> xr.Dataset:
if drop_variables is not None:
warnings.warn("'drop_variables' is currently ignored")
fs, manifest_path = get_fs_path(product_urlpath, fs, storage_options)
if fs.isdir(manifest_path):
manifest_path = os.path.join(manifest_path, "manifest.safe")
product_path = os.path.dirname(manifest_path)
with fs.open(manifest_path) as file:
product_attrs, product_files = esa_safe.parse_manifest_sentinel1(file)
if override_product_files:
product_files = do_override_product_files(override_product_files, product_files)
groups = find_available_groups(
product_files, product_path, check_files_exist=check_files_exist, fs=fs
)
group, burst_index = normalise_group(group)
absgroup = f"/{group}"
if group != "" and group not in groups:
raise ValueError(
f"Invalid group {group!r}, please select one of the following groups:"
f"\n{list(groups.keys())}"
)
metadata = ""
ds = xr.Dataset()
if group == "":
subgroups = list(groups)
else:
subgroups = [
g[len(group) + 1 :] for g in groups if g.startswith(group) and g != group
]
if group.count("/") == 1:
with fs.open(groups[group][1]) as annotation:
ds = open_pol_dataset(groups[group][0], annotation, fs=fs)
elif group.count("/") == 2:
_, _, metadata = group.split("/", 2)
with fs.open(groups[group][0]) as file:
ds = METADATA_OPENERS[metadata](file)
for data_var in ds.data_vars:
ds.data_vars[data_var].attrs.update(product_attrs)
product_attrs["group"] = absgroup
if len(subgroups):
product_attrs["subgroups"] = subgroups
ds.attrs.update(product_attrs) # type: ignore
if group.count("/") == 1 and burst_index is not None:
ds = crop_burst_dataset(ds, burst_index=burst_index)
conventions.update_attributes(ds, group=metadata)
return ds
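# Illustrative group selection (not part of the original module; the SAFE path is hypothetical):
#   root  = open_sentinel1_dataset("S1A_IW_SLC__1SDV_...SAFE")                    # root, lists "subgroups"
#   vv    = open_sentinel1_dataset("S1A_IW_SLC__1SDV_...SAFE", group="IW2/VV")    # measurement group
#   orbit = open_sentinel1_dataset("S1A_IW_SLC__1SDV_...SAFE", group="IW2/VV/orbit")
#   burst = open_sentinel1_dataset("S1A_IW_SLC__1SDV_...SAFE", group="IW2/VV/3")  # burst 3, pre-cropped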
|
"""Map Sentinel-1 data products to xarray.
References:
- Sentinel-1 document library: https://sentinels.copernicus.eu/web/sentinel/user-guides/sentinel-1-sar/document-library
- Sentinel-1 Product Specification v3.9 07 May 2021 S1-RS-MDA-52-7441-3-9 documenting IPF 3.40
https://sentinel.esa.int/documents/247904/1877131/S1-RS-MDA-52-7441-3-9-2_Sentinel-1ProductSpecification.pdf
- Sentinel-1 Product Specification v3.7 27 February 2020 S1-RS-MDA-52-7441 documenting IPF 3.30
https://sentinel.esa.int/documents/247904/1877131/Sentinel-1-Product-Specification
"""
import contextlib
import os
import typing as T
import warnings
import fsspec
import numpy as np
import pandas as pd
import xarray as xr
from . import conventions, esa_safe
SPEED_OF_LIGHT = 299_792_458 # m / s
ONE_SECOND = np.timedelta64(1, "s")
def get_fs_path(
urlpath_or_path: esa_safe.PathType,
fs: T.Optional[fsspec.AbstractFileSystem] = None,
storage_options: T.Optional[T.Dict[str, T.Any]] = None,
) -> T.Tuple[fsspec.AbstractFileSystem, str]:
if fs is not None and storage_options is not None:
raise TypeError("only one of 'fs' and 'storage_options' can be not None")
if fs is None:
fs, _, paths = fsspec.get_fs_token_paths(
urlpath_or_path, storage_options=storage_options
)
if len(paths) == 0:
raise ValueError(f"file or object not found {urlpath_or_path!r}")
elif len(paths) > 1:
raise ValueError(f"multiple files or objects found {urlpath_or_path!r}")
path = paths[0]
else:
path = str(urlpath_or_path)
return fs, path
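# normalise_group() below accepts group strings such as (illustrative values)
# "IW1/VH" -> ("IW1/VH", None), "/IW1/VH/orbit" -> ("IW1/VH/orbit", None) and
# "IW1/VH/3" -> ("IW1/VH", 3): a leading "/" is stripped and a trailing numeric
# component is returned separately as the burst index.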
def normalise_group(group: T.Optional[str]) -> T.Tuple[str, T.Optional[int]]:
if group is None:
group = ""
if group.startswith("/"):
group = group[1:]
burst_index = None
parent_group, _, last_name = group.rpartition("/")
if parent_group.count("/") == 1 and last_name.isnumeric():
burst_index = int(last_name)
group = parent_group
return group, burst_index
def open_calibration_dataset(calibration: esa_safe.PathType) -> xr.Dataset:
calibration_vectors = esa_safe.parse_tag_as_list(
calibration, ".//calibrationVector", "calibration"
)
azimuth_time_list = []
pixel_list = []
line_list = []
sigmaNought_list = []
betaNought_list = []
gamma_list = []
dn_list = []
for vector in calibration_vectors:
azimuth_time_list.append(vector["azimuthTime"])
line_list.append(vector["line"])
pixel = np.fromstring(vector["pixel"]["$"], dtype=int, sep=" ") # type: ignore
pixel_list.append(pixel)
sigmaNought = np.fromstring(vector["sigmaNought"]["$"], dtype=np.float32, sep=" ") # type: ignore
sigmaNought_list.append(sigmaNought)
betaNought = np.fromstring(vector["betaNought"]["$"], dtype=np.float32, sep=" ") # type: ignore
betaNought_list.append(betaNought)
gamma = np.fromstring(vector["gamma"]["$"], dtype=np.float32, sep=" ") # type: ignore
gamma_list.append(gamma)
dn = np.fromstring(vector["dn"]["$"], dtype=np.float32, sep=" ") # type: ignore
dn_list.append(dn)
pixel = np.array(pixel_list)
if not np.allclose(pixel, pixel[0]):
raise ValueError(
"Unable to organise calibration vectors in a regular line-pixel grid"
)
data_vars = {
"azimuth_time": ("line", [np.datetime64(dt) for dt in azimuth_time_list]),
"sigmaNought": (("line", "pixel"), sigmaNought_list),
"betaNought": (("line", "pixel"), betaNought_list),
"gamma": (("line", "pixel"), gamma_list),
"dn": (("line", "pixel"), dn_list),
}
coords = {"line": line_list, "pixel": pixel_list[0]}
return xr.Dataset(data_vars=data_vars, coords=coords)
def open_noise_range_dataset(noise: esa_safe.PathType) -> xr.Dataset:
noise_vectors = esa_safe.parse_tag_as_list(noise, ".//noiseRangeVector", "noise")
azimuth_time_list = []
pixel_list = []
line_list = []
noiseRangeLut_list = []
for vector in noise_vectors:
azimuth_time_list.append(vector["azimuthTime"])
line_list.append(vector["line"])
pixel = np.fromstring(vector["pixel"]["$"], dtype=int, sep=" ") # type: ignore
pixel_list.append(pixel)
noiseRangeLut = np.fromstring(vector["noiseRangeLut"]["$"], dtype=np.float32, sep=" ") # type: ignore
noiseRangeLut_list.append(noiseRangeLut)
pixel = np.array(pixel_list)
if not np.allclose(pixel, pixel[0]):
raise ValueError(
"Unable to organise noise vectors in a regular line-pixel grid"
)
data_vars = {
"azimuth_time": ("line", [np.datetime64(dt) for dt in azimuth_time_list]),
"noiseRangeLut": (("line", "pixel"), noiseRangeLut_list),
}
coords = {"line": line_list, "pixel": pixel_list[0]}
return xr.Dataset(data_vars=data_vars, coords=coords)
def open_noise_azimuth_dataset(noise: esa_safe.PathType) -> xr.Dataset:
noise_vectors = esa_safe.parse_tag_as_list(noise, ".//noiseAzimuthVector", "noise")
first_range_sample = []
line_list = []
noiseAzimuthLut_list = []
for vector in noise_vectors:
first_range_sample.append(vector["firstRangeSample"])
line = np.fromstring(vector["line"]["$"], dtype=int, sep=" ") # type: ignore
line_list.append(line)
noiseAzimuthLut = np.fromstring(vector["noiseAzimuthLut"]["$"], dtype=np.float32, sep=" ") # type: ignore
noiseAzimuthLut_list.append(noiseAzimuthLut)
# BROKEN: GRDs have line and noiseAzimuthLut of different size, we take the first one
data_vars = {}
coords = {}
if first_range_sample:
data_vars["noiseAzimuthLut"] = ("line", noiseAzimuthLut_list[0])
coords["line"] = line_list[0]
return xr.Dataset(data_vars=data_vars, coords=coords)
def open_coordinate_conversion_dataset(
annotation_path: esa_safe.PathType,
) -> xr.Dataset:
coordinate_conversion = esa_safe.parse_tag_as_list(
annotation_path, ".//coordinateConversionList/coordinateConversion"
)
gr0 = []
sr0 = []
azimuth_time = []
slant_range_time = []
srgrCoefficients: T.List[T.List[float]] = []
grsrCoefficients: T.List[T.List[float]] = []
for values in coordinate_conversion:
sr0.append(values["sr0"])
gr0.append(values["gr0"])
azimuth_time.append(values["azimuthTime"])
slant_range_time.append(values["slantRangeTime"])
srgrCoefficients.append(
[float(v) for v in values["srgrCoefficients"]["$"].split()]
)
grsrCoefficients.append(
[float(v) for v in values["grsrCoefficients"]["$"].split()]
)
coords: T.Dict[str, T.Any] = {}
data_vars: T.Dict[str, T.Any] = {}
if srgrCoefficients:
coords["azimuth_time"] = [np.datetime64(dt) for dt in azimuth_time]
coords["degree"] = list(range(len(srgrCoefficients[0])))
data_vars["gr0"] = ("azimuth_time", gr0)
data_vars["sr0"] = ("azimuth_time", sr0)
data_vars["slant_range_time"] = ("azimuth_time", slant_range_time)
data_vars["srgrCoefficients"] = (("azimuth_time", "degree"), srgrCoefficients)
data_vars["grsrCoefficients"] = (("azimuth_time", "degree"), grsrCoefficients)
return xr.Dataset(data_vars=data_vars, coords=coords)
def open_gcp_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
geolocation_grid_points = esa_safe.parse_tag_as_list(
annotation, ".//geolocationGridPoint"
)
azimuth_time = []
slant_range_time = []
line_set = set()
pixel_set = set()
for ggp in geolocation_grid_points:
if ggp["line"] not in line_set:
azimuth_time.append(np.datetime64(ggp["azimuthTime"]))
line_set.add(ggp["line"])
if ggp["pixel"] not in pixel_set:
slant_range_time.append(ggp["slantRangeTime"])
pixel_set.add(ggp["pixel"])
shape = (len(azimuth_time), len(slant_range_time))
dims = ("azimuth_time", "slant_range_time")
data_vars = {
"latitude": (dims, np.full(shape, np.nan)),
"longitude": (dims, np.full(shape, np.nan)),
"height": (dims, np.full(shape, np.nan)),
"incidenceAngle": (dims, np.full(shape, np.nan)),
"elevationAngle": (dims, np.full(shape, np.nan)),
}
line = sorted(line_set)
pixel = sorted(pixel_set)
for ggp in geolocation_grid_points:
for var in data_vars:
j = line.index(ggp["line"])
i = pixel.index(ggp["pixel"])
data_vars[var][1][j, i] = ggp[var]
ds = xr.Dataset(
data_vars=data_vars,
coords={
"azimuth_time": [np.datetime64(dt) for dt in azimuth_time],
"slant_range_time": slant_range_time,
"line": ("azimuth_time", line),
"pixel": ("slant_range_time", pixel),
},
)
return ds
def open_attitude_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
attitudes = esa_safe.parse_tag_as_list(annotation, ".//attitude")
variables = ["q0", "q1", "q2", "q3", "wx", "wy", "wz", "pitch", "roll", "yaw"]
azimuth_time: T.List[T.Any] = []
data_vars: T.Dict[str, T.Any] = {var: ("azimuth_time", []) for var in variables}
for attitude in attitudes:
azimuth_time.append(attitude["time"])
for var in variables:
data_vars[var][1].append(attitude[var])
ds = xr.Dataset(
data_vars=data_vars,
coords={"azimuth_time": [np.datetime64(dt) for dt in azimuth_time]},
)
return ds
def open_orbit_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
orbits = esa_safe.parse_tag_as_list(annotation, ".//orbit")
reference_system = orbits[0]["frame"]
variables = ["position", "velocity"]
data: T.Dict[str, T.List[T.Any]] = {var: [[], [], []] for var in variables}
azimuth_time: T.List[T.Any] = []
for orbit in orbits:
azimuth_time.append(orbit["time"])
data["position"][0].append(orbit["position"]["x"])
data["position"][1].append(orbit["position"]["y"])
data["position"][2].append(orbit["position"]["z"])
data["velocity"][0].append(orbit["velocity"]["x"])
data["velocity"][1].append(orbit["velocity"]["y"])
data["velocity"][2].append(orbit["velocity"]["z"])
if orbit["frame"] != reference_system:
warnings.warn(
"reference_system is not consistent in all the state vectors. "
)
reference_system = None
position = xr.Variable(data=data["position"], dims=("axis", "azimuth_time")) # type: ignore
velocity = xr.Variable(data=data["velocity"], dims=("axis", "azimuth_time")) # type: ignore
attrs = {}
if reference_system is not None:
attrs.update({"reference_system": reference_system})
ds = xr.Dataset(
data_vars={"position": position, "velocity": velocity},
attrs=attrs,
coords={
"azimuth_time": [np.datetime64(dt) for dt in azimuth_time],
"axis": [0, 1, 2],
},
)
return ds
def open_dc_estimate_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
dc_estimates = esa_safe.parse_tag_as_list(annotation, ".//dcEstimate")
azimuth_time = []
t0 = []
data_dc_poly = []
for dc_estimate in dc_estimates:
azimuth_time.append(dc_estimate["azimuthTime"])
t0.append(dc_estimate["t0"])
data_dc_poly.append(
[float(c) for c in dc_estimate["dataDcPolynomial"]["$"].split()]
)
ds = xr.Dataset(
data_vars={
"t0": ("azimuth_time", t0),
"data_dc_polynomial": (("azimuth_time", "degree"), data_dc_poly),
},
coords={
"azimuth_time": [np.datetime64(at) for at in azimuth_time],
"degree": list(range(len(data_dc_poly[0]))),
},
)
return ds
def open_azimuth_fm_rate_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
azimuth_fm_rates = esa_safe.parse_tag_as_list(annotation, ".//azimuthFmRate")
azimuth_time = []
t0 = []
azimuth_fm_rate_poly = []
for azimuth_fm_rate in azimuth_fm_rates:
azimuth_time.append(azimuth_fm_rate["azimuthTime"])
t0.append(azimuth_fm_rate["t0"])
azimuth_fm_rate_poly.append(
[float(c) for c in azimuth_fm_rate["azimuthFmRatePolynomial"]["$"].split()]
)
ds = xr.Dataset(
data_vars={
"t0": ("azimuth_time", t0),
"azimuth_fm_rate_polynomial": (
("azimuth_time", "degree"),
azimuth_fm_rate_poly,
),
},
coords={
"azimuth_time": [np.datetime64(at) for at in azimuth_time],
"degree": list(range(len(azimuth_fm_rate_poly[0]))),
},
)
return ds
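# find_available_groups() below flattens the manifest entries into a group tree
# keyed by names such as (swath/polarization are illustrative) "IW1", "IW1/VH",
# "IW1/VH/orbit", "IW1/VH/calibration" or "IW1/VH/noise_range"; each value lists
# the annotation/measurement paths used to open that group.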
def find_available_groups(
product_files: T.Dict[str, T.Tuple[str, str, str, str, str]],
product_path: str,
check_files_exist: bool = False,
fs: fsspec.AbstractFileSystem = fsspec.filesystem("file"),
) -> T.Dict[str, T.List[str]]:
groups: T.Dict[str, T.List[str]] = {}
for path, (type, _, swath, polarization, _) in product_files.items():
swath_pol_group = f"{swath}/{polarization}".upper()
abspath = os.path.join(product_path, os.path.normpath(path))
if check_files_exist:
if not fs.exists(abspath):
continue
if type == "s1Level1ProductSchema":
groups[swath.upper()] = [""]
groups[swath_pol_group] = [abspath] + groups.get(swath_pol_group, [])
for metadata_group in [
"orbit",
"attitude",
"azimuth_fm_rate",
"dc_estimate",
"gcp",
"coordinate_conversion",
]:
groups[f"{swath_pol_group}/{metadata_group}"] = [abspath]
elif type == "s1Level1CalibrationSchema":
groups[f"{swath_pol_group}/calibration"] = [abspath]
elif type == "s1Level1NoiseSchema":
groups[f"{swath_pol_group}/noise_range"] = [abspath]
groups[f"{swath_pol_group}/noise_azimuth"] = [abspath]
elif type == "s1Level1MeasurementSchema":
groups[swath_pol_group] = [abspath] + groups.get(swath_pol_group, [])
return groups
def open_pol_dataset(
measurement: esa_safe.PathOrFileType,
annotation: esa_safe.PathOrFileType,
fs: T.Optional[fsspec.AbstractFileSystem] = None,
) -> xr.Dataset:
product_information = esa_safe.parse_tag(annotation, ".//productInformation")
image_information = esa_safe.parse_tag(annotation, ".//imageInformation")
swath_timing = esa_safe.parse_tag(annotation, ".//swathTiming")
incidence_angle_mid_swath = image_information["incidenceAngleMidSwath"]
number_of_samples = image_information["numberOfSamples"]
first_slant_range_time = image_information["slantRangeTime"]
slant_range_time_interval = 1 / product_information["rangeSamplingRate"]
number_of_lines = image_information["numberOfLines"]
first_azimuth_time = image_information["productFirstLineUtcTime"]
azimuth_time_interval = image_information["azimuthTimeInterval"]
number_of_bursts = swath_timing["burstList"]["@count"]
range_pixel_spacing = image_information["rangePixelSpacing"]
anx_datetime = image_information["ascendingNodeTime"]
attrs = {
"sar:center_frequency": product_information["radarFrequency"] / 10 ** 9,
"sar:pixel_spacing_azimuth": image_information["azimuthPixelSpacing"],
"sar:pixel_spacing_range": range_pixel_spaxing,
"azimuth_time_interval": azimuth_time_interval,
"slant_range_time_interval": slant_range_time_interval,
"incidence_angle_mid_swath": incidence_angle_mid_swath,
"sat:anx_datetime": anx_datetime + "Z",
}
encoding = {}
swap_dims = {}
chunks: T.Union[None, T.Dict[str, int]] = None
azimuth_time = pd.date_range(
start=first_azimuth_time,
periods=number_of_lines,
freq=pd.Timedelta(azimuth_time_interval, "s"),
).values
if number_of_bursts == 0:
swap_dims = {"line": "azimuth_time", "pixel": "slant_range_time"}
else:
lines_per_burst = swath_timing["linesPerBurst"]
attrs.update(
{
"azimuth_steering_rate": product_information["azimuthSteeringRate"],
"number_of_bursts": number_of_bursts,
"lines_per_burst": lines_per_burst,
}
)
for burst_index, burst in enumerate(swath_timing["burstList"]["burst"]):
first_azimuth_time_burst = burst["azimuthTime"]
azimuth_time_burst = pd.date_range(
start=first_azimuth_time_burst,
periods=lines_per_burst,
freq=pd.Timedelta(azimuth_time_interval, "s"),
)
azimuth_time[
lines_per_burst * burst_index : lines_per_burst * (burst_index + 1)
] = azimuth_time_burst
# chunk at burst boundaries if dask is present
try:
import dask # noqa
encoding["preferred_chunks"] = {"line": lines_per_burst}
chunks = {}
except ModuleNotFoundError:
pass
coords = {
"pixel": np.arange(0, number_of_samples, dtype=int),
"line": np.arange(0, number_of_lines, dtype=int),
"azimuth_time": ("line", azimuth_time),
}
if product_information["projection"] == "Slant Range":
slant_range_time = np.linspace(
first_slant_range_time,
first_slant_range_time
+ slant_range_time_interval * (number_of_samples - 1),
number_of_samples,
)
coords["slant_range_time"] = ("pixel", slant_range_time)
elif product_information["projection"] == "Ground Range":
ground_range = np.linspace(
0,
range_pixel_spacing * (number_of_samples - 1),
number_of_samples,
)
coords["ground_range"] = ("pixel", ground_range)
swap_dims = {"line": "azimuth_time", "pixel": "ground_range"}
else:
raise ValueError(f"unknown projection {product_information['projection']}")
# temporary ugly work-around to get fsspec support with rasterio >= 1.3a3
# the try block uses fsspec if rasterio >= 1.3a3 is installed
# the except block falls back to standard file based rasterio
# the with is needed to avoid polluting stderr when the try block fails
with contextlib.redirect_stderr(open("/dev/null", "w")):
try:
arr = xr.open_dataarray(fs.open(measurement), engine="rasterio", chunks=chunks) # type: ignore
except AttributeError:
arr = xr.open_dataarray(measurement, engine="rasterio") # type: ignore
arr = arr.squeeze("band").drop_vars(["band", "spatial_ref"])
arr = arr.rename({"y": "line", "x": "pixel"})
arr = arr.assign_coords(coords)
arr = arr.swap_dims(swap_dims)
arr.attrs.update(attrs)
arr.encoding.update(encoding)
return xr.Dataset(attrs=attrs, data_vars={"measurement": arr})
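# Note on open_pol_dataset() above: when dask is importable, the preferred chunk
# size along "line" is set to lines_per_burst, so crop_burst_dataset() below can
# usually extract a single burst without reading the whole measurement.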
def find_bursts_index(
pol_dataset: xr.Dataset,
azimuth_anx_time: float,
use_center: bool = False,
) -> int:
lines_per_burst = pol_dataset.attrs["lines_per_burst"]
anx_datetime = np.datetime64(pol_dataset.attrs["sat:anx_datetime"].replace("Z", ""))
azimuth_anx_time = pd.Timedelta(azimuth_anx_time, unit="s")
if use_center:
azimuth_anx_time_center = (
pol_dataset.azimuth_time[lines_per_burst // 2 :: lines_per_burst]
- anx_datetime
)
distance = abs(azimuth_anx_time_center - azimuth_anx_time)
else:
azimuth_anx_time_first_line = (
pol_dataset.azimuth_time[::lines_per_burst] - anx_datetime
)
distance = abs(azimuth_anx_time_first_line - azimuth_anx_time)
return distance.argmin().item() # type: ignore
def crop_burst_dataset(
pol_dataset: xr.Dataset,
burst_index: T.Optional[int] = None,
azimuth_anx_time: T.Optional[float] = None,
use_center: bool = False,
) -> xr.Dataset:
if (burst_index is not None) and (azimuth_anx_time is not None):
raise TypeError(
"only one keyword between 'burst_index' and 'azimuth_anx_time' must be defined"
)
if burst_index is None:
if azimuth_anx_time is not None:
burst_index = find_bursts_index(
pol_dataset, azimuth_anx_time, use_center=use_center
)
else:
raise TypeError(
"one keyword between 'burst_index' and 'azimuth_anx_time' must be defined"
)
if burst_index < 0 or burst_index >= pol_dataset.attrs["number_of_bursts"]:
raise IndexError(f"burst_index={burst_index} out of bounds")
lines_per_burst = pol_dataset.attrs["lines_per_burst"]
ds = pol_dataset.sel(
line=slice(
lines_per_burst * burst_index, lines_per_burst * (burst_index + 1) - 1
)
)
anx_datetime = np.datetime64(pol_dataset.attrs["sat:anx_datetime"].replace("Z", ""))
burst_azimuth_anx_times = ds.azimuth_time - anx_datetime
ds.attrs["azimuth_anx_time"] = burst_azimuth_anx_times.values[0] / ONE_SECOND
ds = ds.swap_dims({"line": "azimuth_time", "pixel": "slant_range_time"})
ds.attrs["burst_index"] = burst_index
return ds
def mosaic_slc_iw(slc_iw_image: xr.Dataset, crop: int = 90) -> xr.Dataset:
bursts = []
for i in range(slc_iw_image.attrs["number_of_bursts"]):
burst = crop_burst_dataset(slc_iw_image, burst_index=i)
bursts.append(burst.isel(azimuth_time=slice(crop, -crop)))
return xr.concat(bursts, dim="azimuth_time")
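# mosaic_slc_iw() above concatenates all bursts along azimuth_time after dropping
# `crop` lines from both ends of each burst, presumably to trim the overlap
# between consecutive bursts.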
def calibrate_amplitude(
digital_number: xr.DataArray, calibration_lut: xr.DataArray
) -> xr.DataArray:
calibration = calibration_lut.interp(
line=digital_number.line,
pixel=digital_number.pixel,
).astype(np.float32)
amplitude = digital_number / calibration
amplitude.attrs.update(digital_number.attrs)
try:
lut_name = calibration_lut.attrs["long_name"].partition("calibration LUT")[0]
amplitude.attrs["long_name"] = f"amplitude for {lut_name}"
amplitude.attrs["units"] = calibration.attrs["units"]
except KeyError:
pass
return amplitude
def calibrate_intensity(
digital_number: xr.DataArray,
calibration_lut: xr.DataArray,
as_db: bool = False,
min_db: T.Optional[float] = -40.0,
) -> xr.DataArray:
amplitude = calibrate_amplitude(digital_number, calibration_lut)
intensity = abs(amplitude) ** 2
if as_db:
intensity = 10.0 * np.log10(intensity)
if min_db is not None:
intensity = np.maximum(intensity, min_db)
intensity.attrs.update(amplitude.attrs)
intensity.attrs["units"] = "dB"
else:
intensity.attrs.update(amplitude.attrs)
intensity.attrs["units"] = "m2 m-2"
try:
lut_name = amplitude.attrs["long_name"].partition("amplitude for ")[2]
intensity.attrs["long_name"] = lut_name
except KeyError:
pass
return intensity
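# Calibration sketch for the two functions above (path and group names are
# placeholders):
#
#   dn = open_sentinel1_dataset("path/to/S1_product.SAFE", group="IW1/VH")["measurement"]
#   cal = open_sentinel1_dataset("path/to/S1_product.SAFE", group="IW1/VH/calibration")
#   beta0 = calibrate_intensity(dn, cal["betaNought"])
#   beta0_db = calibrate_intensity(dn, cal["betaNought"], as_db=True)
#
# calibrate_amplitude() interpolates the LUT onto the measurement's "line"/"pixel"
# coordinates, so the digital numbers must keep those coordinates attached.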
def slant_range_time_to_ground_range(
azimuth_time: xr.DataArray,
slant_range_time: xr.DataArray,
coordinate_conversion: xr.DataArray,
) -> xr.DataArray:
slant_range = SPEED_OF_LIGHT / 2.0 * slant_range_time
cc = coordinate_conversion.interp(azimuth_time=azimuth_time)
x = slant_range - cc.sr0
ground_range = (cc.srgrCoefficients * x ** cc.degree).sum("degree")
return ground_range # type: ignore
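# The conversion above evaluates
#   ground_range = sum_i srgrCoefficients_i * (slant_range - sr0)**i
# with the coefficients interpolated at the requested azimuth_time and
# slant_range = SPEED_OF_LIGHT / 2 * slant_range_time.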
def assign_slant_range_time_coord(
measurement: xr.Dataset, coordinate_conversion: xr.Dataset
) -> xr.Dataset:
x = measurement.ground_range - coordinate_conversion.gr0
slant_range = (
coordinate_conversion.grsrCoefficients * x ** coordinate_conversion.degree
).sum(dim="degree")
slant_range_coord = slant_range.interp(
azimuth_time=measurement.azimuth_time, ground_range=measurement.ground_range
).data
slant_range_time = 2 / SPEED_OF_LIGHT * slant_range_coord
measurement = measurement.assign_coords(
slant_range_time=(("azimuth_time", "ground_range"), slant_range_time)
) # type: ignore
return measurement
def build_burst_id(lat: float, lon: float, relative_orbit: int) -> str:
lat = int(round(lat * 10))
lon = int(round(lon * 10))
n_or_s = "N" if lat >= 0 else "S"
e_or_w = "E" if lon >= 0 else "W"
burst_id = f"R{relative_orbit:03}" f"-{n_or_s}{lat:03}" f"-{e_or_w}{lon:04}"
return burst_id
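# Example traced through the formatting above: build_burst_id(45.67, 12.34, 8)
# returns "R008-N457-E0123"; note that negative latitudes/longitudes keep their
# sign inside the zero-padded field.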
def compute_burst_centres(
gcp: xr.Dataset,
) -> T.Tuple[T.List[float], T.List[float]]:
gcp_rolling = gcp.rolling(azimuth_time=2, min_periods=1)
gc_az_win = gcp_rolling.construct(azimuth_time="az_win")
centre = gc_az_win.mean(["az_win", "slant_range_time"])
centre = centre.isel(azimuth_time=slice(1, None))
return centre.latitude.values.tolist(), centre.longitude.values.tolist()
METADATA_OPENERS = {
"orbit": open_orbit_dataset,
"attitude": open_attitude_dataset,
"azimuth_fm_rate": open_azimuth_fm_rate_dataset,
"dc_estimate": open_dc_estimate_dataset,
"gcp": open_gcp_dataset,
"coordinate_conversion": open_coordinate_conversion_dataset,
"calibration": open_calibration_dataset,
"noise_range": open_noise_range_dataset,
"noise_azimuth": open_noise_azimuth_dataset,
}
def do_override_product_files(
template: str, product_files: T.Dict[str, T.Tuple[str, str, str, str, str]]
) -> T.Dict[str, T.Tuple[str, str, str, str, str]]:
overridden_product_files = {}
for path, description in product_files.items():
type, prefix, swath, polarization, date = description
ext = os.path.splitext(path)[1]
dirname = os.path.dirname(path)
overridden_path = template.format(**locals())
overridden_product_files[overridden_path] = description
return overridden_product_files
def open_sentinel1_dataset(
product_urlpath: esa_safe.PathType,
*,
drop_variables: T.Optional[T.Tuple[str]] = None,
group: T.Optional[str] = None,
fs: T.Optional[fsspec.AbstractFileSystem] = None,
storage_options: T.Optional[T.Dict[str, T.Any]] = None,
check_files_exist: bool = False,
override_product_files: T.Optional[str] = None,
) -> xr.Dataset:
if drop_variables is not None:
warnings.warn("'drop_variables' is currently ignored")
fs, manifest_path = get_fs_path(product_urlpath, fs, storage_options)
if fs.isdir(manifest_path):
manifest_path = os.path.join(manifest_path, "manifest.safe")
product_path = os.path.dirname(manifest_path)
with fs.open(manifest_path) as file:
product_attrs, product_files = esa_safe.parse_manifest_sentinel1(file)
if override_product_files:
product_files = do_override_product_files(override_product_files, product_files)
groups = find_available_groups(
product_files, product_path, check_files_exist=check_files_exist, fs=fs
)
group, burst_index = normalise_group(group)
absgroup = f"/{group}"
if group != "" and group not in groups:
raise ValueError(
f"Invalid group {group!r}, please select one of the following groups:"
f"\n{list(groups.keys())}"
)
metadata = ""
ds = xr.Dataset()
if group == "":
subgroups = list(groups)
else:
subgroups = [
g[len(group) + 1 :] for g in groups if g.startswith(group) and g != group
]
if group.count("/") == 1:
with fs.open(groups[group][1]) as annotation:
ds = open_pol_dataset(groups[group][0], annotation, fs=fs)
elif group.count("/") == 2:
_, _, metadata = group.split("/", 2)
with fs.open(groups[group][0]) as file:
ds = METADATA_OPENERS[metadata](file)
for data_var in ds.data_vars:
ds.data_vars[data_var].attrs.update(product_attrs)
product_attrs["group"] = absgroup
if len(subgroups):
product_attrs["subgroups"] = subgroups
ds.attrs.update(product_attrs) # type: ignore
if group.count("/") == 1 and burst_index is not None:
ds = crop_burst_dataset(ds, burst_index=burst_index)
conventions.update_attributes(ds, group=metadata)
return ds
|
from typing import Iterable
__all__ = ['in_', 'not_in', 'exists', 'not_exists', 'equal', 'not_equal']
class Operator:
def __init__(self, op_name: str, op: str, value=None):
self.op = op
self.value = value
self.op_name = op_name
def encode(self, key):
return f"{key}{self.op}{self.value}"
class SequenceOperator(Operator):
def encode(self, key):
return f"{key} {self.op} ({",".join(self.value)})"
class BinaryOperator(Operator):
pass
class UnaryOperator(Operator):
def encode(self, key):
return f"{self.op}{key}"
def in_(values: Iterable) -> SequenceOperator:
return SequenceOperator('in_', 'in', sorted(values))
def not_in(values: Iterable) -> SequenceOperator:
return SequenceOperator('not_in', 'notin', sorted(values))
def exists() -> UnaryOperator:
return UnaryOperator('exists', '')
def not_exists() -> UnaryOperator:
return UnaryOperator('not_exists', '!')
def equal(value: str) -> BinaryOperator:
return BinaryOperator('equal', '=', value)
def not_equal(value: str) -> BinaryOperator:
return BinaryOperator('not_equal', '!=', value)
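# Usage sketch for the helpers above (keys and values are illustrative):
#
#   in_(["prod", "dev"]).encode("env")   -> "env in (dev,prod)"
#   not_in(["a"]).encode("tier")         -> "tier notin (a)"
#   exists().encode("owner")             -> "owner"
#   not_exists().encode("owner")         -> "!owner"
#   equal("web").encode("app")           -> "app=web"
#   not_equal("web").encode("app")       -> "app!=web"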
|
from typing import Iterable
__all__ = ['in_', 'not_in', 'exists', 'not_exists', 'equal', 'not_equal']
class Operator:
def __init__(self, op_name: str, op: str, value=None):
self.op = op
self.value = value
self.op_name = op_name
def encode(self, key):
return f"{key}{self.op}{self.value}"
class SequenceOperator(Operator):
def encode(self, key):
return f"{key} {self.op} ({','.join(self.value)})"
class BinaryOperator(Operator):
pass
class UnaryOperator(Operator):
def encode(self, key):
return f"{self.op}{key}"
def in_(values: Iterable) -> SequenceOperator:
return SequenceOperator('in_', 'in', sorted(values))
def not_in(values: Iterable) -> SequenceOperator:
return SequenceOperator('not_in', 'notin', sorted(values))
def exists() -> UnaryOperator:
return UnaryOperator('exists', '')
def not_exists() -> UnaryOperator:
return UnaryOperator('not_exists', '!')
def equal(value: str) -> BinaryOperator:
return BinaryOperator('equal', '=', value)
def not_equal(value: str) -> BinaryOperator:
return BinaryOperator('not_equal', '!=', value)
|
import json
import os
import subprocess
import zipfile
import hashlib
import pytest
import py.path
import exifread
EXECUTABLE = os.getenv("MAPILLARY_TOOLS_EXECUTABLE", "python3 -m mapillary_tools")
IMPORT_PATH = "tests/integration/mapillary_tools_process_images_provider/data"
USERNAME = "test_username_MAKE_SURE_IT_IS_UNIQUE_AND_LONG_AND_BORING"
CONFIG_CONTENT = f"""
[{USERNAME}]
MAPSettingsUsername = {USERNAME}
MAPSettingsUserKey = test_user_key
user_upload_token = test_user_token
"""
@pytest.fixture
def setup_config(tmpdir: py.path.local):
config_path = tmpdir.mkdir("configs").join("CLIENT_ID")
with open(config_path, "w") as fp:
fp.write(CONFIG_CONTENT)
yield config_path
if tmpdir.check():
tmpdir.remove(ignore_errors=True)
@pytest.fixture
def setup_data(tmpdir: py.path.local):
data_path = tmpdir.mkdir("data")
source = py.path.local(IMPORT_PATH)
source.copy(data_path)
yield data_path
if tmpdir.check():
tmpdir.remove(ignore_errors=True)
def test_basic():
for option in ["--version", "--help"]:
x = subprocess.run(f"{EXECUTABLE} {option}", shell=True)
assert x.returncode == 0, x.stderr
def test_process(setup_data: py.path.local):
x = subprocess.run(
f"{EXECUTABLE} process {setup_data}",
shell=True,
)
assert x.returncode == 0, x.stderr
desc_path = os.path.join(setup_data, "mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
for desc in descs:
assert "filename" in desc
assert os.path.isfile(os.path.join(setup_data, desc["filename"]))
def validate_and_extract_zip(filename: str):
basename = os.path.basename(filename)
assert basename.startswith("mly_tools_"), filename
assert basename.endswith(".zip"), filename
ret = {}
import tempfile
with zipfile.ZipFile(filename) as zipf:
with tempfile.TemporaryDirectory() as tempdir:
zipf.extractall(path=tempdir)
for name in os.listdir(tempdir):
with open(os.path.join(tempdir, name), "rb") as fp:
tags = exifread.process_file(fp)
desc_tag = tags.get("Image ImageDescription")
assert desc_tag is not None, tags
desc = json.loads(str(desc_tag.values))
assert isinstance(desc.get("MAPLatitude"), (float, int)), desc
assert isinstance(desc.get("MAPLongitude"), (float, int)), desc
assert isinstance(desc.get("MAPCaptureTime"), str), desc
assert isinstance(desc.get("MAPCompassHeading"), dict), desc
for key in desc.keys():
assert key.startswith("MAP"), key
ret[name] = desc
return ret
def test_zip(tmpdir: py.path.local, setup_data: py.path.local):
zip_dir = tmpdir.mkdir("zip_dir")
x = subprocess.run(
f"{EXECUTABLE} process {setup_data}",
shell=True,
)
assert x.returncode == 0, x.stderr
x = subprocess.run(
f"{EXECUTABLE} zip {setup_data} {zip_dir}",
shell=True,
)
assert x.returncode == 0, x.stderr
assert 0 < len(zip_dir.listdir())
for file in zip_dir.listdir():
validate_and_extract_zip(str(file))
def test_upload_image_dir(
tmpdir: py.path.local, setup_config: py.path.local, setup_data: py.path.local
):
os.environ["MAPILLARY_CONFIG_PATH"] = str(setup_config)
upload_dir = tmpdir.mkdir("mapillary_public_uploads")
os.environ["MAPILLARY_UPLOAD_PATH"] = str(upload_dir)
x = subprocess.run(
f"{EXECUTABLE} process {setup_data}",
shell=True,
)
assert x.returncode == 0, x.stderr
x = subprocess.run(
f"{EXECUTABLE} upload {setup_data} --dry_run --user_name={USERNAME}",
shell=True,
)
for file in upload_dir.listdir():
validate_and_extract_zip(str(file))
assert x.returncode == 0, x.stderr
def test_upload_image_dir_twice(
tmpdir: py.path.local, setup_config: py.path.local, setup_data: py.path.local
):
os.environ["MAPILLARY_CONFIG_PATH"] = str(setup_config)
upload_dir = tmpdir.mkdir("mapillary_public_uploads")
os.environ["MAPILLARY_UPLOAD_PATH"] = str(upload_dir)
x = subprocess.run(
f"{EXECUTABLE} process {setup_data}",
shell=True,
)
assert x.returncode == 0, x.stderr
md5sum_map = {}
# first upload
x = subprocess.run(
f"{EXECUTABLE} upload {setup_data} --dry_run --user_name={USERNAME}",
shell=True,
)
assert x.returncode == 0, x.stderr
for file in upload_dir.listdir():
validate_and_extract_zip(str(file))
md5sum_map[os.path.basename(file)] = file_md5sum(file)
# expect the second upload to not produce new uploads
x = subprocess.run(
f"{EXECUTABLE} upload {setup_data} --dry_run --user_name={USERNAME}",
shell=True,
)
assert x.returncode == 0, x.stderr
for file in upload_dir.listdir():
validate_and_extract_zip(str(file))
new_md5sum = file_md5sum(file)
assert md5sum_map[os.path.basename(file)] == new_md5sum
assert len(md5sum_map) == len(upload_dir.listdir())
def test_upload_zip(
tmpdir: py.path.local, setup_data: py.path.local, setup_config: py.path.local
):
os.environ["MAPILLARY_CONFIG_PATH"] = str(setup_config)
upload_dir = tmpdir.mkdir("mapillary_public_uploads")
os.environ["MAPILLARY_UPLOAD_PATH"] = str(upload_dir)
zip_dir = tmpdir.mkdir("zip_dir")
x = subprocess.run(
f"{EXECUTABLE} process {setup_data}",
shell=True,
)
assert x.returncode == 0, x.stderr
x = subprocess.run(
f"{EXECUTABLE} zip {setup_data} {zip_dir}",
shell=True,
)
assert x.returncode == 0, x.stderr
for zfile in zip_dir.listdir():
x = subprocess.run(
f"{EXECUTABLE} upload {zfile} --dry_run --user_name={USERNAME}",
shell=True,
)
assert x.returncode == 0, x.stderr
for file in upload_dir.listdir():
validate_and_extract_zip(str(file))
def test_process_and_upload(
tmpdir: py.path.local, setup_config: py.path.local, setup_data: py.path.local
):
os.environ["MAPILLARY_CONFIG_PATH"] = str(setup_config)
upload_dir = tmpdir.mkdir("mapillary_public_uploads")
os.environ["MAPILLARY_UPLOAD_PATH"] = str(upload_dir)
x = subprocess.run(
f"{EXECUTABLE} process_and_upload {setup_data} --dry_run --user_name={USERNAME}",
shell=True,
)
assert x.returncode == 0, x.stderr
for file in upload_dir.listdir():
validate_and_extract_zip(str(file))
def test_time(setup_data: py.path.local):
# before offset
x = subprocess.run(
f"{EXECUTABLE} process {setup_data}",
shell=True,
)
assert x.returncode == 0, x.stderr
desc_path = setup_data.join("mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
expected = {
"DSC00001.JPG": "2018_06_08_13_24_10_000",
"DSC00497.JPG": "2018_06_08_13_32_28_000",
"V0370574.JPG": "2018_07_27_11_32_14_000",
}
for desc in descs:
assert "filename" in desc
assert expected[desc["filename"]] == desc["MAPCaptureTime"]
# after offset
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} --offset_time=2.5",
shell=True,
)
assert x.returncode == 0, x.stderr
desc_path = setup_data.join("mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
expected = {
"DSC00001.JPG": "2018_06_08_13_24_12_500",
"DSC00497.JPG": "2018_06_08_13_32_30_500",
"V0370574.JPG": "2018_07_27_11_32_16_500",
}
for desc in descs:
assert "filename" in desc
assert expected[desc["filename"]] == desc["MAPCaptureTime"]
# after offset
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} --offset_time=-1.0",
shell=True,
)
assert x.returncode == 0, x.stderr
desc_path = setup_data.join("mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
expected = {
"DSC00001.JPG": "2018_06_08_13_24_09_000",
"DSC00497.JPG": "2018_06_08_13_32_27_000",
"V0370574.JPG": "2018_07_27_11_32_13_000",
}
for desc in descs:
assert "filename" in desc
assert expected[desc["filename"]] == desc["MAPCaptureTime"]
def test_angle(setup_data: py.path.local):
# before offset
x = subprocess.run(
f"{EXECUTABLE} process {setup_data}",
shell=True,
)
assert x.returncode == 0, x.stderr
desc_path = setup_data.join("mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
expected = {
"DSC00001.JPG": 270.89,
"DSC00497.JPG": 271.27,
"V0370574.JPG": 359.0,
}
for desc in descs:
assert "filename" in desc
assert (
abs(expected[desc["filename"]] - desc["MAPCompassHeading"]["TrueHeading"])
< 0.00001
)
assert (
abs(
expected[desc["filename"]]
- desc["MAPCompassHeading"]["MagneticHeading"]
)
< 0.00001
)
# after offset
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} --offset_angle=2.5",
shell=True,
)
assert x.returncode == 0, x.stderr
desc_path = setup_data.join("mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
expected = {
"DSC00001.JPG": 270.89 + 2.5,
"DSC00497.JPG": 271.27 + 2.5,
"V0370574.JPG": 1.5,
}
for desc in descs:
assert "filename" in desc
assert (
abs(expected[desc["filename"]] - desc["MAPCompassHeading"]["TrueHeading"])
< 0.00001
)
assert (
abs(
expected[desc["filename"]]
- desc["MAPCompassHeading"]["MagneticHeading"]
)
< 0.00001
)
def test_process_boolean_options(
setup_config: py.path.local, setup_data: py.path.local
):
os.environ["MAPILLARY_CONFIG_PATH"] = str(setup_config)
boolean_options = [
"--add_file_name",
"--add_import_date",
"--exclude_import_path",
"--interpolate_directions",
"--overwrite_EXIF_direction_tag",
"--overwrite_EXIF_gps_tag",
"--overwrite_EXIF_orientation_tag",
"--overwrite_EXIF_time_tag",
"--overwrite_all_EXIF_tags",
"--skip_subfolders",
"--windows_path",
]
for option in boolean_options:
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} {option}",
shell=True,
)
assert x.returncode == 0, x.stderr
all_options = " ".join(boolean_options)
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} {all_options}",
shell=True,
)
assert x.returncode == 0, x.stderr
GPX_CONTENT = """
<gpx>
<trk>
<name>Mapillary GPX</name>
<trkseg>
<trkpt lat="0.02" lon="0.01">
<ele>1</ele>
<time>2018-06-08T13:23:34.805</time>
</trkpt>
<trkpt lat="2.02" lon="0.01">
<ele>2</ele>
<time>2018-06-08T13:24:35.809</time>
</trkpt>
<trkpt lat="2.02" lon="2.01">
<ele>4</ele>
<time>2018-06-08T13:33:36.813</time>
</trkpt>
<trkpt lat="4.02" lon="2.01">
<ele>9</ele>
<time>2018-06-08T13:58:37.812</time>
</trkpt>
</trkseg>
</trk>
</gpx>
"""
def find_desc_errors(descs):
return [desc for desc in descs if "error" in desc]
def filter_out_errors(descs):
return [desc for desc in descs if "error" not in desc]
def test_geotagging_from_gpx(setup_data: py.path.local):
gpx_file = setup_data.join("test.gpx")
desc_path = setup_data.join("mapillary_image_description.json")
with gpx_file.open("w") as fp:
fp.write(GPX_CONTENT)
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} --geotag_source gpx --geotag_source_path {gpx_file} --skip_process_errors",
shell=True,
)
assert x.returncode == 0, x.stderr
expected_lonlat = {
# capture_time, lon, lat, elevation
"DSC00001.JPG": [
"2018_06_08_13_24_10_000",
0.01,
1.1738587633597797,
1.5769293816798897,
],
"DSC00497.JPG": [
"2018_06_08_13_32_28_000",
1.7556100139740183,
2.02,
3.7456100139740185,
],
}
with open(desc_path) as fp:
descs = json.load(fp)
assert {"V0370574.JPG"} == {d["filename"] for d in find_desc_errors(descs)}
for desc in find_desc_errors(descs):
assert desc.get("error").get("type") == "MapillaryOutsideGPXTrackError"
for desc in filter_out_errors(descs):
assert expected_lonlat[desc["filename"]][0] == desc["MAPCaptureTime"]
assert (
abs(expected_lonlat[desc["filename"]][1] - desc["MAPLongitude"]) < 0.00001
)
assert abs(expected_lonlat[desc["filename"]][2] - desc["MAPLatitude"]) < 0.00001
assert abs(expected_lonlat[desc["filename"]][3] - desc["MAPAltitude"]) < 0.00001
def test_geotagging_from_gpx_with_offset(setup_data: py.path.local):
gpx_file = setup_data.join("test.gpx")
desc_path = setup_data.join("mapillary_image_description.json")
with gpx_file.open("w") as fp:
fp.write(GPX_CONTENT)
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} --geotag_source gpx --geotag_source_path {gpx_file} --interpolation_offset_time=-20 --skip_process_errors",
shell=True,
)
assert x.returncode == 0, x.stderr
expected_lonlat = {
# capture_time, lon, lat, elevation
"DSC00001.JPG": [
"2018_06_08_13_23_50_000",
0.01,
0.5181640548160776,
1.2490820274080388,
],
"DSC00497.JPG": [
"2018_06_08_13_32_08_000",
1.6816734072206487,
2.02,
3.671673407220649,
],
}
with open(desc_path) as fp:
descs = json.load(fp)
assert {"V0370574.JPG"} == {d["filename"] for d in find_desc_errors(descs)}
for desc in find_desc_errors(descs):
assert desc.get("error").get("type") == "MapillaryOutsideGPXTrackError"
for desc in filter_out_errors(descs):
assert expected_lonlat[desc["filename"]][0] == desc["MAPCaptureTime"]
assert (
abs(expected_lonlat[desc["filename"]][1] - desc["MAPLongitude"]) < 0.00001
)
assert abs(expected_lonlat[desc["filename"]][2] - desc["MAPLatitude"]) < 0.00001
assert abs(expected_lonlat[desc["filename"]][3] - desc["MAPAltitude"]) < 0.00001
def test_geotagging_from_gpx_use_gpx_start_time(setup_data: py.path.local):
gpx_file = setup_data.join("test.gpx")
with gpx_file.open("w") as fp:
fp.write(GPX_CONTENT)
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} --geotag_source gpx --interpolation_use_gpx_start_time --geotag_source_path {gpx_file} --skip_process_errors",
shell=True,
)
assert x.returncode == 0, x.stderr
expected_lonlat = {
# capture_time, lon, lat, elevation
"DSC00001.JPG": ["2018_06_08_13_23_34_805", 0.01, 0.02, 1.0],
"DSC00497.JPG": [
"2018_06_08_13_31_52_805",
1.6255000702397762,
2.02,
3.6155000702397766,
],
}
desc_path = setup_data.join("mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
assert {"V0370574.JPG"} == {d["filename"] for d in find_desc_errors(descs)}
for desc in find_desc_errors(descs):
assert desc.get("error").get("type") == "MapillaryOutsideGPXTrackError"
for desc in filter_out_errors(descs):
assert expected_lonlat[desc["filename"]][0] == desc["MAPCaptureTime"]
assert (
abs(expected_lonlat[desc["filename"]][1] - desc["MAPLongitude"]) < 0.00001
)
assert abs(expected_lonlat[desc["filename"]][2] - desc["MAPLatitude"]) < 0.00001
assert abs(expected_lonlat[desc["filename"]][3] - desc["MAPAltitude"]) < 0.00001
def test_geotagging_from_gpx_use_gpx_start_time_with_offset(setup_data: py.path.local):
gpx_file = setup_data.join("test.gpx")
with gpx_file.open("w") as fp:
fp.write(GPX_CONTENT)
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} --geotag_source gpx --interpolation_use_gpx_start_time --geotag_source_path {gpx_file} --interpolation_offset_time=100 --skip_process_errors",
shell=True,
)
assert x.returncode == 0, x.stderr
expected_lonlat = {
# capture_time, lon, lat, elevation
"DSC00001.JPG": [
"2018_06_08_13_25_14_805",
0.15416159584772016,
2.02,
2.14416159584772,
],
"DSC00497.JPG": [
"2018_06_08_13_33_32_805",
1.9951831040066244,
2.02,
3.985183104006625,
],
}
desc_path = setup_data.join("mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
assert {"V0370574.JPG"} == {d["filename"] for d in find_desc_errors(descs)}
for desc in find_desc_errors(descs):
assert desc.get("error").get("type") == "MapillaryOutsideGPXTrackError"
for desc in filter_out_errors(descs):
assert expected_lonlat[desc["filename"]][0] == desc["MAPCaptureTime"]
assert (
abs(expected_lonlat[desc["filename"]][1] - desc["MAPLongitude"]) < 0.00001
)
assert abs(expected_lonlat[desc["filename"]][2] - desc["MAPLatitude"]) < 0.00001
assert abs(expected_lonlat[desc["filename"]][3] - desc["MAPAltitude"]) < 0.00001
def ffmpeg_installed():
ffmpeg_path = os.getenv("MAPILLARY_FFMPEG_PATH", "ffmpeg")
try:
subprocess.run([ffmpeg_path, "-version"])
except FileNotFoundError:
return False
return True
is_ffmpeg_installed = ffmpeg_installed()
def test_sample_video(setup_data: py.path.local):
if not is_ffmpeg_installed:
pytest.skip("skip because ffmpeg not installed")
for input_path in [setup_data, setup_data.join("sample-5s.mp4")]:
x = subprocess.run(
f"{EXECUTABLE} sample_video --rerun {input_path}",
shell=True,
)
assert x.returncode != 0, x.stderr
assert len(setup_data.join("mapillary_sampled_video_frames").listdir()) == 0
x = subprocess.run(
f"{EXECUTABLE} sample_video --skip_sample_errors --rerun {input_path}",
shell=True,
)
assert x.returncode == 0, x.stderr
assert len(setup_data.join("mapillary_sampled_video_frames").listdir()) == 0
x = subprocess.run(
f"{EXECUTABLE} sample_video --video_start_time 2021_10_10_10_10_10_123 --rerun {input_path}",
shell=True,
)
assert x.returncode == 0, x.stderr
sample_path = setup_data.join("mapillary_sampled_video_frames")
assert len(sample_path.listdir()) == 1
samples = sample_path.join("sample-5s.mp4").listdir()
samples.sort()
times = []
for s in samples:
with s.open("rb") as fp:
tags = exifread.process_file(fp)
times.append(tags["EXIF DateTimeOriginal"].values)
assert (
"2021:10:10 10:10:10.123",
"2021:10:10 10:10:12.123",
"2021:10:10 10:10:14.123",
) == tuple(times)
def test_video_process(setup_data: py.path.local):
if not is_ffmpeg_installed:
pytest.skip("skip because ffmpeg not installed")
gpx_file = setup_data.join("test.gpx")
desc_path = setup_data.join("my_samples").join("mapillary_image_description.json")
with gpx_file.open("w") as fp:
fp.write(GPX_CONTENT)
x = subprocess.run(
f"{EXECUTABLE} video_process --video_start_time 2018_06_08_13_23_34_123 --geotag_source gpx --geotag_source_path {gpx_file} {setup_data} {setup_data.join("my_samples")}",
shell=True,
)
assert x.returncode != 0, x.stderr
with open(desc_path) as fp:
descs = json.load(fp)
assert 1 == len(find_desc_errors(descs))
assert 2 == len(filter_out_errors(descs))
def test_video_process_multiple_videos(setup_data: py.path.local):
if not is_ffmpeg_installed:
pytest.skip("skip because ffmpeg not installed")
gpx_file = setup_data.join("test.gpx")
desc_path = setup_data.join("my_samples").join("mapillary_image_description.json")
sub_folder = setup_data.join("video_sub_folder").mkdir()
video_path = setup_data.join("sample-5s.mp4")
video_path.copy(sub_folder)
with gpx_file.open("w") as fp:
fp.write(GPX_CONTENT)
x = subprocess.run(
f"{EXECUTABLE} video_process --video_start_time 2018_06_08_13_23_34_123 --geotag_source gpx --geotag_source_path {gpx_file} {video_path} {setup_data.join("my_samples")}",
shell=True,
)
assert x.returncode != 0, x.stderr
with open(desc_path) as fp:
descs = json.load(fp)
for d in descs:
assert d["filename"].startswith("sample-5s.mp4/")
assert 1 == len(find_desc_errors(descs))
assert 2 == len(filter_out_errors(descs))
def file_md5sum(path) -> str:
with open(path, "rb") as fp:
md5 = hashlib.md5()
while True:
buf = fp.read(1024 * 1024 * 32)
if not buf:
break
md5.update(buf)
return md5.hexdigest()
def test_upload_mp4(
tmpdir: py.path.local, setup_data: py.path.local, setup_config: py.path.local
):
os.environ["MAPILLARY_CONFIG_PATH"] = str(setup_config)
upload_dir = tmpdir.mkdir("mapillary_public_uploads")
os.environ["MAPILLARY_UPLOAD_PATH"] = str(upload_dir)
video_path = setup_data.join("sample-5s.mp4")
md5sum = file_md5sum(video_path)
x = subprocess.run(
f"{EXECUTABLE} upload {video_path} --dry_run --user_name={USERNAME}",
shell=True,
)
assert x.returncode == 0, x.stderr
assert 1 == len(upload_dir.listdir())
assert {"mly_tools_8cd0e9af15f4baaafe9dfe98ace8b886.mp4"} == {
os.path.basename(f) for f in upload_dir.listdir()
}
assert {md5sum} == {file_md5sum(f) for f in upload_dir.listdir()}
|
import json
import os
import subprocess
import zipfile
import hashlib
import pytest
import py.path
import exifread
EXECUTABLE = os.getenv("MAPILLARY_TOOLS_EXECUTABLE", "python3 -m mapillary_tools")
IMPORT_PATH = "tests/integration/mapillary_tools_process_images_provider/data"
USERNAME = "test_username_MAKE_SURE_IT_IS_UNIQUE_AND_LONG_AND_BORING"
CONFIG_CONTENT = f"""
[{USERNAME}]
MAPSettingsUsername = {USERNAME}
MAPSettingsUserKey = test_user_key
user_upload_token = test_user_token
"""
@pytest.fixture
def setup_config(tmpdir: py.path.local):
config_path = tmpdir.mkdir("configs").join("CLIENT_ID")
with open(config_path, "w") as fp:
fp.write(CONFIG_CONTENT)
yield config_path
if tmpdir.check():
tmpdir.remove(ignore_errors=True)
@pytest.fixture
def setup_data(tmpdir: py.path.local):
data_path = tmpdir.mkdir("data")
source = py.path.local(IMPORT_PATH)
source.copy(data_path)
yield data_path
if tmpdir.check():
tmpdir.remove(ignore_errors=True)
def test_basic():
for option in ["--version", "--help"]:
x = subprocess.run(f"{EXECUTABLE} {option}", shell=True)
assert x.returncode == 0, x.stderr
def test_process(setup_data: py.path.local):
x = subprocess.run(
f"{EXECUTABLE} process {setup_data}",
shell=True,
)
assert x.returncode == 0, x.stderr
desc_path = os.path.join(setup_data, "mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
for desc in descs:
assert "filename" in desc
assert os.path.isfile(os.path.join(setup_data, desc["filename"]))
def validate_and_extract_zip(filename: str):
basename = os.path.basename(filename)
assert basename.startswith("mly_tools_"), filename
assert basename.endswith(".zip"), filename
ret = {}
import tempfile
with zipfile.ZipFile(filename) as zipf:
with tempfile.TemporaryDirectory() as tempdir:
zipf.extractall(path=tempdir)
for name in os.listdir(tempdir):
with open(os.path.join(tempdir, name), "rb") as fp:
tags = exifread.process_file(fp)
desc_tag = tags.get("Image ImageDescription")
assert desc_tag is not None, tags
desc = json.loads(str(desc_tag.values))
assert isinstance(desc.get("MAPLatitude"), (float, int)), desc
assert isinstance(desc.get("MAPLongitude"), (float, int)), desc
assert isinstance(desc.get("MAPCaptureTime"), str), desc
assert isinstance(desc.get("MAPCompassHeading"), dict), desc
for key in desc.keys():
assert key.startswith("MAP"), key
ret[name] = desc
return ret
def test_zip(tmpdir: py.path.local, setup_data: py.path.local):
zip_dir = tmpdir.mkdir("zip_dir")
x = subprocess.run(
f"{EXECUTABLE} process {setup_data}",
shell=True,
)
assert x.returncode == 0, x.stderr
x = subprocess.run(
f"{EXECUTABLE} zip {setup_data} {zip_dir}",
shell=True,
)
assert x.returncode == 0, x.stderr
assert 0 < len(zip_dir.listdir())
for file in zip_dir.listdir():
validate_and_extract_zip(str(file))
def test_upload_image_dir(
tmpdir: py.path.local, setup_config: py.path.local, setup_data: py.path.local
):
os.environ["MAPILLARY_CONFIG_PATH"] = str(setup_config)
upload_dir = tmpdir.mkdir("mapillary_public_uploads")
os.environ["MAPILLARY_UPLOAD_PATH"] = str(upload_dir)
x = subprocess.run(
f"{EXECUTABLE} process {setup_data}",
shell=True,
)
assert x.returncode == 0, x.stderr
x = subprocess.run(
f"{EXECUTABLE} upload {setup_data} --dry_run --user_name={USERNAME}",
shell=True,
)
for file in upload_dir.listdir():
validate_and_extract_zip(str(file))
assert x.returncode == 0, x.stderr
def test_upload_image_dir_twice(
tmpdir: py.path.local, setup_config: py.path.local, setup_data: py.path.local
):
os.environ["MAPILLARY_CONFIG_PATH"] = str(setup_config)
upload_dir = tmpdir.mkdir("mapillary_public_uploads")
os.environ["MAPILLARY_UPLOAD_PATH"] = str(upload_dir)
x = subprocess.run(
f"{EXECUTABLE} process {setup_data}",
shell=True,
)
assert x.returncode == 0, x.stderr
md5sum_map = {}
# first upload
x = subprocess.run(
f"{EXECUTABLE} upload {setup_data} --dry_run --user_name={USERNAME}",
shell=True,
)
assert x.returncode == 0, x.stderr
for file in upload_dir.listdir():
validate_and_extract_zip(str(file))
md5sum_map[os.path.basename(file)] = file_md5sum(file)
# expect the second upload to not produce new uploads
x = subprocess.run(
f"{EXECUTABLE} upload {setup_data} --dry_run --user_name={USERNAME}",
shell=True,
)
assert x.returncode == 0, x.stderr
for file in upload_dir.listdir():
validate_and_extract_zip(str(file))
new_md5sum = file_md5sum(file)
assert md5sum_map[os.path.basename(file)] == new_md5sum
assert len(md5sum_map) == len(upload_dir.listdir())
def test_upload_zip(
tmpdir: py.path.local, setup_data: py.path.local, setup_config: py.path.local
):
os.environ["MAPILLARY_CONFIG_PATH"] = str(setup_config)
upload_dir = tmpdir.mkdir("mapillary_public_uploads")
os.environ["MAPILLARY_UPLOAD_PATH"] = str(upload_dir)
zip_dir = tmpdir.mkdir("zip_dir")
x = subprocess.run(
f"{EXECUTABLE} process {setup_data}",
shell=True,
)
assert x.returncode == 0, x.stderr
x = subprocess.run(
f"{EXECUTABLE} zip {setup_data} {zip_dir}",
shell=True,
)
assert x.returncode == 0, x.stderr
for zfile in zip_dir.listdir():
x = subprocess.run(
f"{EXECUTABLE} upload {zfile} --dry_run --user_name={USERNAME}",
shell=True,
)
assert x.returncode == 0, x.stderr
for file in upload_dir.listdir():
validate_and_extract_zip(str(file))
def test_process_and_upload(
tmpdir: py.path.local, setup_config: py.path.local, setup_data: py.path.local
):
os.environ["MAPILLARY_CONFIG_PATH"] = str(setup_config)
upload_dir = tmpdir.mkdir("mapillary_public_uploads")
os.environ["MAPILLARY_UPLOAD_PATH"] = str(upload_dir)
x = subprocess.run(
f"{EXECUTABLE} process_and_upload {setup_data} --dry_run --user_name={USERNAME}",
shell=True,
)
assert x.returncode == 0, x.stderr
for file in upload_dir.listdir():
validate_and_extract_zip(str(file))
def test_time(setup_data: py.path.local):
# before offset
x = subprocess.run(
f"{EXECUTABLE} process {setup_data}",
shell=True,
)
assert x.returncode == 0, x.stderr
desc_path = setup_data.join("mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
expected = {
"DSC00001.JPG": "2018_06_08_13_24_10_000",
"DSC00497.JPG": "2018_06_08_13_32_28_000",
"V0370574.JPG": "2018_07_27_11_32_14_000",
}
for desc in descs:
assert "filename" in desc
assert expected[desc["filename"]] == desc["MAPCaptureTime"]
# after offset
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} --offset_time=2.5",
shell=True,
)
assert x.returncode == 0, x.stderr
desc_path = setup_data.join("mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
expected = {
"DSC00001.JPG": "2018_06_08_13_24_12_500",
"DSC00497.JPG": "2018_06_08_13_32_30_500",
"V0370574.JPG": "2018_07_27_11_32_16_500",
}
for desc in descs:
assert "filename" in desc
assert expected[desc["filename"]] == desc["MAPCaptureTime"]
# after offset
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} --offset_time=-1.0",
shell=True,
)
assert x.returncode == 0, x.stderr
desc_path = setup_data.join("mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
expected = {
"DSC00001.JPG": "2018_06_08_13_24_09_000",
"DSC00497.JPG": "2018_06_08_13_32_27_000",
"V0370574.JPG": "2018_07_27_11_32_13_000",
}
for desc in descs:
assert "filename" in desc
assert expected[desc["filename"]] == desc["MAPCaptureTime"]
def test_angle(setup_data: py.path.local):
# before offset
x = subprocess.run(
f"{EXECUTABLE} process {setup_data}",
shell=True,
)
assert x.returncode == 0, x.stderr
desc_path = setup_data.join("mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
expected = {
"DSC00001.JPG": 270.89,
"DSC00497.JPG": 271.27,
"V0370574.JPG": 359.0,
}
for desc in descs:
assert "filename" in desc
assert (
abs(expected[desc["filename"]] - desc["MAPCompassHeading"]["TrueHeading"])
< 0.00001
)
assert (
abs(
expected[desc["filename"]]
- desc["MAPCompassHeading"]["MagneticHeading"]
)
< 0.00001
)
# after offset
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} --offset_angle=2.5",
shell=True,
)
assert x.returncode == 0, x.stderr
desc_path = setup_data.join("mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
expected = {
"DSC00001.JPG": 270.89 + 2.5,
"DSC00497.JPG": 271.27 + 2.5,
"V0370574.JPG": 1.5,
}
for desc in descs:
assert "filename" in desc
assert (
abs(expected[desc["filename"]] - desc["MAPCompassHeading"]["TrueHeading"])
< 0.00001
)
assert (
abs(
expected[desc["filename"]]
- desc["MAPCompassHeading"]["MagneticHeading"]
)
< 0.00001
)
def test_process_boolean_options(
setup_config: py.path.local, setup_data: py.path.local
):
os.environ["MAPILLARY_CONFIG_PATH"] = str(setup_config)
boolean_options = [
"--add_file_name",
"--add_import_date",
"--exclude_import_path",
"--interpolate_directions",
"--overwrite_EXIF_direction_tag",
"--overwrite_EXIF_gps_tag",
"--overwrite_EXIF_orientation_tag",
"--overwrite_EXIF_time_tag",
"--overwrite_all_EXIF_tags",
"--skip_subfolders",
"--windows_path",
]
for option in boolean_options:
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} {option}",
shell=True,
)
assert x.returncode == 0, x.stderr
all_options = " ".join(boolean_options)
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} {all_options}",
shell=True,
)
assert x.returncode == 0, x.stderr
GPX_CONTENT = """
<gpx>
<trk>
<name>Mapillary GPX</name>
<trkseg>
<trkpt lat="0.02" lon="0.01">
<ele>1</ele>
<time>2018-06-08T13:23:34.805</time>
</trkpt>
<trkpt lat="2.02" lon="0.01">
<ele>2</ele>
<time>2018-06-08T13:24:35.809</time>
</trkpt>
<trkpt lat="2.02" lon="2.01">
<ele>4</ele>
<time>2018-06-08T13:33:36.813</time>
</trkpt>
<trkpt lat="4.02" lon="2.01">
<ele>9</ele>
<time>2018-06-08T13:58:37.812</time>
</trkpt>
</trkseg>
</trk>
</gpx>
"""
def find_desc_errors(descs):
return [desc for desc in descs if "error" in desc]
def filter_out_errors(descs):
return [desc for desc in descs if "error" not in desc]
def test_geotagging_from_gpx(setup_data: py.path.local):
gpx_file = setup_data.join("test.gpx")
desc_path = setup_data.join("mapillary_image_description.json")
with gpx_file.open("w") as fp:
fp.write(GPX_CONTENT)
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} --geotag_source gpx --geotag_source_path {gpx_file} --skip_process_errors",
shell=True,
)
assert x.returncode == 0, x.stderr
expected_lonlat = {
# capture_time, lon, lat, elevation
"DSC00001.JPG": [
"2018_06_08_13_24_10_000",
0.01,
1.1738587633597797,
1.5769293816798897,
],
"DSC00497.JPG": [
"2018_06_08_13_32_28_000",
1.7556100139740183,
2.02,
3.7456100139740185,
],
}
with open(desc_path) as fp:
descs = json.load(fp)
assert {"V0370574.JPG"} == {d["filename"] for d in find_desc_errors(descs)}
for desc in find_desc_errors(descs):
assert desc.get("error").get("type") == "MapillaryOutsideGPXTrackError"
for desc in filter_out_errors(descs):
assert expected_lonlat[desc["filename"]][0] == desc["MAPCaptureTime"]
assert (
abs(expected_lonlat[desc["filename"]][1] - desc["MAPLongitude"]) < 0.00001
)
assert abs(expected_lonlat[desc["filename"]][2] - desc["MAPLatitude"]) < 0.00001
assert abs(expected_lonlat[desc["filename"]][3] - desc["MAPAltitude"]) < 0.00001
def test_geotagging_from_gpx_with_offset(setup_data: py.path.local):
gpx_file = setup_data.join("test.gpx")
desc_path = setup_data.join("mapillary_image_description.json")
with gpx_file.open("w") as fp:
fp.write(GPX_CONTENT)
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} --geotag_source gpx --geotag_source_path {gpx_file} --interpolation_offset_time=-20 --skip_process_errors",
shell=True,
)
assert x.returncode == 0, x.stderr
expected_lonlat = {
# capture_time, lon, lat, elevation
"DSC00001.JPG": [
"2018_06_08_13_23_50_000",
0.01,
0.5181640548160776,
1.2490820274080388,
],
"DSC00497.JPG": [
"2018_06_08_13_32_08_000",
1.6816734072206487,
2.02,
3.671673407220649,
],
}
with open(desc_path) as fp:
descs = json.load(fp)
assert {"V0370574.JPG"} == {d["filename"] for d in find_desc_errors(descs)}
for desc in find_desc_errors(descs):
assert desc.get("error").get("type") == "MapillaryOutsideGPXTrackError"
for desc in filter_out_errors(descs):
assert expected_lonlat[desc["filename"]][0] == desc["MAPCaptureTime"]
assert (
abs(expected_lonlat[desc["filename"]][1] - desc["MAPLongitude"]) < 0.00001
)
assert abs(expected_lonlat[desc["filename"]][2] - desc["MAPLatitude"]) < 0.00001
assert abs(expected_lonlat[desc["filename"]][3] - desc["MAPAltitude"]) < 0.00001
def test_geotagging_from_gpx_use_gpx_start_time(setup_data: py.path.local):
gpx_file = setup_data.join("test.gpx")
with gpx_file.open("w") as fp:
fp.write(GPX_CONTENT)
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} --geotag_source gpx --interpolation_use_gpx_start_time --geotag_source_path {gpx_file} --skip_process_errors",
shell=True,
)
assert x.returncode == 0, x.stderr
expected_lonlat = {
# capture_time, lon, lat, elevation
"DSC00001.JPG": ["2018_06_08_13_23_34_805", 0.01, 0.02, 1.0],
"DSC00497.JPG": [
"2018_06_08_13_31_52_805",
1.6255000702397762,
2.02,
3.6155000702397766,
],
}
desc_path = setup_data.join("mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
assert {"V0370574.JPG"} == {d["filename"] for d in find_desc_errors(descs)}
for desc in find_desc_errors(descs):
assert desc.get("error").get("type") == "MapillaryOutsideGPXTrackError"
for desc in filter_out_errors(descs):
assert expected_lonlat[desc["filename"]][0] == desc["MAPCaptureTime"]
assert (
abs(expected_lonlat[desc["filename"]][1] - desc["MAPLongitude"]) < 0.00001
)
assert abs(expected_lonlat[desc["filename"]][2] - desc["MAPLatitude"]) < 0.00001
assert abs(expected_lonlat[desc["filename"]][3] - desc["MAPAltitude"]) < 0.00001
def test_geotagging_from_gpx_use_gpx_start_time_with_offset(setup_data: py.path.local):
gpx_file = setup_data.join("test.gpx")
with gpx_file.open("w") as fp:
fp.write(GPX_CONTENT)
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} --geotag_source gpx --interpolation_use_gpx_start_time --geotag_source_path {gpx_file} --interpolation_offset_time=100 --skip_process_errors",
shell=True,
)
assert x.returncode == 0, x.stderr
expected_lonlat = {
# capture_time, lon, lat, elevation
"DSC00001.JPG": [
"2018_06_08_13_25_14_805",
0.15416159584772016,
2.02,
2.14416159584772,
],
"DSC00497.JPG": [
"2018_06_08_13_33_32_805",
1.9951831040066244,
2.02,
3.985183104006625,
],
}
desc_path = setup_data.join("mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
assert {"V0370574.JPG"} == {d["filename"] for d in find_desc_errors(descs)}
for desc in find_desc_errors(descs):
assert desc.get("error").get("type") == "MapillaryOutsideGPXTrackError"
for desc in filter_out_errors(descs):
assert expected_lonlat[desc["filename"]][0] == desc["MAPCaptureTime"]
assert (
abs(expected_lonlat[desc["filename"]][1] - desc["MAPLongitude"]) < 0.00001
)
assert abs(expected_lonlat[desc["filename"]][2] - desc["MAPLatitude"]) < 0.00001
assert abs(expected_lonlat[desc["filename"]][3] - desc["MAPAltitude"]) < 0.00001
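# Probe for an ffmpeg binary (MAPILLARY_FFMPEG_PATH or "ffmpeg" on PATH); the video tests below are skipped when it is unavailable.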
def ffmpeg_installed():
ffmpeg_path = os.getenv("MAPILLARY_FFMPEG_PATH", "ffmpeg")
try:
subprocess.run([ffmpeg_path, "-version"])
except FileNotFoundError:
return False
return True
is_ffmpeg_installed = ffmpeg_installed()
def test_sample_video(setup_data: py.path.local):
if not is_ffmpeg_installed:
pytest.skip("skip because ffmpeg not installed")
for input_path in [setup_data, setup_data.join("sample-5s.mp4")]:
x = subprocess.run(
f"{EXECUTABLE} sample_video --rerun {input_path}",
shell=True,
)
assert x.returncode != 0, x.stderr
assert len(setup_data.join("mapillary_sampled_video_frames").listdir()) == 0
x = subprocess.run(
f"{EXECUTABLE} sample_video --skip_sample_errors --rerun {input_path}",
shell=True,
)
assert x.returncode == 0, x.stderr
assert len(setup_data.join("mapillary_sampled_video_frames").listdir()) == 0
x = subprocess.run(
f"{EXECUTABLE} sample_video --video_start_time 2021_10_10_10_10_10_123 --rerun {input_path}",
shell=True,
)
assert x.returncode == 0, x.stderr
sample_path = setup_data.join("mapillary_sampled_video_frames")
assert len(sample_path.listdir()) == 1
samples = sample_path.join("sample-5s.mp4").listdir()
samples.sort()
times = []
for s in samples:
with s.open("rb") as fp:
tags = exifread.process_file(fp)
times.append(tags["EXIF DateTimeOriginal"].values)
assert (
"2021:10:10 10:10:10.123",
"2021:10:10 10:10:12.123",
"2021:10:10 10:10:14.123",
) == tuple(times)
def test_video_process(setup_data: py.path.local):
if not is_ffmpeg_installed:
pytest.skip("skip because ffmpeg not installed")
gpx_file = setup_data.join("test.gpx")
desc_path = setup_data.join("my_samples").join("mapillary_image_description.json")
with gpx_file.open("w") as fp:
fp.write(GPX_CONTENT)
x = subprocess.run(
f"{EXECUTABLE} video_process --video_start_time 2018_06_08_13_23_34_123 --geotag_source gpx --geotag_source_path {gpx_file} {setup_data} {setup_data.join('my_samples')}",
shell=True,
)
assert x.returncode != 0, x.stderr
with open(desc_path) as fp:
descs = json.load(fp)
assert 1 == len(find_desc_errors(descs))
assert 2 == len(filter_out_errors(descs))
def test_video_process_multiple_videos(setup_data: py.path.local):
if not is_ffmpeg_installed:
pytest.skip("skip because ffmpeg not installed")
gpx_file = setup_data.join("test.gpx")
desc_path = setup_data.join("my_samples").join("mapillary_image_description.json")
sub_folder = setup_data.join("video_sub_folder").mkdir()
video_path = setup_data.join("sample-5s.mp4")
video_path.copy(sub_folder)
with gpx_file.open("w") as fp:
fp.write(GPX_CONTENT)
x = subprocess.run(
f"{EXECUTABLE} video_process --video_start_time 2018_06_08_13_23_34_123 --geotag_source gpx --geotag_source_path {gpx_file} {video_path} {setup_data.join('my_samples')}",
shell=True,
)
assert x.returncode != 0, x.stderr
with open(desc_path) as fp:
descs = json.load(fp)
for d in descs:
assert d["filename"].startswith("sample-5s.mp4/")
assert 1 == len(find_desc_errors(descs))
assert 2 == len(filter_out_errors(descs))
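# Compute the MD5 digest by streaming the file in 32 MiB chunks, so large videos are never loaded into memory at once.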
def file_md5sum(path) -> str:
with open(path, "rb") as fp:
md5 = hashlib.md5()
while True:
buf = fp.read(1024 * 1024 * 32)
if not buf:
break
md5.update(buf)
return md5.hexdigest()
def test_upload_mp4(
tmpdir: py.path.local, setup_data: py.path.local, setup_config: py.path.local
):
os.environ["MAPILLARY_CONFIG_PATH"] = str(setup_config)
upload_dir = tmpdir.mkdir("mapillary_public_uploads")
os.environ["MAPILLARY_UPLOAD_PATH"] = str(upload_dir)
video_path = setup_data.join("sample-5s.mp4")
md5sum = file_md5sum(video_path)
x = subprocess.run(
f"{EXECUTABLE} upload {video_path} --dry_run --user_name={USERNAME}",
shell=True,
)
assert x.returncode == 0, x.stderr
assert 1 == len(upload_dir.listdir())
assert {"mly_tools_8cd0e9af15f4baaafe9dfe98ace8b886.mp4"} == {
os.path.basename(f) for f in upload_dir.listdir()
}
assert {md5sum} == {file_md5sum(f) for f in upload_dir.listdir()}
|
from discord.ext import commands
from discord.utils import escape_markdown
from fuzzywuzzy import process as fwp
from util.data.guild_data import GuildData
class Tags(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name="settag", aliases=["edittag", "newtag", "addtag"])
@commands.cooldown(1, 5)
@commands.guild_only()
@commands.has_permissions(manage_guild=True)
async def tag_set(self, ctx, tag_name: str, *, message: str):
"""
Create a new bot tag.
"""
tag_name = tag_name.lower()
message = message[:1900]
GuildData(str(ctx.guild.id)).tags.set(tag_name, message)
await ctx.send(f"Set tag `{tag_name}` to `{escape_markdown(message)}`.")
@commands.command(name="deletetag", aliases=["deltag", "tagdelete"])
@commands.cooldown(1, 5)
@commands.guild_only()
@commands.has_permissions(manage_guild=True)
async def tag_delete(self, ctx, *, tag_name: str):
"""
Delete a bot tag.
"""
tag_name = tag_name.lower()
result = GuildData(str(ctx.guild.id)).tags.delete(tag_name)
if result:
await ctx.send(f"Deleted tag `{tag_name}`.")
else:
await ctx.send("Invalid tag!")
@commands.command(name="taglist", aliases=["listtags", "tags"])
@commands.cooldown(1, 3)
@commands.guild_only()
async def tag_list(self, ctx):
"""
List available tags for the server.
"""
guild_tags = GuildData(str(ctx.guild.id)).tags.fetch_all()
if not len(guild_tags) > 0:
await ctx.send("No tags available!")
return
tags = f"{ctx.guild.name} Server Tags\n\n"
for t in sorted(guild_tags):
value = t[2]
value = value.replace("\n", "")
tags += f"[{t[1]}] {escape_markdown(value[:100])}{'...' if len(value) > 100 else ''}\n"
parts = [(tags[i:i + 750]) for i in range(0, len(tags), 750)]
for part in parts:
part = part.replace("```", "")
await ctx.send(f"```{part}```")
@commands.command(name="tagsearch", aliases=["searchtag"])
@commands.cooldown(1, 3)
@commands.guild_only()
async def tag_search(self, ctx, *, tag_name: str):
"""
Search for a tag.
"""
search_results = self.handle_search(ctx, tag_name)
if len(search_results) <= 0:
await ctx.send("No search results found!")
return
results_txt = f"Tag Search Results ({tag_name})\n\n"
for (res, _) in search_results:
results_txt += f"{res}\n"
await ctx.send(f"```{results_txt}```")
@commands.command()
@commands.cooldown(1, 2)
@commands.guild_only()
async def tag(self, ctx, *, tag_name: str):
"""
Call a bot tag.
"""
tag_name = tag_name.lower()
tags = GuildData(str(ctx.guild.id)).tags
if len(tags.fetch_all()) <= 0:
await ctx.send("No tags available!")
return
# response = self.tags[str(ctx.guild.id)][tag_name]
response = tags.fetch_by_name(tag_name)
if response:
response = self.handle_variables(response, ctx)
await ctx.send(response)
else:
search_results = self.handle_search(ctx, tag_name)[:3]
results_txt = ""
for (res, _) in search_results:
results_txt += f"{res}\n"
await ctx.send(f"Couldn't find that tag. Did you mean one of the following?\n```\n{results_txt}\n```")
@commands.command(name="tagvariables", aliases=["tagvars", "variables", "vars"])
@commands.cooldown(1, 3)
@commands.guild_only()
async def tag_variables(self, ctx):
"""
Get the list of supported tag variables.
Tag variables are parts of a string that get replaced by specific data.
"""
variables = self.get_variables(ctx)
vs = f"Tag Variables\n\n"
for v in sorted(variables):
vs += f"[{v}] Ex: {variables[str(v)]}\n"
parts = [(vs[i:i + 750]) for i in range(0, len(vs), 750)]
for part in parts:
await ctx.send(f"```{part}```")
@staticmethod
def get_variables(ctx):
variables = {
"author": ctx.author.display_name,
"author_id": ctx.author.id,
"channel": ctx.channel.name,
"command_key": ctx.prefix,
"server_id": ctx.guild.id,
"server_name": ctx.guild.name
}
return variables
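# Replace ${variable} placeholders in a tag message with the live values returned by get_variables().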
def handle_variables(self, message, ctx):
variables = self.get_variables(ctx)
def to_key(v_):
return f"${{{v_}}}"
for v in variables:
message = message.replace(to_key(v), str(variables[v]))
return message
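# Fuzzy-match the query against every stored tag name; fuzzywuzzy's process.extract returns (name, score) pairs ordered best-first.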
@staticmethod
def handle_search(ctx, tag_name):
options = []
for tag in GuildData(str(ctx.guild.id)).tags.fetch_all():
options.append(tag[1])
search_results = fwp.extract(tag_name, options)
return search_results
def setup(bot):
bot.add_cog(Tags(bot))
|
"""Amazon S3 Module."""
import concurrent.futures
import csv
import logging
import time
import uuid
from itertools import repeat
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union
import boto3 # type: ignore
import botocore.exceptions # type: ignore
import pandas as pd # type: ignore
import pandas.io.parsers # type: ignore
import pyarrow as pa # type: ignore
import pyarrow.lib # type: ignore
import pyarrow.parquet # type: ignore
import s3fs # type: ignore
from boto3.s3.transfer import TransferConfig # type: ignore
from pandas.io.common import infer_compression # type: ignore
from awswrangler import _data_types, _utils, catalog, exceptions
_COMPRESSION_2_EXT: Dict[Optional[str], str] = {None: "", "gzip": ".gz", "snappy": ".snappy"}
_logger: logging.Logger = logging.getLogger(__name__)
def get_bucket_region(bucket: str, boto3_session: Optional[boto3.Session] = None) -> str:
"""Get bucket region name.
Parameters
----------
bucket : str
Bucket name.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
str
Region code (e.g. 'us-east-1').
Examples
--------
Using the default boto3 session
>>> import awswrangler as wr
>>> region = wr.s3.get_bucket_region('bucket-name')
Using a custom boto3 session
>>> import boto3
>>> import awswrangler as wr
>>> region = wr.s3.get_bucket_region('bucket-name', boto3_session=boto3.Session())
"""
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
_logger.debug(f"bucket: {bucket}")
region: str = client_s3.get_bucket_location(Bucket=bucket)["LocationConstraint"]
region = "us-east-1" if region is None else region
_logger.debug(f"region: {region}")
return region
def does_object_exist(path: str, boto3_session: Optional[boto3.Session] = None) -> bool:
"""Check if object exists on S3.
Parameters
----------
path: str
S3 path (e.g. s3://bucket/key).
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
bool
True if exists, False otherwise.
Examples
--------
Using the default boto3 session
>>> import awswrangler as wr
>>> wr.s3.does_object_exist('s3://bucket/key_real')
True
>>> wr.s3.does_object_exist('s3://bucket/key_unreal')
False
Using a custom boto3 session
>>> import boto3
>>> import awswrangler as wr
>>> wr.s3.does_object_exist('s3://bucket/key_real', boto3_session=boto3.Session())
True
>>> wr.s3.does_object_exist('s3://bucket/key_unreal', boto3_session=boto3.Session())
False
"""
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
bucket: str
key: str
bucket, key = path.replace("s3://", "").split("/", 1)
try:
client_s3.head_object(Bucket=bucket, Key=key)
return True
except botocore.exceptions.ClientError as ex:
if ex.response["ResponseMetadata"]["HTTPStatusCode"] == 404:
return False
raise ex # pragma: no cover
def list_objects(path: str, boto3_session: Optional[boto3.Session] = None) -> List[str]:
"""List Amazon S3 objects from a prefix.
Parameters
----------
path : str
S3 path (e.g. s3://bucket/prefix).
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
List[str]
List of objects paths.
Examples
--------
Using the default boto3 session
>>> import awswrangler as wr
>>> wr.s3.list_objects('s3://bucket/prefix')
['s3://bucket/prefix0', 's3://bucket/prefix1', 's3://bucket/prefix2']
Using a custom boto3 session
>>> import boto3
>>> import awswrangler as wr
>>> wr.s3.list_objects('s3://bucket/prefix', boto3_session=boto3.Session())
['s3://bucket/prefix0', 's3://bucket/prefix1', 's3://bucket/prefix2']
"""
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
paginator = client_s3.get_paginator("list_objects_v2")
bucket: str
prefix: str
bucket, prefix = _utils.parse_path(path=path)
response_iterator = paginator.paginate(Bucket=bucket, Prefix=prefix, PaginationConfig={"PageSize": 1000})
paths: List[str] = []
for page in response_iterator:
contents: Optional[List] = page.get("Contents")
if contents is not None:
for content in contents:
if (content is not None) and ("Key" in content):
key: str = content["Key"]
paths.append(f"s3://{bucket}/{key}")
return paths
def _path2list(path: Union[str, List[str]], boto3_session: Optional[boto3.Session]) -> List[str]:
if isinstance(path, str): # prefix
paths: List[str] = list_objects(path=path, boto3_session=boto3_session)
elif isinstance(path, list):
paths = path
else:
raise exceptions.InvalidArgumentType(f"{type(path)} is not a valid path type. Please, use str or List[str].")
return paths
def delete_objects(
path: Union[str, List[str]], use_threads: bool = True, boto3_session: Optional[boto3.Session] = None
) -> None:
"""Delete Amazon S3 objects from a received S3 prefix or list of S3 objects paths.
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
None
None.
Examples
--------
>>> import awswrangler as wr
>>> wr.s3.delete_objects(['s3://bucket/key0', 's3://bucket/key1']) # Delete both objects
>>> wr.s3.delete_objects('s3://bucket/prefix') # Delete all objects under the received prefix
"""
paths: List[str] = _path2list(path=path, boto3_session=boto3_session)
if len(paths) < 1:
return
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
buckets: Dict[str, List[str]] = _split_paths_by_bucket(paths=paths)
for bucket, keys in buckets.items():
chunks: List[List[str]] = _utils.chunkify(lst=keys, max_length=1_000)
if use_threads is False:
for chunk in chunks:
_delete_objects(bucket=bucket, keys=chunk, client_s3=client_s3)
else:
cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:
executor.map(_delete_objects, repeat(bucket), chunks, repeat(client_s3))
def _split_paths_by_bucket(paths: List[str]) -> Dict[str, List[str]]:
buckets: Dict[str, List[str]] = {}
bucket: str
key: str
for path in paths:
bucket, key = _utils.parse_path(path=path)
if bucket not in buckets:
buckets[bucket] = []
buckets[bucket].append(key)
return buckets
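# The S3 DeleteObjects API accepts at most 1,000 keys per request, hence the 1,000-key chunks built above.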
def _delete_objects(bucket: str, keys: List[str], client_s3: boto3.client) -> None:
_logger.debug(f"len(keys): {len(keys)}")
batch: List[Dict[str, str]] = [{"Key": key} for key in keys]
client_s3.delete_objects(Bucket=bucket, Delete={"Objects": batch})
def describe_objects(
path: Union[str, List[str]],
wait_time: Optional[Union[int, float]] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> Dict[str, Dict[str, Any]]:
"""Describe Amazon S3 objects from a received S3 prefix or list of S3 objects paths.
Fetch attributes like ContentLength, DeleteMarker, LastModified, ContentType, etc
The full list of attributes can be explored under the boto3 head_object documentation:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.head_object
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
wait_time : Union[int,float], optional
How much time (in seconds) Wrangler should keep trying to reach these objects.
Very useful to overcome eventual consistency issues.
`None` means only a single try will be done.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
Dict[str, Dict[str, Any]]
Return a dictionary of objects returned from head_objects where the key is the object path.
The response object can be explored here:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.head_object
Examples
--------
>>> import awswrangler as wr
>>> descs0 = wr.s3.describe_objects(['s3://bucket/key0', 's3://bucket/key1']) # Describe both objects
>>> descs1 = wr.s3.describe_objects('s3://bucket/prefix') # Describe all objects under the prefix
>>> descs2 = wr.s3.describe_objects('s3://bucket/prefix', wait_time=30) # Overcoming eventual consistency issues
"""
paths: List[str] = _path2list(path=path, boto3_session=boto3_session)
if len(paths) < 1:
return {}
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
resp_list: List[Tuple[str, Dict[str, Any]]]
if use_threads is False:
resp_list = [_describe_object(path=p, wait_time=wait_time, client_s3=client_s3) for p in paths]
else:
cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:
resp_list = list(executor.map(_describe_object, paths, repeat(wait_time), repeat(client_s3)))
desc_list: Dict[str, Dict[str, Any]] = dict(resp_list)
return desc_list
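# Retry head_object roughly once per second for up to wait_time seconds to ride out S3 eventual-consistency delays on fresh keys.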
def _describe_object(
path: str, wait_time: Optional[Union[int, float]], client_s3: boto3.client
) -> Tuple[str, Dict[str, Any]]:
wait_time = int(wait_time) if isinstance(wait_time, float) else wait_time
tries: int = wait_time if (wait_time is not None) and (wait_time > 0) else 1
bucket: str
key: str
bucket, key = _utils.parse_path(path=path)
desc: Dict[str, Any] = {}
for i in range(tries, 0, -1):
try:
desc = client_s3.head_object(Bucket=bucket, Key=key)
break
except botocore.exceptions.ClientError as e: # pragma: no cover
if e.response["ResponseMetadata"]["HTTPStatusCode"] == 404: # Not Found
_logger.debug(f"Object not found. {i} seconds remaining to wait.")
if i == 1: # Last try, there is no more need to sleep
break
time.sleep(1)
else:
raise e
return path, desc
def size_objects(
path: Union[str, List[str]],
wait_time: Optional[Union[int, float]] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> Dict[str, Optional[int]]:
"""Get the size (ContentLength) in bytes of Amazon S3 objects from a received S3 prefix or list of S3 objects paths.
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
wait_time : Union[int,float], optional
How much time (in seconds) Wrangler should keep trying to reach these objects.
Very useful to overcome eventual consistency issues.
`None` means only a single try will be done.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
Dict[str, Optional[int]]
Dictionary where the key is the object path and the value is the object size.
Examples
--------
>>> import awswrangler as wr
>>> sizes0 = wr.s3.size_objects(['s3://bucket/key0', 's3://bucket/key1']) # Get the sizes of both objects
>>> sizes1 = wr.s3.size_objects('s3://bucket/prefix') # Get the sizes of all objects under the received prefix
>>> sizes2 = wr.s3.size_objects('s3://bucket/prefix', wait_time=30) # Overcoming eventual consistency issues
"""
desc_list: Dict[str, Dict[str, Any]] = describe_objects(
path=path, wait_time=wait_time, use_threads=use_threads, boto3_session=boto3_session
)
size_list: Dict[str, Optional[int]] = {k: d.get("ContentLength", None) for k, d in desc_list.items()}
return size_list
def to_csv( # pylint: disable=too-many-arguments
df: pd.DataFrame,
path: str,
sep: str = ",",
index: bool = True,
columns: Optional[List[str]] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
dataset: bool = False,
partition_cols: Optional[List[str]] = None,
mode: Optional[str] = None,
database: Optional[str] = None,
table: Optional[str] = None,
dtype: Optional[Dict[str, str]] = None,
description: Optional[str] = None,
parameters: Optional[Dict[str, str]] = None,
columns_comments: Optional[Dict[str, str]] = None,
**pandas_kwargs,
) -> Dict[str, Union[List[str], Dict[str, List[str]]]]:
"""Write CSV file or dataset on Amazon S3.
The concept of Dataset goes beyond the simple idea of files and enables more
complex features like partitioning, casting and catalog integration (Amazon Athena/AWS Glue Catalog).
Note
----
The table name and all column names will be automatically sanitized using
`wr.catalog.sanitize_table_name` and `wr.catalog.sanitize_column_name`.
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
df: pandas.DataFrame
Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
path : str
Amazon S3 path (e.g. s3://bucket/filename.csv).
sep : str
String of length 1. Field delimiter for the output file.
index : bool
Write row names (index).
columns : List[str], optional
Columns to write.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 Session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to s3fs, useful for server side encryption
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
dataset: bool
If True, store a CSV dataset instead of a single file.
If True, enables all of the following arguments:
partition_cols, mode, database, table, description, parameters, columns_comments.
partition_cols: List[str], optional
List of column names that will be used to create partitions. Only takes effect if dataset=True.
mode: str, optional
``append`` (Default), ``overwrite``, ``overwrite_partitions``. Only takes effect if dataset=True.
database : str, optional
Glue/Athena catalog: Database name.
table : str, optional
Glue/Athena catalog: Table name.
dtype: Dict[str, str], optional
Dictionary of column names and Athena/Glue types to be cast.
Useful when you have columns with undetermined or mixed data types.
Only takes effect if dataset=True.
(e.g. {'col name': 'bigint', 'col2 name': 'int'})
description: str, optional
Glue/Athena catalog: Table description
parameters: Dict[str, str], optional
Glue/Athena catalog: Key/value pairs to tag the table.
columns_comments: Dict[str, str], optional
Glue/Athena catalog:
Column names and the related comments (e.g. {'col0': 'Column 0.', 'col1': 'Column 1.', 'col2': 'Partition.'}).
pandas_kwargs:
keyword arguments forwarded to pandas.DataFrame.to_csv()
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_csv.html
Returns
-------
Dict[str, Union[List[str], Dict[str, List[str]]]]
Dictionary with:
'paths': List of all stored file paths on S3.
'partitions_values': Dictionary of partitions added, with S3 path locations as keys
and lists of partition values (as str) as values.
Examples
--------
Writing single file
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/prefix/my_file.csv',
... )
{
'paths': ['s3://bucket/prefix/my_file.csv'],
'partitions_values': {}
}
Writing single file encrypted with a KMS key
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/prefix/my_file.csv',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
... 'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'
... }
... )
{
'paths': ['s3://bucket/prefix/my_file.csv'],
'partitions_values': {}
}
Writing partitioned dataset
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B']
... }),
... path='s3://bucket/prefix',
... dataset=True,
... partition_cols=['col2']
... )
{
'paths': ['s3://.../col2=A/x.csv', 's3://.../col2=B/y.csv'],
'partitions_values': {
's3://.../col2=A/': ['A'],
's3://.../col2=B/': ['B']
}
}
Writing dataset to S3 with metadata on Athena/Glue Catalog.
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B']
... }),
... path='s3://bucket/prefix',
... dataset=True,
... partition_cols=['col2'],
... database='default', # Athena/Glue database
... table='my_table' # Athena/Glue table
... )
{
'paths': ['s3://.../col2=A/x.csv', 's3://.../col2=B/y.csv'],
'partitions_values': {
's3://.../col2=A/': ['A'],
's3://.../col2=B/': ['B']
}
}
Writing dataset casting empty column data type
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B'],
... 'col3': [None, None, None]
... }),
... path='s3://bucket/prefix',
... dataset=True,
... database='default', # Athena/Glue database
... table='my_table', # Athena/Glue table
... dtype={'col3': 'date'}
... )
{
'paths': ['s3://.../x.csv'],
'partitions_values': {}
}
"""
if (database is None) ^ (table is None):
raise exceptions.InvalidArgumentCombination(
"Please pass database and table arguments to be able to store the metadata into the Athena/Glue Catalog."
)
if df.empty is True:
raise exceptions.EmptyDataFrame()
session: boto3.Session = _utils.ensure_session(session=boto3_session)
partition_cols = partition_cols if partition_cols else []
dtype = dtype if dtype else {}
columns_comments = columns_comments if columns_comments else {}
partitions_values: Dict[str, List[str]] = {}
fs: s3fs.S3FileSystem = _utils.get_fs(session=session, s3_additional_kwargs=s3_additional_kwargs)
if dataset is False:
if partition_cols:
raise exceptions.InvalidArgumentCombination("Please, pass dataset=True to be able to use partition_cols.")
if mode is not None:
raise exceptions.InvalidArgumentCombination("Please pass dataset=True to be able to use mode.")
if any(arg is not None for arg in (database, table, description, parameters)):
raise exceptions.InvalidArgumentCombination(
"Please pass dataset=True to be able to use any one of these "
"arguments: database, table, description, parameters, "
"columns_comments."
)
pandas_kwargs["sep"] = sep
pandas_kwargs["index"] = index
pandas_kwargs["columns"] = columns
_to_text(file_format="csv", df=df, path=path, fs=fs, **pandas_kwargs)
paths = [path]
else:
mode = "append" if mode is None else mode
exist: bool = False
if columns:
df = df[columns]
if (database is not None) and (table is not None): # Normalize table to respect Athena's standards
df = catalog.sanitize_dataframe_columns_names(df=df)
partition_cols = [catalog.sanitize_column_name(p) for p in partition_cols]
dtype = {catalog.sanitize_column_name(k): v.lower() for k, v in dtype.items()}
columns_comments = {catalog.sanitize_column_name(k): v for k, v in columns_comments.items()}
exist = catalog.does_table_exist(database=database, table=table, boto3_session=session)
if (exist is True) and (mode in ("append", "overwrite_partitions")):
for k, v in catalog.get_table_types(database=database, table=table, boto3_session=session).items():
dtype[k] = v
df = catalog.drop_duplicated_columns(df=df)
paths, partitions_values = _to_csv_dataset(
df=df,
path=path,
index=index,
sep=sep,
fs=fs,
use_threads=use_threads,
partition_cols=partition_cols,
dtype=dtype,
mode=mode,
boto3_session=session,
)
if (database is not None) and (table is not None):
columns_types, partitions_types = _data_types.athena_types_from_pandas_partitioned(
df=df, index=index, partition_cols=partition_cols, dtype=dtype, index_left=True
)
if (exist is False) or (mode == "overwrite"):
catalog.create_csv_table(
database=database,
table=table,
path=path,
columns_types=columns_types,
partitions_types=partitions_types,
description=description,
parameters=parameters,
columns_comments=columns_comments,
boto3_session=session,
mode="overwrite",
sep=sep,
)
if partitions_values:
_logger.debug(f"partitions_values:\n{partitions_values}")
catalog.add_csv_partitions(
database=database, table=table, partitions_values=partitions_values, boto3_session=session, sep=sep
)
return {"paths": paths, "partitions_values": partitions_values}
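# Write the DataFrame as CSV files (one per partition when partition_cols is set) under Hive-style key=value/ prefixes.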
def _to_csv_dataset(
df: pd.DataFrame,
path: str,
index: bool,
sep: str,
fs: s3fs.S3FileSystem,
use_threads: bool,
mode: str,
dtype: Dict[str, str],
partition_cols: Optional[List[str]] = None,
boto3_session: Optional[boto3.Session] = None,
) -> Tuple[List[str], Dict[str, List[str]]]:
paths: List[str] = []
partitions_values: Dict[str, List[str]] = {}
path = path if path[-1] == "/" else f"{path}/"
if mode not in ["append", "overwrite", "overwrite_partitions"]:
raise exceptions.InvalidArgumentValue(
f"{mode} is a invalid mode, please use append, overwrite or overwrite_partitions."
)
if (mode == "overwrite") or ((mode == "overwrite_partitions") and (not partition_cols)):
delete_objects(path=path, use_threads=use_threads, boto3_session=boto3_session)
df = _data_types.cast_pandas_with_athena_types(df=df, dtype=dtype)
_logger.debug(f"dtypes: {df.dtypes}")
if not partition_cols:
file_path: str = f"{path}{uuid.uuid4().hex}.csv"
_to_text(
file_format="csv",
df=df,
path=file_path,
fs=fs,
quoting=csv.QUOTE_NONE,
escapechar="\\",
header=False,
date_format="%Y-%m-%d %H:%M:%S.%f",
index=index,
sep=sep,
)
paths.append(file_path)
else:
for keys, subgroup in df.groupby(by=partition_cols, observed=True):
subgroup = subgroup.drop(partition_cols, axis="columns")
keys = (keys,) if not isinstance(keys, tuple) else keys
subdir = "/".join([f"{name}={val}" for name, val in zip(partition_cols, keys)])
prefix: str = f"{path}{subdir}/"
if mode == "overwrite_partitions":
delete_objects(path=prefix, use_threads=use_threads, boto3_session=boto3_session)
file_path = f"{prefix}{uuid.uuid4().hex}.csv"
_to_text(
file_format="csv",
df=subgroup,
path=file_path,
fs=fs,
quoting=csv.QUOTE_NONE,
escapechar="\\",
header=False,
date_format="%Y-%m-%d %H:%M:%S.%f",
index=index,
sep=sep,
)
paths.append(file_path)
partitions_values[prefix] = [str(k) for k in keys]
return paths, partitions_values
def to_json(
df: pd.DataFrame,
path: str,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
**pandas_kwargs,
) -> None:
"""Write JSON file on Amazon S3.
Parameters
----------
df: pandas.DataFrame
Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
path : str
Amazon S3 path (e.g. s3://bucket/filename.json).
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 Session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to s3fs, useful for server side encryption
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
pandas_kwargs:
keyword arguments forwarded to pandas.DataFrame.to_json()
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_json.html
Returns
-------
None
None.
Examples
--------
Writing JSON file
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_json(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/filename.json',
... )
Writing JSON file encrypted with a KMS key
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_json(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/filename.json',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
... 'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'
... }
... )
"""
return _to_text(
file_format="json",
df=df,
path=path,
boto3_session=boto3_session,
s3_additional_kwargs=s3_additional_kwargs,
**pandas_kwargs,
)
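# Shared text writer: open the target S3 path through s3fs and delegate to the matching pandas writer (to_csv or to_json).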
def _to_text(
file_format: str,
df: pd.DataFrame,
path: str,
fs: Optional[s3fs.S3FileSystem] = None,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
**pandas_kwargs,
) -> None:
if df.empty is True: # pragma: no cover
raise exceptions.EmptyDataFrame()
if fs is None:
fs = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)
with fs.open(path, "w") as f:
if file_format == "csv":
df.to_csv(f, **pandas_kwargs)
elif file_format == "json":
df.to_json(f, **pandas_kwargs)
def to_parquet( # pylint: disable=too-many-arguments
df: pd.DataFrame,
path: str,
index: bool = False,
compression: Optional[str] = "snappy",
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
dataset: bool = False,
partition_cols: Optional[List[str]] = None,
mode: Optional[str] = None,
database: Optional[str] = None,
table: Optional[str] = None,
dtype: Optional[Dict[str, str]] = None,
description: Optional[str] = None,
parameters: Optional[Dict[str, str]] = None,
columns_comments: Optional[Dict[str, str]] = None,
) -> Dict[str, Union[List[str], Dict[str, List[str]]]]:
"""Write Parquet file or dataset on Amazon S3.
The concept of Dataset goes beyond the simple idea of files and enables more
complex features like partitioning, casting and catalog integration (Amazon Athena/AWS Glue Catalog).
Note
----
The table name and all column names will be automatically sanitized using
`wr.catalog.sanitize_table_name` and `wr.catalog.sanitize_column_name`.
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
df: pandas.DataFrame
Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
path : str
S3 path (for file e.g. ``s3://bucket/prefix/filename.parquet``) (for dataset e.g. ``s3://bucket/prefix``).
index : bool
True to store the DataFrame index in file, otherwise False to ignore it.
compression: str, optional
Compression style (``None``, ``snappy``, ``gzip``).
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to s3fs, useful for server side encryption
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
dataset: bool
If True, store a Parquet dataset instead of a single file.
If True, enables all of the following arguments:
partition_cols, mode, database, table, description, parameters, columns_comments.
partition_cols: List[str], optional
List of column names that will be used to create partitions. Only takes effect if dataset=True.
mode: str, optional
``append`` (Default), ``overwrite``, ``overwrite_partitions``. Only takes effect if dataset=True.
database : str, optional
Glue/Athena catalog: Database name.
table : str, optional
Glue/Athena catalog: Table name.
dtype: Dict[str, str], optional
Dictionary of column names and Athena/Glue types to be cast.
Useful when you have columns with undetermined or mixed data types.
Only takes effect if dataset=True.
(e.g. {'col name': 'bigint', 'col2 name': 'int'})
description: str, optional
Glue/Athena catalog: Table description
parameters: Dict[str, str], optional
Glue/Athena catalog: Key/value pairs to tag the table.
columns_comments: Dict[str, str], optional
Glue/Athena catalog:
Column names and the related comments (e.g. {'col0': 'Column 0.', 'col1': 'Column 1.', 'col2': 'Partition.'}).
Returns
-------
Dict[str, Union[List[str], Dict[str, List[str]]]]
Dictionary with:
'paths': List of all stored file paths on S3.
'partitions_values': Dictionary of partitions added, with S3 path locations as keys
and lists of partition values (as str) as values.
Examples
--------
Writing single file
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/prefix/my_file.parquet',
... )
{
'paths': ['s3://bucket/prefix/my_file.parquet'],
'partitions_values': {}
}
Writing single file encrypted with a KMS key
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/prefix/my_file.parquet',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
... 'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'
... }
... )
{
'paths': ['s3://bucket/prefix/my_file.parquet'],
'partitions_values': {}
}
Writing partitioned dataset
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B']
... }),
... path='s3://bucket/prefix',
... dataset=True,
... partition_cols=['col2']
... )
{
'paths': ['s3://.../col2=A/x.parquet', 's3://.../col2=B/y.parquet'],
'partitions_values': {
's3://.../col2=A/': ['A'],
's3://.../col2=B/': ['B']
}
}
Writing dataset to S3 with metadata on Athena/Glue Catalog.
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B']
... }),
... path='s3://bucket/prefix',
... dataset=True,
... partition_cols=['col2'],
... database='default', # Athena/Glue database
... table='my_table' # Athena/Glue table
... )
{
'paths': ['s3://.../col2=A/x.parquet', 's3://.../col2=B/y.parquet'],
'partitions_values': {
's3://.../col2=A/': ['A'],
's3://.../col2=B/': ['B']
}
}
Writing dataset casting empty column data type
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B'],
... 'col3': [None, None, None]
... }),
... path='s3://bucket/prefix',
... dataset=True,
... database='default', # Athena/Glue database
... table='my_table', # Athena/Glue table
... dtype={'col3': 'date'}
... )
{
'paths': ['s3://.../x.parquet'],
'partitions_values': {}
}
"""
if (database is None) ^ (table is None):
raise exceptions.InvalidArgumentCombination(
"Please pass database and table arguments to be able to store the metadata into the Athena/Glue Catalog."
)
if df.empty is True:
raise exceptions.EmptyDataFrame()
session: boto3.Session = _utils.ensure_session(session=boto3_session)
partition_cols = partition_cols if partition_cols else []
dtype = dtype if dtype else {}
columns_comments = columns_comments if columns_comments else {}
partitions_values: Dict[str, List[str]] = {}
cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
fs: s3fs.S3FileSystem = _utils.get_fs(session=session, s3_additional_kwargs=s3_additional_kwargs)
compression_ext: Optional[str] = _COMPRESSION_2_EXT.get(compression, None)
if compression_ext is None:
raise exceptions.InvalidCompression(f"{compression} is invalid, please use None, snappy or gzip.")
if dataset is False:
if partition_cols:
raise exceptions.InvalidArgumentCombination("Please, pass dataset=True to be able to use partition_cols.")
if mode is not None:
raise exceptions.InvalidArgumentCombination("Please pass dataset=True to be able to use mode.")
if any(arg is not None for arg in (database, table, description, parameters)):
raise exceptions.InvalidArgumentCombination(
"Please pass dataset=True to be able to use any one of these "
"arguments: database, table, description, parameters, "
"columns_comments."
)
paths = [
_to_parquet_file(
df=df, path=path, schema=None, index=index, compression=compression, cpus=cpus, fs=fs, dtype={}
)
]
else:
mode = "append" if mode is None else mode
exist: bool = False
if (database is not None) and (table is not None): # Normalize table to respect Athena's standards
df = catalog.sanitize_dataframe_columns_names(df=df)
partition_cols = [catalog.sanitize_column_name(p) for p in partition_cols]
dtype = {catalog.sanitize_column_name(k): v.lower() for k, v in dtype.items()}
columns_comments = {catalog.sanitize_column_name(k): v for k, v in columns_comments.items()}
exist = catalog.does_table_exist(database=database, table=table, boto3_session=session)
if (exist is True) and (mode in ("append", "overwrite_partitions")):
for k, v in catalog.get_table_types(database=database, table=table, boto3_session=session).items():
dtype[k] = v
df = catalog.drop_duplicated_columns(df=df)
paths, partitions_values = _to_parquet_dataset(
df=df,
path=path,
index=index,
compression=compression,
compression_ext=compression_ext,
cpus=cpus,
fs=fs,
use_threads=use_threads,
partition_cols=partition_cols,
dtype=dtype,
mode=mode,
boto3_session=session,
)
if (database is not None) and (table is not None):
columns_types, partitions_types = _data_types.athena_types_from_pandas_partitioned(
df=df, index=index, partition_cols=partition_cols, dtype=dtype
)
if (exist is False) or (mode == "overwrite"):
catalog.create_parquet_table(
database=database,
table=table,
path=path,
columns_types=columns_types,
partitions_types=partitions_types,
compression=compression,
description=description,
parameters=parameters,
columns_comments=columns_comments,
boto3_session=session,
mode="overwrite",
)
if partitions_values:
_logger.debug(f"partitions_values:\n{partitions_values}")
catalog.add_parquet_partitions(
database=database,
table=table,
partitions_values=partitions_values,
compression=compression,
boto3_session=session,
)
return {"paths": paths, "partitions_values": partitions_values}
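# Parquet counterpart of _to_csv_dataset: cast columns to the requested Athena types, then write one file per Hive-style partition prefix.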
def _to_parquet_dataset(
df: pd.DataFrame,
path: str,
index: bool,
compression: Optional[str],
compression_ext: str,
cpus: int,
fs: s3fs.S3FileSystem,
use_threads: bool,
mode: str,
dtype: Dict[str, str],
partition_cols: Optional[List[str]] = None,
boto3_session: Optional[boto3.Session] = None,
) -> Tuple[List[str], Dict[str, List[str]]]:
paths: List[str] = []
partitions_values: Dict[str, List[str]] = {}
path = path if path[-1] == "/" else f"{path}/"
if mode not in ["append", "overwrite", "overwrite_partitions"]:
raise exceptions.InvalidArgumentValue(
f"{mode} is a invalid mode, please use append, overwrite or overwrite_partitions."
)
if (mode == "overwrite") or ((mode == "overwrite_partitions") and (not partition_cols)):
delete_objects(path=path, use_threads=use_threads, boto3_session=boto3_session)
df = _data_types.cast_pandas_with_athena_types(df=df, dtype=dtype)
schema: pa.Schema = _data_types.pyarrow_schema_from_pandas(
df=df, index=index, ignore_cols=partition_cols, dtype=dtype
)
_logger.debug(f"schema: {schema}")
if not partition_cols:
file_path: str = f"{path}{uuid.uuid4().hex}{compression_ext}.parquet"
_to_parquet_file(
df=df, schema=schema, path=file_path, index=index, compression=compression, cpus=cpus, fs=fs, dtype=dtype
)
paths.append(file_path)
else:
for keys, subgroup in df.groupby(by=partition_cols, observed=True):
subgroup = subgroup.drop(partition_cols, axis="columns")
keys = (keys,) if not isinstance(keys, tuple) else keys
subdir = "/".join([f"{name}={val}" for name, val in zip(partition_cols, keys)])
prefix: str = f"{path}{subdir}/"
if mode == "overwrite_partitions":
delete_objects(path=prefix, use_threads=use_threads, boto3_session=boto3_session)
file_path = f"{prefix}{uuid.uuid4().hex}{compression_ext}.parquet"
_to_parquet_file(
df=subgroup,
schema=schema,
path=file_path,
index=index,
compression=compression,
cpus=cpus,
fs=fs,
dtype=dtype,
)
paths.append(file_path)
partitions_values[prefix] = [str(k) for k in keys]
return paths, partitions_values
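# Convert the DataFrame to an Arrow table, cast any Athena-typed columns, and write a single Spark-flavored Parquet file to S3.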
def _to_parquet_file(
df: pd.DataFrame,
path: str,
schema: pa.Schema,
index: bool,
compression: Optional[str],
cpus: int,
fs: s3fs.S3FileSystem,
dtype: Dict[str, str],
) -> str:
table: pa.Table = pyarrow.Table.from_pandas(df=df, schema=schema, nthreads=cpus, preserve_index=index, safe=True)
for col_name, col_type in dtype.items():
if col_name in table.column_names:
col_index = table.column_names.index(col_name)
pyarrow_dtype = _data_types.athena2pyarrow(col_type)
field = pa.field(name=col_name, type=pyarrow_dtype)
table = table.set_column(col_index, field, table.column(col_name).cast(pyarrow_dtype))
_logger.debug(f"Casting column {col_name} ({col_index}) to {col_type} ({pyarrow_dtype})")
pyarrow.parquet.write_table(
table=table,
where=path,
write_statistics=True,
use_dictionary=True,
filesystem=fs,
coerce_timestamps="ms",
compression=compression,
flavor="spark",
)
return path
def read_csv(
path: Union[str, List[str]],
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
chunksize: Optional[int] = None,
**pandas_kwargs,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Read CSV file(s) from from a received S3 prefix or list of S3 objects paths.
Note
----
For partial and gradual reading use the argument ``chunksize`` instead of ``iterator``.
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. ``[s3://bucket/key0, s3://bucket/key1]``).
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to s3fs, useful for server side encryption
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
chunksize: int, optional
If specified, return a generator where chunksize is the number of rows to include in each chunk.
pandas_kwargs:
keyword arguments forwarded to pandas.read_csv().
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html
Returns
-------
Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]
Pandas DataFrame or a Generator in case of `chunksize != None`.
Examples
--------
Reading all CSV files under a prefix
>>> import awswrangler as wr
>>> df = wr.s3.read_csv(path='s3://bucket/prefix/')
Reading all CSV files under a prefix encrypted with a KMS key
>>> import awswrangler as wr
>>> df = wr.s3.read_csv(
... path='s3://bucket/prefix/',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
... 'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'
... }
... )
Reading all CSV files from a list
>>> import awswrangler as wr
>>> df = wr.s3.read_csv(path=['s3://bucket/filename0.csv', 's3://bucket/filename1.csv'])
Reading in chunks of 100 lines
>>> import awswrangler as wr
>>> dfs = wr.s3.read_csv(path=['s3://bucket/filename0.csv', 's3://bucket/filename1.csv'], chunksize=100)
>>> for df in dfs:
>>> print(df) # 100 lines Pandas DataFrame
"""
return _read_text(
parser_func=pd.read_csv,
path=path,
use_threads=use_threads,
boto3_session=boto3_session,
s3_additional_kwargs=s3_additional_kwargs,
chunksize=chunksize,
**pandas_kwargs,
)
def read_fwf(
path: Union[str, List[str]],
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
chunksize: Optional[int] = None,
**pandas_kwargs,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Read fixed-width formatted file(s) from from a received S3 prefix or list of S3 objects paths.
Note
----
For partial and gradual reading use the argument ``chunksize`` instead of ``iterator``.
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. ``[s3://bucket/key0, s3://bucket/key1]``).
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to s3fs, useful for server side encryption
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
chunksize: int, optional
If specified, return a generator where chunksize is the number of rows to include in each chunk.
pandas_kwargs:
keyword arguments forwarded to pandas.read_fwf().
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_fwf.html
Returns
-------
Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]
Pandas DataFrame or a Generator in case of `chunksize != None`.
Examples
--------
Reading all fixed-width formatted (FWF) files under a prefix
>>> import awswrangler as wr
>>> df = wr.s3.read_fwf(path='s3://bucket/prefix/')
Reading all fixed-width formatted (FWF) files under a prefix encrypted with a KMS key
>>> import awswrangler as wr
>>> df = wr.s3.read_fwf(
... path='s3://bucket/prefix/',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
... 'SSEKMSKeyId': 'YOUR_KMY_KEY_ARN'
... }
... )
Reading all fixed-width formatted (FWF) files from a list
>>> import awswrangler as wr
>>> df = wr.s3.read_fwf(path=['s3://bucket/filename0.txt', 's3://bucket/filename1.txt'])
Reading in chunks of 100 lines
>>> import awswrangler as wr
>>> dfs = wr.s3.read_fwf(path=['s3://bucket/filename0.txt', 's3://bucket/filename1.txt'], chunksize=100)
>>> for df in dfs:
>>> print(df) # 100 lines Pandas DataFrame
"""
return _read_text(
parser_func=pd.read_fwf,
path=path,
use_threads=use_threads,
boto3_session=boto3_session,
s3_additional_kwargs=s3_additional_kwargs,
chunksize=chunksize,
**pandas_kwargs,
)
def read_json(
path: Union[str, List[str]],
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
chunksize: Optional[int] = None,
**pandas_kwargs,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Read JSON file(s) from from a received S3 prefix or list of S3 objects paths.
Note
----
For partial and gradual reading use the argument ``chunksize`` instead of ``iterator``.
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. ``[s3://bucket/key0, s3://bucket/key1]``).
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to s3fs, useful for server side encryption
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
chunksize: int, optional
If specified, return a generator where chunksize is the number of rows to include in each chunk.
pandas_kwargs:
keyword arguments forwarded to pandas.read_json().
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_json.html
Returns
-------
Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]
Pandas DataFrame or a Generator in case of `chunksize != None`.
Examples
--------
Reading all JSON files under a prefix
>>> import awswrangler as wr
>>> df = wr.s3.read_json(path='s3://bucket/prefix/')
Reading all JSON files under a prefix encrypted with a KMS key
>>> import awswrangler as wr
>>> df = wr.s3.read_json(
... path='s3://bucket/prefix/',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
...     'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'
... }
... )
Reading all JSON files from a list
>>> import awswrangler as wr
>>> df = wr.s3.read_json(path=['s3://bucket/filename0.json', 's3://bucket/filename1.json'])
Reading in chunks of 100 lines
>>> import awswrangler as wr
>>> dfs = wr.s3.read_json(path=['s3://bucket/filename0.json', 's3://bucket/filename1.json'], chunksize=100)
>>> for df in dfs:
>>> print(df) # 100 lines Pandas DataFrame
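Reading JSON Lines objects by forwarding pandas_kwargs (a sketch assuming the files are in JSON Lines format)
>>> import awswrangler as wr
>>> df = wr.s3.read_json(path='s3://bucket/filename0.json', lines=True)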
"""
return _read_text(
parser_func=pd.read_json,
path=path,
use_threads=use_threads,
boto3_session=boto3_session,
s3_additional_kwargs=s3_additional_kwargs,
chunksize=chunksize,
**pandas_kwargs,
)
def _read_text(
parser_func: Callable,
path: Union[str, List[str]],
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
chunksize: Optional[int] = None,
**pandas_kwargs,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
if "iterator" in pandas_kwargs:
raise exceptions.InvalidArgument("Please, use chunksize instead of iterator.")
paths: List[str] = _path2list(path=path, boto3_session=boto3_session)
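    # Two execution paths: with ``chunksize`` a lazy generator of DataFrames is returned;
    # otherwise every object is read eagerly (sequentially or via a thread pool) and concatenated.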
if chunksize is not None:
dfs: Iterator[pd.DataFrame] = _read_text_chunksize(
parser_func=parser_func,
paths=paths,
boto3_session=boto3_session,
chunksize=chunksize,
pandas_args=pandas_kwargs,
s3_additional_kwargs=s3_additional_kwargs,
)
return dfs
if use_threads is False:
df: pd.DataFrame = pd.concat(
objs=[
_read_text_full(
parser_func=parser_func,
path=p,
boto3_session=boto3_session,
pandas_args=pandas_kwargs,
s3_additional_kwargs=s3_additional_kwargs,
)
for p in paths
],
ignore_index=True,
sort=False,
)
else:
cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:
df = pd.concat(
objs=executor.map(
_read_text_full,
repeat(parser_func),
paths,
repeat(boto3_session),
repeat(pandas_kwargs),
repeat(s3_additional_kwargs),
),
ignore_index=True,
sort=False,
)
return df
def _read_text_chunksize(
parser_func: Callable,
paths: List[str],
boto3_session: boto3.Session,
chunksize: int,
pandas_args: Dict[str, Any],
s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> Iterator[pd.DataFrame]:
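    # Stream each S3 object through s3fs and yield DataFrames of ``chunksize`` rows at a time,
    # inferring the compression from the file extension when pandas is left on "infer".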
fs: s3fs.S3FileSystem = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)
for path in paths:
_logger.debug(f"path: {path}")
if pandas_args.get("compression", "infer") == "infer":
pandas_args["compression"] = infer_compression(path, compression="infer")
with fs.open(path, "rb") as f:
reader: pandas.io.parsers.TextFileReader = parser_func(f, chunksize=chunksize, **pandas_args)
for df in reader:
yield df
def _read_text_full(
parser_func: Callable,
path: str,
boto3_session: boto3.Session,
pandas_args: Dict[str, Any],
s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> pd.DataFrame:
fs: s3fs.S3FileSystem = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)
if pandas_args.get("compression", "infer") == "infer":
pandas_args["compression"] = infer_compression(path, compression="infer")
with fs.open(path, "rb") as f:
return parser_func(f, **pandas_args)
def _read_parquet_init(
path: Union[str, List[str]],
filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
categories: List[str] = None,
validate_schema: bool = True,
dataset: bool = False,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> pyarrow.parquet.ParquetDataset:
"""Encapsulate all initialization before the use of the pyarrow.parquet.ParquetDataset."""
if dataset is False:
path_or_paths: Union[str, List[str]] = _path2list(path=path, boto3_session=boto3_session)
elif isinstance(path, str):
path_or_paths = path[:-1] if path.endswith("/") else path
else:
path_or_paths = path
_logger.debug(f"path_or_paths: {path_or_paths}")
fs: s3fs.S3FileSystem = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)
cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
data: pyarrow.parquet.ParquetDataset = pyarrow.parquet.ParquetDataset(
path_or_paths=path_or_paths,
filesystem=fs,
metadata_nthreads=cpus,
filters=filters,
read_dictionary=categories,
validate_schema=validate_schema,
)
return data
def read_parquet(
path: Union[str, List[str]],
filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
columns: Optional[List[str]] = None,
validate_schema: bool = True,
chunked: bool = False,
dataset: bool = False,
categories: List[str] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Read Apache Parquet file(s) from from a received S3 prefix or list of S3 objects paths.
The concept of Dataset goes beyond the simple idea of files and enables more
complex features like partitioning and catalog integration (AWS Glue Catalog).
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
filters: Union[List[Tuple], List[List[Tuple]]], optional
List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.
columns : List[str], optional
Names of columns to read from the file(s).
validate_schema:
Check that individual file schemas are all the same / compatible. Schemas within a
folder prefix should all be the same. Disable it if you have schemas that differ
and want to skip this check.
chunked : bool
If True will break the data into smaller DataFrames (non-deterministic number of lines).
Otherwise return a single DataFrame with the whole data.
dataset: bool
If True read a parquet dataset instead of simple file(s) loading all the related partitions as columns.
categories: List[str], optional
List of columns names that should be returned as pandas.Categorical.
Recommended for memory restricted environments.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to s3fs, useful for server side encryption
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
Returns
-------
Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]
Pandas DataFrame or a Generator in case of `chunked=True`.
Examples
--------
Reading all Parquet files under a prefix
>>> import awswrangler as wr
>>> df = wr.s3.read_parquet(path='s3://bucket/prefix/')
Reading all Parquet files under a prefix encrypted with a KMS key
>>> import awswrangler as wr
>>> df = wr.s3.read_parquet(
... path='s3://bucket/prefix/',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
...     'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'
... }
... )
Reading all Parquet files from a list
>>> import awswrangler as wr
>>> df = wr.s3.read_parquet(path=['s3://bucket/filename0.parquet', 's3://bucket/filename1.parquet'])
Reading in chunks
>>> import awswrangler as wr
>>> dfs = wr.s3.read_parquet(path=['s3://bucket/filename0.parquet', 's3://bucket/filename1.parquet'], chunked=True)
>>> for df in dfs:
>>> print(df) # Smaller Pandas DataFrame
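Reading a partitioned dataset selecting columns and filtering partitions (a sketch; the column and partition names are illustrative)
>>> import awswrangler as wr
>>> df = wr.s3.read_parquet(
...     path='s3://bucket/prefix/',
...     dataset=True,
...     columns=['col0', 'col1'],
...     filters=[('col2', '=', 'A')]
... )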
"""
data: pyarrow.parquet.ParquetDataset = _read_parquet_init(
path=path,
filters=filters,
dataset=dataset,
categories=categories,
use_threads=use_threads,
boto3_session=boto3_session,
s3_additional_kwargs=s3_additional_kwargs,
validate_schema=validate_schema,
)
if chunked is False:
return _read_parquet(
data=data, columns=columns, categories=categories, use_threads=use_threads, validate_schema=validate_schema
)
return _read_parquet_chunked(data=data, columns=columns, categories=categories, use_threads=use_threads)
def _read_parquet(
data: pyarrow.parquet.ParquetDataset,
columns: Optional[List[str]] = None,
categories: List[str] = None,
use_threads: bool = True,
validate_schema: bool = True,
) -> pd.DataFrame:
tables: List[pa.Table] = []
for piece in data.pieces:
table: pa.Table = piece.read(
columns=columns, use_threads=use_threads, partitions=data.partitions, use_pandas_metadata=False
)
tables.append(table)
promote: bool = not validate_schema
table = pa.lib.concat_tables(tables, promote=promote)
return table.to_pandas(
use_threads=use_threads,
split_blocks=True,
self_destruct=True,
integer_object_nulls=False,
date_as_object=True,
ignore_metadata=True,
categories=categories,
types_mapper=_data_types.pyarrow2pandas_extension,
)
def _read_parquet_chunked(
data: pyarrow.parquet.ParquetDataset,
columns: Optional[List[str]] = None,
categories: List[str] = None,
use_threads: bool = True,
) -> Iterator[pd.DataFrame]:
for piece in data.pieces:
table: pa.Table = piece.read(
columns=columns, use_threads=use_threads, partitions=data.partitions, use_pandas_metadata=False
)
yield table.to_pandas(
use_threads=use_threads,
split_blocks=True,
self_destruct=True,
integer_object_nulls=False,
date_as_object=True,
ignore_metadata=True,
categories=categories,
types_mapper=_data_types.pyarrow2pandas_extension,
)
def read_parquet_metadata(
path: Union[str, List[str]],
filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
dataset: bool = False,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> Tuple[Dict[str, str], Optional[Dict[str, str]]]:
"""Read Apache Parquet file(s) metadata from from a received S3 prefix or list of S3 objects paths.
The concept of Dataset goes beyond the simple idea of files and enables more
complex features like partitioning and catalog integration (AWS Glue Catalog).
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
filters: Union[List[Tuple], List[List[Tuple]]], optional
List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.
dataset: bool
If True read a parquet dataset instead of simple file(s) loading all the related partitions as columns.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
Tuple[Dict[str, str], Optional[Dict[str, str]]]
columns_types: Dictionary with keys as column names and values as
data types (e.g. {'col0': 'bigint', 'col1': 'double'}). /
partitions_types: Dictionary with keys as partition names
and values as data types (e.g. {'col2': 'date'}).
Examples
--------
Reading all Parquet files (with partitions) metadata under a prefix
>>> import awswrangler as wr
>>> columns_types, partitions_types = wr.s3.read_parquet_metadata(path='s3://bucket/prefix/', dataset=True)
Reading all Parquet files metadata from a list
>>> import awswrangler as wr
>>> columns_types, partitions_types = wr.s3.read_parquet_metadata(path=[
... 's3://bucket/filename0.parquet',
... 's3://bucket/filename1.parquet'
... ])
"""
data: pyarrow.parquet.ParquetDataset = _read_parquet_init(
path=path, filters=filters, dataset=dataset, use_threads=use_threads, boto3_session=boto3_session
)
return _data_types.athena_types_from_pyarrow_schema(
schema=data.schema.to_arrow_schema(), partitions=data.partitions
)
def store_parquet_metadata(
path: str,
database: str,
table: str,
filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
dataset: bool = False,
use_threads: bool = True,
description: Optional[str] = None,
parameters: Optional[Dict[str, str]] = None,
columns_comments: Optional[Dict[str, str]] = None,
compression: Optional[str] = None,
boto3_session: Optional[boto3.Session] = None,
) -> Tuple[Dict[str, str], Optional[Dict[str, str]], Optional[Dict[str, List[str]]]]:
"""Infer and store parquet metadata on AWS Glue Catalog.
Infer Apache Parquet file(s) metadata from a received S3 prefix or list of S3 objects paths,
and then store it on AWS Glue Catalog, including all inferred partitions
(no need to run 'MSCK REPAIR TABLE').
The concept of Dataset goes beyond the simple idea of files and enables more
complex features like partitioning and catalog integration (AWS Glue Catalog).
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
database : str
Glue/Athena catalog: Database name.
table : str
Glue/Athena catalog: Table name.
filters: Union[List[Tuple], List[List[Tuple]]], optional
List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.
dataset: bool
If True read a parquet dataset instead of simple file(s) loading all the related partitions as columns.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
description: str, optional
Glue/Athena catalog: Table description
parameters: Dict[str, str], optional
Glue/Athena catalog: Key/value pairs to tag the table.
columns_comments: Dict[str, str], optional
Glue/Athena catalog:
Columns names and the related comments (e.g. {'col0': 'Column 0.', 'col1': 'Column 1.', 'col2': 'Partition.'}).
compression: str, optional
Compression style (``None``, ``snappy``, ``gzip``, etc).
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
Tuple[Dict[str, str], Optional[Dict[str, str]], Optional[Dict[str, List[str]]]]
The metadata used to create the Glue Table.
columns_types: Dictionary with keys as column names and values as
data types (e.g. {'col0': 'bigint', 'col1': 'double'}). /
partitions_types: Dictionary with keys as partition names
and values as data types (e.g. {'col2': 'date'}). /
partitions_values: Dictionary with keys as S3 path locations and values as a
list of partitions values as str (e.g. {'s3://bucket/prefix/y=2020/m=10/': ['2020', '10']}).
Examples
--------
Reading all Parquet files metadata under a prefix
>>> import awswrangler as wr
>>> columns_types, partitions_types, partitions_values = wr.s3.store_parquet_metadata(
... path='s3://bucket/prefix/',
... database='...',
... table='...',
... dataset=True
... )
"""
session: boto3.Session = _utils.ensure_session(session=boto3_session)
data: pyarrow.parquet.ParquetDataset = _read_parquet_init(
path=path, filters=filters, dataset=dataset, use_threads=use_threads, boto3_session=session
)
partitions: Optional[pyarrow.parquet.ParquetPartitions] = data.partitions
columns_types, partitions_types = _data_types.athena_types_from_pyarrow_schema(
schema=data.schema.to_arrow_schema(), partitions=partitions
)
catalog.create_parquet_table(
database=database,
table=table,
path=path,
columns_types=columns_types,
partitions_types=partitions_types,
description=description,
parameters=parameters,
columns_comments=columns_comments,
boto3_session=session,
)
partitions_values: Dict[str, List[str]] = _data_types.athena_partitions_from_pyarrow_partitions(
path=path, partitions=partitions
)
catalog.add_parquet_partitions(
database=database,
table=table,
partitions_values=partitions_values,
compression=compression,
boto3_session=session,
)
return columns_types, partitions_types, partitions_values
def wait_objects_exist(
paths: List[str],
delay: Optional[Union[int, float]] = None,
max_attempts: Optional[int] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> None:
"""Wait Amazon S3 objects exist.
Polls S3.Client.head_object() every 5 seconds (default) until a successful
state is reached. An error is returned after 20 (default) failed checks.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Waiter.ObjectExists
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
paths : List[str]
List of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
delay : Union[int,float], optional
The amount of time in seconds to wait between attempts. Default: 5
max_attempts : int, optional
The maximum number of attempts to be made. Default: 20
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
None
None.
Examples
--------
>>> import awswrangler as wr
>>> wr.s3.wait_objects_exist(['s3://bucket/key0', 's3://bucket/key1']) # wait both objects
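Waiting with a custom polling configuration (a sketch; the delay and max_attempts values are illustrative)
>>> import awswrangler as wr
>>> wr.s3.wait_objects_exist(['s3://bucket/key0'], delay=1, max_attempts=3)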
"""
return _wait_objects(
waiter_name="object_exists",
paths=paths,
delay=delay,
max_attempts=max_attempts,
use_threads=use_threads,
boto3_session=boto3_session,
)
def wait_objects_not_exist(
paths: List[str],
delay: Optional[Union[int, float]] = None,
max_attempts: Optional[int] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> None:
"""Wait Amazon S3 objects not exist.
Polls S3.Client.head_object() every 5 seconds (default) until a successful
state is reached. An error is returned after 20 (default) failed checks.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Waiter.ObjectNotExists
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
paths : List[str]
List of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
delay : Union[int,float], optional
The amount of time in seconds to wait between attempts. Default: 5
max_attempts : int, optional
The maximum number of attempts to be made. Default: 20
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
None
None.
Examples
--------
>>> import awswrangler as wr
>>> wr.s3.wait_objects_not_exist(['s3://bucket/key0', 's3://bucket/key1']) # wait both objects not exist
"""
return _wait_objects(
waiter_name="object_not_exists",
paths=paths,
delay=delay,
max_attempts=max_attempts,
use_threads=use_threads,
boto3_session=boto3_session,
)
def _wait_objects(
waiter_name: str,
paths: List[str],
delay: Optional[Union[int, float]] = None,
max_attempts: Optional[int] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> None:
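    # Resolve the default polling configuration, then run one botocore waiter call per object,
    # either sequentially or fanned out over a thread pool.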
delay = 5 if delay is None else delay
max_attempts = 20 if max_attempts is None else max_attempts
_delay: int = int(delay) if isinstance(delay, float) else delay
if len(paths) < 1:
return None
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
waiter = client_s3.get_waiter(waiter_name)
_paths: List[Tuple[str, str]] = [_utils.parse_path(path=p) for p in paths]
if use_threads is False:
for bucket, key in _paths:
waiter.wait(Bucket=bucket, Key=key, WaiterConfig={"Delay": _delay, "MaxAttempts": max_attempts})
else:
cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:
futures: List[concurrent.futures.Future] = []
for bucket, key in _paths:
future: concurrent.futures.Future = executor.submit(
fn=waiter.wait, Bucket=bucket, Key=key, WaiterConfig={"Delay": _delay, "MaxAttempts": max_attempts}
)
futures.append(future)
for future in futures:
future.result()
return None
def read_parquet_table(
table: str,
database: str,
filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
columns: Optional[List[str]] = None,
categories: List[str] = None,
chunked: bool = False,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Read Apache Parquet table registered on AWS Glue Catalog.
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
table : str
AWS Glue Catalog table name.
database : str
AWS Glue Catalog database name.
filters: Union[List[Tuple], List[List[Tuple]]], optional
List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.
columns : List[str], optional
Names of columns to read from the file(s).
categories: List[str], optional
List of columns names that should be returned as pandas.Categorical.
Recommended for memory restricted environments.
chunked : bool
If True will break the data into smaller DataFrames (non-deterministic number of lines).
Otherwise return a single DataFrame with the whole data.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to s3fs, useful for server side encryption
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
Returns
-------
Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]
Pandas DataFrame or a Generator in case of `chunked=True`.
Examples
--------
Reading Parquet Table
>>> import awswrangler as wr
>>> df = wr.s3.read_parquet_table(database='...', table='...')
Reading Parquet Table encrypted
>>> import awswrangler as wr
>>> df = wr.s3.read_parquet_table(
... database='...',
...     table='...',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
...     'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'
... }
... )
Reading Parquet Table in chunks
>>> import awswrangler as wr
>>> dfs = wr.s3.read_parquet_table(database='...', table='...', chunked=True)
>>> for df in dfs:
>>> print(df) # Smaller Pandas DataFrame
"""
path: str = catalog.get_table_location(database=database, table=table, boto3_session=boto3_session)
return read_parquet(
path=path,
filters=filters,
columns=columns,
categories=categories,
chunked=chunked,
dataset=True,
use_threads=use_threads,
boto3_session=boto3_session,
s3_additional_kwargs=s3_additional_kwargs,
)
def merge_datasets(
source_path: str,
target_path: str,
mode: str = "append",
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> List[str]:
"""Merge a source dataset into a target dataset.
Note
----
If you are merging tables (S3 datasets + Glue Catalog metadata),
remember that you will also need to update your partitions metadata in some cases.
(e.g. wr.athena.repair_table(table='...', database='...'))
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
source_path : str,
S3 Path for the source directory.
target_path : str,
S3 Path for the target directory.
mode: str, optional
``append`` (Default), ``overwrite``, ``overwrite_partitions``.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
List[str]
List of new objects paths.
Examples
--------
>>> import awswrangler as wr
>>> wr.s3.merge_datasets(
... source_path="s3://bucket0/dir0/",
... target_path="s3://bucket1/dir1/",
... mode="append"
... )
["s3://bucket1/dir1/key0", "s3://bucket1/dir1/key1"]
"""
source_path = source_path[:-1] if source_path[-1] == "/" else source_path
target_path = target_path[:-1] if target_path[-1] == "/" else target_path
session: boto3.Session = _utils.ensure_session(session=boto3_session)
paths: List[str] = list_objects(path=f"{source_path}/", boto3_session=session)
_logger.debug(f"len(paths): {len(paths)}")
if len(paths) < 1:
return []
if mode == "overwrite":
_logger.debug(f"Deleting to overwrite: {target_path}/")
delete_objects(path=f"{target_path}/", use_threads=use_threads, boto3_session=session)
elif mode == "overwrite_partitions":
paths_wo_prefix: List[str] = [x.replace(f"{source_path}/", "") for x in paths]
paths_wo_filename: List[str] = [f"{x.rpartition('/')[0]}/" for x in paths_wo_prefix]
partitions_paths: List[str] = list(set(paths_wo_filename))
target_partitions_paths = [f"{target_path}/{x}" for x in partitions_paths]
for path in target_partitions_paths:
_logger.debug(f"Deleting to overwrite_partitions: {path}")
delete_objects(path=path, use_threads=use_threads, boto3_session=session)
elif mode != "append":
raise exceptions.InvalidArgumentValue(f"{mode} is an invalid mode option.")
new_objects: List[str] = copy_objects(
paths=paths, source_path=source_path, target_path=target_path, use_threads=use_threads, boto3_session=session
)
_logger.debug(f"len(new_objects): {len(new_objects)}")
return new_objects
def copy_objects(
paths: List[str],
source_path: str,
target_path: str,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> List[str]:
"""Copy a list of S3 objects to another S3 directory.
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
paths : List[str]
List of S3 objects paths (e.g. [s3://bucket/dir0/key0, s3://bucket/dir0/key1]).
source_path : str,
S3 Path for the source directory.
target_path : str,
S3 Path for the target directory.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
List[str]
List of new objects paths.
Examples
--------
>>> import awswrangler as wr
>>> wr.s3.copy_objects(
... paths=["s3://bucket0/dir0/key0", "s3://bucket0/dir0/key1"])
... source_path="s3://bucket0/dir0/",
... target_path="s3://bucket1/dir1/",
... )
["s3://bucket1/dir1/key0", "s3://bucket1/dir1/key1"]
"""
_logger.debug(f"len(paths): {len(paths)}")
if len(paths) < 1:
return []
source_path = source_path[:-1] if source_path[-1] == "/" else source_path
target_path = target_path[:-1] if target_path[-1] == "/" else target_path
session: boto3.Session = _utils.ensure_session(session=boto3_session)
batch: List[Tuple[str, str]] = []
new_objects: List[str] = []
for path in paths:
path_wo_prefix: str = path.replace(f"{source_path}/", "")
path_final: str = f"{target_path}/{path_wo_prefix}"
new_objects.append(path_final)
batch.append((path, path_final))
_logger.debug(f"len(new_objects): {len(new_objects)}")
_copy_objects(batch=batch, use_threads=use_threads, boto3_session=session)
return new_objects
def _copy_objects(batch: List[Tuple[str, str]], use_threads: bool, boto3_session: boto3.Session) -> None:
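    # Copy each (source, target) pair with boto3's managed transfer, reusing a single
    # client/resource for the whole batch.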
_logger.debug(f"len(batch): {len(batch)}")
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
resource_s3: boto3.resource = _utils.resource(service_name="s3", session=boto3_session)
for source, target in batch:
source_bucket, source_key = _utils.parse_path(path=source)
copy_source: Dict[str, str] = {"Bucket": source_bucket, "Key": source_key}
target_bucket, target_key = _utils.parse_path(path=target)
resource_s3.meta.client.copy(
CopySource=copy_source,
Bucket=target_bucket,
Key=target_key,
SourceClient=client_s3,
Config=TransferConfig(num_download_attempts=15, use_threads=use_threads),
)
|
"""Amazon S3 Module."""
import concurrent.futures
import csv
import logging
import time
import uuid
from itertools import repeat
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union
import boto3 # type: ignore
import botocore.exceptions # type: ignore
import pandas as pd # type: ignore
import pandas.io.parsers # type: ignore
import pyarrow as pa # type: ignore
import pyarrow.lib # type: ignore
import pyarrow.parquet # type: ignore
import s3fs # type: ignore
from boto3.s3.transfer import TransferConfig # type: ignore
from pandas.io.common import infer_compression # type: ignore
from awswrangler import _data_types, _utils, catalog, exceptions
_COMPRESSION_2_EXT: Dict[Optional[str], str] = {None: "", "gzip": ".gz", "snappy": ".snappy"}
_logger: logging.Logger = logging.getLogger(__name__)
def get_bucket_region(bucket: str, boto3_session: Optional[boto3.Session] = None) -> str:
"""Get bucket region name.
Parameters
----------
bucket : str
Bucket name.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
str
Region code (e.g. 'us-east-1').
Examples
--------
Using the default boto3 session
>>> import awswrangler as wr
>>> region = wr.s3.get_bucket_region('bucket-name')
Using a custom boto3 session
>>> import boto3
>>> import awswrangler as wr
>>> region = wr.s3.get_bucket_region('bucket-name', boto3_session=boto3.Session())
"""
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
_logger.debug(f"bucket: {bucket}")
region: str = client_s3.get_bucket_location(Bucket=bucket)["LocationConstraint"]
region = "us-east-1" if region is None else region
_logger.debug(f"region: {region}")
return region
def does_object_exist(path: str, boto3_session: Optional[boto3.Session] = None) -> bool:
"""Check if object exists on S3.
Parameters
----------
path: str
S3 path (e.g. s3://bucket/key).
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
bool
True if exists, False otherwise.
Examples
--------
Using the default boto3 session
>>> import awswrangler as wr
>>> wr.s3.does_object_exist('s3://bucket/key_real')
True
>>> wr.s3.does_object_exist('s3://bucket/key_unreal')
False
Using a custom boto3 session
>>> import boto3
>>> import awswrangler as wr
>>> wr.s3.does_object_exist('s3://bucket/key_real', boto3_session=boto3.Session())
True
>>> wr.s3.does_object_exist('s3://bucket/key_unreal', boto3_session=boto3.Session())
False
"""
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
bucket: str
key: str
bucket, key = path.replace("s3://", "").split("/", 1)
try:
client_s3.head_object(Bucket=bucket, Key=key)
return True
except botocore.exceptions.ClientError as ex:
if ex.response["ResponseMetadata"]["HTTPStatusCode"] == 404:
return False
raise ex # pragma: no cover
def list_objects(path: str, boto3_session: Optional[boto3.Session] = None) -> List[str]:
"""List Amazon S3 objects from a prefix.
Parameters
----------
path : str
S3 path (e.g. s3://bucket/prefix).
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
List[str]
List of objects paths.
Examples
--------
Using the default boto3 session
>>> import awswrangler as wr
>>> wr.s3.list_objects('s3://bucket/prefix')
['s3://bucket/prefix0', 's3://bucket/prefix1', 's3://bucket/prefix2']
Using a custom boto3 session
>>> import boto3
>>> import awswrangler as wr
>>> wr.s3.list_objects('s3://bucket/prefix', boto3_session=boto3.Session())
['s3://bucket/prefix0', 's3://bucket/prefix1', 's3://bucket/prefix2']
"""
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
paginator = client_s3.get_paginator("list_objects_v2")
bucket: str
prefix: str
bucket, prefix = _utils.parse_path(path=path)
response_iterator = paginator.paginate(Bucket=bucket, Prefix=prefix, PaginationConfig={"PageSize": 1000})
paths: List[str] = []
for page in response_iterator:
contents: Optional[List] = page.get("Contents")
if contents is not None:
for content in contents:
if (content is not None) and ("Key" in content):
key: str = content["Key"]
paths.append(f"s3://{bucket}/{key}")
return paths
def _path2list(path: Union[str, List[str]], boto3_session: Optional[boto3.Session]) -> List[str]:
if isinstance(path, str): # prefix
paths: List[str] = list_objects(path=path, boto3_session=boto3_session)
elif isinstance(path, list):
paths = path
else:
raise exceptions.InvalidArgumentType(f"{type(path)} is not a valid path type. Please, use str or List[str].")
return paths
def delete_objects(
path: Union[str, List[str]], use_threads: bool = True, boto3_session: Optional[boto3.Session] = None
) -> None:
"""Delete Amazon S3 objects from a received S3 prefix or list of S3 objects paths.
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
None
None.
Examples
--------
>>> import awswrangler as wr
>>> wr.s3.delete_objects(['s3://bucket/key0', 's3://bucket/key1']) # Delete both objects
>>> wr.s3.delete_objects('s3://bucket/prefix') # Delete all objects under the received prefix
"""
paths: List[str] = _path2list(path=path, boto3_session=boto3_session)
if len(paths) < 1:
return
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
buckets: Dict[str, List[str]] = _split_paths_by_bucket(paths=paths)
for bucket, keys in buckets.items():
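        # The S3 DeleteObjects API accepts at most 1,000 keys per request, hence the chunking below.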
chunks: List[List[str]] = _utils.chunkify(lst=keys, max_length=1_000)
if use_threads is False:
for chunk in chunks:
_delete_objects(bucket=bucket, keys=chunk, client_s3=client_s3)
else:
cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:
executor.map(_delete_objects, repeat(bucket), chunks, repeat(client_s3))
def _split_paths_by_bucket(paths: List[str]) -> Dict[str, List[str]]:
buckets: Dict[str, List[str]] = {}
bucket: str
key: str
for path in paths:
bucket, key = _utils.parse_path(path=path)
if bucket not in buckets:
buckets[bucket] = []
buckets[bucket].append(key)
return buckets
def _delete_objects(bucket: str, keys: List[str], client_s3: boto3.client) -> None:
_logger.debug(f"len(keys): {len(keys)}")
batch: List[Dict[str, str]] = [{"Key": key} for key in keys]
client_s3.delete_objects(Bucket=bucket, Delete={"Objects": batch})
def describe_objects(
path: Union[str, List[str]],
wait_time: Optional[Union[int, float]] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> Dict[str, Dict[str, Any]]:
"""Describe Amazon S3 objects from a received S3 prefix or list of S3 objects paths.
Fetch attributes like ContentLength, DeleteMarker, LastModified, ContentType, etc
The full list of attributes can be explored under the boto3 head_object documentation:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.head_object
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
wait_time : Union[int,float], optional
How much time (in seconds) Wrangler should keep trying to reach these objects.
Very useful to overcome eventual consistency issues.
`None` means only a single try will be done.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
Dict[str, Dict[str, Any]]
Return a dictionary of objects returned from head_objects where the key is the object path.
The response object can be explored here:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.head_object
Examples
--------
>>> import awswrangler as wr
>>> descs0 = wr.s3.describe_objects(['s3://bucket/key0', 's3://bucket/key1']) # Describe both objects
>>> descs1 = wr.s3.describe_objects('s3://bucket/prefix') # Describe all objects under the prefix
>>> descs2 = wr.s3.describe_objects('s3://bucket/prefix', wait_time=30) # Overcoming eventual consistency issues
"""
paths: List[str] = _path2list(path=path, boto3_session=boto3_session)
if len(paths) < 1:
return {}
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
resp_list: List[Tuple[str, Dict[str, Any]]]
if use_threads is False:
resp_list = [_describe_object(path=p, wait_time=wait_time, client_s3=client_s3) for p in paths]
else:
cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:
resp_list = list(executor.map(_describe_object, paths, repeat(wait_time), repeat(client_s3)))
desc_list: Dict[str, Dict[str, Any]] = dict(resp_list)
return desc_list
def _describe_object(
path: str, wait_time: Optional[Union[int, float]], client_s3: boto3.client
) -> Tuple[str, Dict[str, Any]]:
wait_time = int(wait_time) if isinstance(wait_time, float) else wait_time
tries: int = wait_time if (wait_time is not None) and (wait_time > 0) else 1
bucket: str
key: str
bucket, key = _utils.parse_path(path=path)
desc: Dict[str, Any] = {}
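    # Retry head_object once per second for up to ``wait_time`` seconds to smooth over
    # eventual consistency right after an object is written.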
for i in range(tries, 0, -1):
try:
desc = client_s3.head_object(Bucket=bucket, Key=key)
break
except botocore.exceptions.ClientError as e: # pragma: no cover
if e.response["ResponseMetadata"]["HTTPStatusCode"] == 404: # Not Found
_logger.debug(f"Object not found. {i} seconds remaining to wait.")
if i == 1: # Last try, there is no more need to sleep
break
time.sleep(1)
else:
raise e
return path, desc
def size_objects(
path: Union[str, List[str]],
wait_time: Optional[Union[int, float]] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> Dict[str, Optional[int]]:
"""Get the size (ContentLength) in bytes of Amazon S3 objects from a received S3 prefix or list of S3 objects paths.
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
wait_time : Union[int,float], optional
How much time (in seconds) Wrangler should keep trying to reach these objects.
Very useful to overcome eventual consistency issues.
`None` means only a single try will be done.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
Dict[str, Optional[int]]
Dictionary where the key is the object path and the value is the object size.
Examples
--------
>>> import awswrangler as wr
>>> sizes0 = wr.s3.size_objects(['s3://bucket/key0', 's3://bucket/key1']) # Get the sizes of both objects
>>> sizes1 = wr.s3.size_objects('s3://bucket/prefix') # Get the sizes of all objects under the received prefix
>>> sizes2 = wr.s3.size_objects('s3://bucket/prefix', wait_time=30) # Overcoming eventual consistency issues
"""
desc_list: Dict[str, Dict[str, Any]] = describe_objects(
path=path, wait_time=wait_time, use_threads=use_threads, boto3_session=boto3_session
)
size_list: Dict[str, Optional[int]] = {k: d.get("ContentLength", None) for k, d in desc_list.items()}
return size_list
def to_csv( # pylint: disable=too-many-arguments
df: pd.DataFrame,
path: str,
sep: str = ",",
index: bool = True,
columns: Optional[List[str]] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
dataset: bool = False,
partition_cols: Optional[List[str]] = None,
mode: Optional[str] = None,
database: Optional[str] = None,
table: Optional[str] = None,
dtype: Optional[Dict[str, str]] = None,
description: Optional[str] = None,
parameters: Optional[Dict[str, str]] = None,
columns_comments: Optional[Dict[str, str]] = None,
**pandas_kwargs,
) -> Dict[str, Union[List[str], Dict[str, List[str]]]]:
"""Write CSV file or dataset on Amazon S3.
The concept of Dataset goes beyond the simple idea of files and enables more
complex features like partitioning, casting and catalog integration (Amazon Athena/AWS Glue Catalog).
Note
----
The table name and all column names will be automatically sanitized using
`wr.catalog.sanitize_table_name` and `wr.catalog.sanitize_column_name`.
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
df: pandas.DataFrame
Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
path : str
Amazon S3 path (e.g. s3://bucket/filename.csv).
sep : str
String of length 1. Field delimiter for the output file.
index : bool
Write row names (index).
columns : List[str], optional
Columns to write.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 Session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to s3fs, useful for server side encryption
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
dataset: bool
If True store a CSV dataset instead of a single file.
If True, enables the following arguments:
partition_cols, mode, database, table, description, parameters and columns_comments.
partition_cols: List[str], optional
List of column names that will be used to create partitions. Only takes effect if dataset=True.
mode: str, optional
``append`` (Default), ``overwrite``, ``overwrite_partitions``. Only takes effect if dataset=True.
database : str, optional
Glue/Athena catalog: Database name.
table : str, optional
Glue/Athena catalog: Table name.
dtype: Dict[str, str], optional
Dictionary of column names and Athena/Glue types to be cast.
Useful when you have columns with undetermined or mixed data types.
Only takes effect if dataset=True.
(e.g. {'col name': 'bigint', 'col2 name': 'int'})
description: str, optional
Glue/Athena catalog: Table description
parameters: Dict[str, str], optional
Glue/Athena catalog: Key/value pairs to tag the table.
columns_comments: Dict[str, str], optional
Glue/Athena catalog:
Columns names and the related comments (e.g. {'col0': 'Column 0.', 'col1': 'Column 1.', 'col2': 'Partition.'}).
pandas_kwargs:
keyword arguments forwarded to pandas.DataFrame.to_csv()
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_csv.html
Returns
-------
Dict[str, Union[List[str], Dict[str, List[str]]]]
Dictionary with:
'paths': List of all stored files paths on S3.
'partitions_values': Dictionary of partitions added with keys as S3 path locations
and values as a list of partitions values as str.
Examples
--------
Writing single file
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/prefix/my_file.csv',
... )
{
'paths': ['s3://bucket/prefix/my_file.csv'],
'partitions_values': {}
}
Writing single file encrypted with a KMS key
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/prefix/my_file.csv',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
...     'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'
... }
... )
{
'paths': ['s3://bucket/prefix/my_file.csv'],
'partitions_values': {}
}
Writing partitioned dataset
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B']
... }),
... path='s3://bucket/prefix',
... dataset=True,
... partition_cols=['col2']
... )
{
'paths': ['s3://.../col2=A/x.csv', 's3://.../col2=B/y.csv'],
'partitions_values': {
's3://.../col2=A/': ['A'],
's3://.../col2=B/': ['B']
}
}
Writing dataset to S3 with metadata on Athena/Glue Catalog.
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B']
... }),
... path='s3://bucket/prefix',
... dataset=True,
... partition_cols=['col2'],
... database='default', # Athena/Glue database
... table='my_table' # Athena/Glue table
... )
{
'paths': ['s3://.../col2=A/x.csv', 's3://.../col2=B/y.csv'],
'partitions_values': {
's3://.../col2=A/': ['A'],
's3://.../col2=B/': ['B']
}
}
Writing dataset casting empty column data type
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_csv(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B'],
... 'col3': [None, None, None]
... }),
... path='s3://bucket/prefix',
... dataset=True,
... database='default', # Athena/Glue database
...     table='my_table', # Athena/Glue table
... dtype={'col3': 'date'}
... )
{
'paths': ['s3://.../x.csv'],
'partitions_values': {}
}
"""
if (database is None) ^ (table is None):
raise exceptions.InvalidArgumentCombination(
"Please pass database and table arguments to be able to store the metadata into the Athena/Glue Catalog."
)
if df.empty is True:
raise exceptions.EmptyDataFrame()
session: boto3.Session = _utils.ensure_session(session=boto3_session)
partition_cols = partition_cols if partition_cols else []
dtype = dtype if dtype else {}
columns_comments = columns_comments if columns_comments else {}
partitions_values: Dict[str, List[str]] = {}
fs: s3fs.S3FileSystem = _utils.get_fs(session=session, s3_additional_kwargs=s3_additional_kwargs)
if dataset is False:
if partition_cols:
raise exceptions.InvalidArgumentCombination("Please, pass dataset=True to be able to use partition_cols.")
if mode is not None:
raise exceptions.InvalidArgumentCombination("Please pass dataset=True to be able to use mode.")
if any(arg is not None for arg in (database, table, description, parameters)):
raise exceptions.InvalidArgumentCombination(
"Please pass dataset=True to be able to use any one of these "
"arguments: database, table, description, parameters, "
"columns_comments."
)
pandas_kwargs["sep"] = sep
pandas_kwargs["index"] = index
pandas_kwargs["columns"] = columns
_to_text(file_format="csv", df=df, path=path, fs=fs, **pandas_kwargs)
paths = [path]
else:
mode = "append" if mode is None else mode
exist: bool = False
if columns:
df = df[columns]
if (database is not None) and (table is not None): # Normalize table to respect Athena's standards
df = catalog.sanitize_dataframe_columns_names(df=df)
partition_cols = [catalog.sanitize_column_name(p) for p in partition_cols]
dtype = {catalog.sanitize_column_name(k): v.lower() for k, v in dtype.items()}
columns_comments = {catalog.sanitize_column_name(k): v for k, v in columns_comments.items()}
exist = catalog.does_table_exist(database=database, table=table, boto3_session=session)
if (exist is True) and (mode in ("append", "overwrite_partitions")):
for k, v in catalog.get_table_types(database=database, table=table, boto3_session=session).items():
dtype[k] = v
df = catalog.drop_duplicated_columns(df=df)
paths, partitions_values = _to_csv_dataset(
df=df,
path=path,
index=index,
sep=sep,
fs=fs,
use_threads=use_threads,
partition_cols=partition_cols,
dtype=dtype,
mode=mode,
boto3_session=session,
)
if (database is not None) and (table is not None):
columns_types, partitions_types = _data_types.athena_types_from_pandas_partitioned(
df=df, index=index, partition_cols=partition_cols, dtype=dtype, index_left=True
)
if (exist is False) or (mode == "overwrite"):
catalog.create_csv_table(
database=database,
table=table,
path=path,
columns_types=columns_types,
partitions_types=partitions_types,
description=description,
parameters=parameters,
columns_comments=columns_comments,
boto3_session=session,
mode="overwrite",
sep=sep,
)
if partitions_values:
_logger.debug(f"partitions_values:\n{partitions_values}")
catalog.add_csv_partitions(
database=database, table=table, partitions_values=partitions_values, boto3_session=session, sep=sep
)
return {"paths": paths, "partitions_values": partitions_values}
def _to_csv_dataset(
df: pd.DataFrame,
path: str,
index: bool,
sep: str,
fs: s3fs.S3FileSystem,
use_threads: bool,
mode: str,
dtype: Dict[str, str],
partition_cols: Optional[List[str]] = None,
boto3_session: Optional[boto3.Session] = None,
) -> Tuple[List[str], Dict[str, List[str]]]:
paths: List[str] = []
partitions_values: Dict[str, List[str]] = {}
path = path if path[-1] == "/" else f"{path}/"
if mode not in ["append", "overwrite", "overwrite_partitions"]:
raise exceptions.InvalidArgumentValue(
f"{mode} is a invalid mode, please use append, overwrite or overwrite_partitions."
)
if (mode == "overwrite") or ((mode == "overwrite_partitions") and (not partition_cols)):
delete_objects(path=path, use_threads=use_threads, boto3_session=boto3_session)
df = _data_types.cast_pandas_with_athena_types(df=df, dtype=dtype)
_logger.debug(f"dtypes: {df.dtypes}")
if not partition_cols:
file_path: str = f"{path}{uuid.uuid4().hex}.csv"
_to_text(
file_format="csv",
df=df,
path=file_path,
fs=fs,
quoting=csv.QUOTE_NONE,
escapechar="\\",
header=False,
date_format="%Y-%m-%d %H:%M:%S.%f",
index=index,
sep=sep,
)
paths.append(file_path)
else:
for keys, subgroup in df.groupby(by=partition_cols, observed=True):
subgroup = subgroup.drop(partition_cols, axis="columns")
keys = (keys,) if not isinstance(keys, tuple) else keys
subdir = "/".join([f"{name}={val}" for name, val in zip(partition_cols, keys)])
prefix: str = f"{path}{subdir}/"
if mode == "overwrite_partitions":
delete_objects(path=prefix, use_threads=use_threads, boto3_session=boto3_session)
file_path = f"{prefix}{uuid.uuid4().hex}.csv"
_to_text(
file_format="csv",
df=subgroup,
path=file_path,
fs=fs,
quoting=csv.QUOTE_NONE,
escapechar="\\",
header=False,
date_format="%Y-%m-%d %H:%M:%S.%f",
index=index,
sep=sep,
)
paths.append(file_path)
partitions_values[prefix] = [str(k) for k in keys]
return paths, partitions_values
def to_json(
df: pd.DataFrame,
path: str,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
**pandas_kwargs,
) -> None:
"""Write JSON file on Amazon S3.
Parameters
----------
df: pandas.DataFrame
Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
path : str
Amazon S3 path (e.g. s3://bucket/filename.json).
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 Session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to s3fs, useful for server side encryption
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
pandas_kwargs:
keyword arguments forwarded to pandas.DataFrame.to_json()
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_json.html
Returns
-------
None
None.
Examples
--------
Writing JSON file
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_json(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/filename.json',
... )
Writing JSON file encrypted with a KMS key
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_json(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/filename.json',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
...     'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'
... }
... )
"""
return _to_text(
file_format="json",
df=df,
path=path,
boto3_session=boto3_session,
s3_additional_kwargs=s3_additional_kwargs,
**pandas_kwargs,
)
def _to_text(
file_format: str,
df: pd.DataFrame,
path: str,
fs: Optional[s3fs.S3FileSystem] = None,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
**pandas_kwargs,
) -> None:
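    # Open the target S3 object through s3fs (creating a filesystem if one was not passed in)
    # and delegate serialization to pandas' to_csv / to_json.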
if df.empty is True: # pragma: no cover
raise exceptions.EmptyDataFrame()
if fs is None:
fs = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)
with fs.open(path, "w") as f:
if file_format == "csv":
df.to_csv(f, **pandas_kwargs)
elif file_format == "json":
df.to_json(f, **pandas_kwargs)
def to_parquet( # pylint: disable=too-many-arguments
df: pd.DataFrame,
path: str,
index: bool = False,
compression: Optional[str] = "snappy",
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
dataset: bool = False,
partition_cols: Optional[List[str]] = None,
mode: Optional[str] = None,
database: Optional[str] = None,
table: Optional[str] = None,
dtype: Optional[Dict[str, str]] = None,
description: Optional[str] = None,
parameters: Optional[Dict[str, str]] = None,
columns_comments: Optional[Dict[str, str]] = None,
) -> Dict[str, Union[List[str], Dict[str, List[str]]]]:
"""Write Parquet file or dataset on Amazon S3.
The concept of Dataset goes beyond the simple idea of files and enables more
complex features like partitioning, casting and catalog integration (Amazon Athena/AWS Glue Catalog).
Note
----
The table name and all column names will be automatically sanitized using
`wr.catalog.sanitize_table_name` and `wr.catalog.sanitize_column_name`.
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
df: pandas.DataFrame
Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
path : str
S3 path (for file e.g. ``s3://bucket/prefix/filename.parquet``) (for dataset e.g. ``s3://bucket/prefix``).
index : bool
True to store the DataFrame index in file, otherwise False to ignore it.
compression: str, optional
Compression style (``None``, ``snappy``, ``gzip``).
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to s3fs, useful for server side encryption
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
dataset: bool
If True store a parquet dataset instead of a single file.
If True, enables the following arguments:
partition_cols, mode, database, table, description, parameters and columns_comments.
partition_cols: List[str], optional
List of column names that will be used to create partitions. Only takes effect if dataset=True.
mode: str, optional
``append`` (Default), ``overwrite``, ``overwrite_partitions``. Only takes effect if dataset=True.
database : str, optional
Glue/Athena catalog: Database name.
table : str, optional
Glue/Athena catalog: Table name.
dtype: Dict[str, str], optional
Dictionary of column names and Athena/Glue types to be cast.
Useful when you have columns with undetermined or mixed data types.
Only takes effect if dataset=True.
(e.g. {'col name': 'bigint', 'col2 name': 'int'})
description: str, optional
Glue/Athena catalog: Table description
parameters: Dict[str, str], optional
Glue/Athena catalog: Key/value pairs to tag the table.
columns_comments: Dict[str, str], optional
Glue/Athena catalog:
Columns names and the related comments (e.g. {'col0': 'Column 0.', 'col1': 'Column 1.', 'col2': 'Partition.'}).
Returns
-------
Dict[str, Union[List[str], Dict[str, List[str]]]]
Dictionary with:
'paths': List of all stored files paths on S3.
'partitions_values': Dictionary of partitions added with keys as S3 path locations
and values as a list of partitions values as str.
Examples
--------
Writing single file
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/prefix/my_file.parquet',
... )
{
'paths': ['s3://bucket/prefix/my_file.parquet'],
'partitions_values': {}
}
Writing single file encrypted with a KMS key
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path='s3://bucket/prefix/my_file.parquet',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
...     'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'
... }
... )
{
'paths': ['s3://bucket/prefix/my_file.parquet'],
'partitions_values': {}
}
Writing partitioned dataset
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B']
... }),
... path='s3://bucket/prefix',
... dataset=True,
... partition_cols=['col2']
... )
{
'paths': ['s3://.../col2=A/x.parquet', 's3://.../col2=B/y.parquet'],
'partitions_values': {
's3://.../col2=A/': ['A'],
's3://.../col2=B/': ['B']
}
}
Writing dataset to S3 with metadata on Athena/Glue Catalog.
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B']
... }),
... path='s3://bucket/prefix',
... dataset=True,
... partition_cols=['col2'],
... database='default', # Athena/Glue database
... table='my_table' # Athena/Glue table
... )
{
'paths': ['s3://.../col2=A/x.parquet', 's3://.../col2=B/y.parquet'],
'partitions_values': {
's3://.../col2=A/': ['A'],
's3://.../col2=B/': ['B']
}
}
Writing dataset casting empty column data type
>>> import awswrangler as wr
>>> import pandas as pd
>>> wr.s3.to_parquet(
... df=pd.DataFrame({
... 'col': [1, 2, 3],
... 'col2': ['A', 'A', 'B'],
... 'col3': [None, None, None]
... }),
... path='s3://bucket/prefix',
... dataset=True,
... database='default', # Athena/Glue database
... table='my_table', # Athena/Glue table
... dtype={'col3': 'date'}
... )
{
'paths': ['s3://.../x.parquet'],
'partitions_values': {}
}
"""
if (database is None) ^ (table is None):
raise exceptions.InvalidArgumentCombination(
"Please pass database and table arguments to be able to store the metadata into the Athena/Glue Catalog."
)
if df.empty is True:
raise exceptions.EmptyDataFrame()
session: boto3.Session = _utils.ensure_session(session=boto3_session)
partition_cols = partition_cols if partition_cols else []
dtype = dtype if dtype else {}
columns_comments = columns_comments if columns_comments else {}
partitions_values: Dict[str, List[str]] = {}
cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
fs: s3fs.S3FileSystem = _utils.get_fs(session=session, s3_additional_kwargs=s3_additional_kwargs)
compression_ext: Optional[str] = _COMPRESSION_2_EXT.get(compression, None)
if compression_ext is None:
raise exceptions.InvalidCompression(f"{compression} is invalid, please use None, snappy or gzip.")
if dataset is False:
if partition_cols:
raise exceptions.InvalidArgumentCombination("Please, pass dataset=True to be able to use partition_cols.")
if mode is not None:
raise exceptions.InvalidArgumentCombination("Please pass dataset=True to be able to use mode.")
if any(arg is not None for arg in (database, table, description, parameters)):
raise exceptions.InvalidArgumentCombination(
"Please pass dataset=True to be able to use any one of these "
"arguments: database, table, description, parameters, "
"columns_comments."
)
paths = [
_to_parquet_file(
df=df, path=path, schema=None, index=index, compression=compression, cpus=cpus, fs=fs, dtype={}
)
]
else:
mode = "append" if mode is None else mode
exist: bool = False
if (database is not None) and (table is not None): # Normalize table to respect Athena's standards
df = catalog.sanitize_dataframe_columns_names(df=df)
partition_cols = [catalog.sanitize_column_name(p) for p in partition_cols]
dtype = {catalog.sanitize_column_name(k): v.lower() for k, v in dtype.items()}
columns_comments = {catalog.sanitize_column_name(k): v for k, v in columns_comments.items()}
exist = catalog.does_table_exist(database=database, table=table, boto3_session=session)
if (exist is True) and (mode in ("append", "overwrite_partitions")):
for k, v in catalog.get_table_types(database=database, table=table, boto3_session=session).items():
dtype[k] = v
df = catalog.drop_duplicated_columns(df=df)
paths, partitions_values = _to_parquet_dataset(
df=df,
path=path,
index=index,
compression=compression,
compression_ext=compression_ext,
cpus=cpus,
fs=fs,
use_threads=use_threads,
partition_cols=partition_cols,
dtype=dtype,
mode=mode,
boto3_session=session,
)
if (database is not None) and (table is not None):
columns_types, partitions_types = _data_types.athena_types_from_pandas_partitioned(
df=df, index=index, partition_cols=partition_cols, dtype=dtype
)
if (exist is False) or (mode == "overwrite"):
catalog.create_parquet_table(
database=database,
table=table,
path=path,
columns_types=columns_types,
partitions_types=partitions_types,
compression=compression,
description=description,
parameters=parameters,
columns_comments=columns_comments,
boto3_session=session,
mode="overwrite",
)
if partitions_values:
_logger.debug(f"partitions_values:\n{partitions_values}")
catalog.add_parquet_partitions(
database=database,
table=table,
partitions_values=partitions_values,
compression=compression,
boto3_session=session,
)
return {"paths": paths, "partitions_values": partitions_values}
def _to_parquet_dataset(
df: pd.DataFrame,
path: str,
index: bool,
compression: Optional[str],
compression_ext: str,
cpus: int,
fs: s3fs.S3FileSystem,
use_threads: bool,
mode: str,
dtype: Dict[str, str],
partition_cols: Optional[List[str]] = None,
boto3_session: Optional[boto3.Session] = None,
) -> Tuple[List[str], Dict[str, List[str]]]:
paths: List[str] = []
partitions_values: Dict[str, List[str]] = {}
path = path if path[-1] == "/" else f"{path}/"
if mode not in ["append", "overwrite", "overwrite_partitions"]:
raise exceptions.InvalidArgumentValue(
f"{mode} is a invalid mode, please use append, overwrite or overwrite_partitions."
)
if (mode == "overwrite") or ((mode == "overwrite_partitions") and (not partition_cols)):
delete_objects(path=path, use_threads=use_threads, boto3_session=boto3_session)
df = _data_types.cast_pandas_with_athena_types(df=df, dtype=dtype)
schema: pa.Schema = _data_types.pyarrow_schema_from_pandas(
df=df, index=index, ignore_cols=partition_cols, dtype=dtype
)
_logger.debug(f"schema: {schema}")
if not partition_cols:
file_path: str = f"{path}{uuid.uuid4().hex}{compression_ext}.parquet"
_to_parquet_file(
df=df, schema=schema, path=file_path, index=index, compression=compression, cpus=cpus, fs=fs, dtype=dtype
)
paths.append(file_path)
else:
for keys, subgroup in df.groupby(by=partition_cols, observed=True):
subgroup = subgroup.drop(partition_cols, axis="columns")
keys = (keys,) if not isinstance(keys, tuple) else keys
subdir = "/".join([f"{name}={val}" for name, val in zip(partition_cols, keys)])
prefix: str = f"{path}{subdir}/"
if mode == "overwrite_partitions":
delete_objects(path=prefix, use_threads=use_threads, boto3_session=boto3_session)
file_path = f"{prefix}{uuid.uuid4().hex}{compression_ext}.parquet"
_to_parquet_file(
df=subgroup,
schema=schema,
path=file_path,
index=index,
compression=compression,
cpus=cpus,
fs=fs,
dtype=dtype,
)
paths.append(file_path)
partitions_values[prefix] = [str(k) for k in keys]
return paths, partitions_values
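# Illustrative sketch (not part of the original module): how the dataset writer above derives
# Hive-style partition prefixes from each groupby key. The helper name is hypothetical and
# exists only to make the path mapping explicit.
def _example_partition_prefix(path, partition_cols, keys):
    """E.g. ('s3://bucket/prefix/', ['col2'], ('A',)) -> 's3://bucket/prefix/col2=A/'."""
    keys = (keys,) if not isinstance(keys, tuple) else keys
    subdir = "/".join(f"{name}={val}" for name, val in zip(partition_cols, keys))
    return f"{path}{subdir}/"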
def _to_parquet_file(
df: pd.DataFrame,
path: str,
schema: pa.Schema,
index: bool,
compression: Optional[str],
cpus: int,
fs: s3fs.S3FileSystem,
dtype: Dict[str, str],
) -> str:
table: pa.Table = pyarrow.Table.from_pandas(df=df, schema=schema, nthreads=cpus, preserve_index=index, safe=True)
for col_name, col_type in dtype.items():
if col_name in table.column_names:
col_index = table.column_names.index(col_name)
pyarrow_dtype = _data_types.athena2pyarrow(col_type)
field = pa.field(name=col_name, type=pyarrow_dtype)
table = table.set_column(col_index, field, table.column(col_name).cast(pyarrow_dtype))
_logger.debug(f"Casting column {col_name} ({col_index}) to {col_type} ({pyarrow_dtype})")
pyarrow.parquet.write_table(
table=table,
where=path,
write_statistics=True,
use_dictionary=True,
filesystem=fs,
coerce_timestamps="ms",
compression=compression,
flavor="spark",
)
return path
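# Illustrative sketch (not part of the original module): the per-column cast performed in the
# loop above, expressed with plain pyarrow calls. The Athena-to-pyarrow type mapping itself
# lives in _data_types.athena2pyarrow and is not reproduced here.
def _example_cast_column(table, col_name, pyarrow_dtype):
    import pyarrow as pa  # local import so the sketch stands alone
    col_index = table.column_names.index(col_name)
    field = pa.field(name=col_name, type=pyarrow_dtype)
    return table.set_column(col_index, field, table.column(col_name).cast(pyarrow_dtype))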
def read_csv(
path: Union[str, List[str]],
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
chunksize: Optional[int] = None,
**pandas_kwargs,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Read CSV file(s) from from a received S3 prefix or list of S3 objects paths.
Note
----
For partial and gradual reading use the argument ``chunksize`` instead of ``iterator``.
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. ``[s3://bucket/key0, s3://bucket/key1]``).
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to s3fs, useful for server side encryption
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
chunksize: int, optional
If specified, return a generator where chunksize is the number of rows to include in each chunk.
pandas_kwargs:
keyword arguments forwarded to pandas.read_csv().
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html
Returns
-------
Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]
Pandas DataFrame or a Generator in case of `chunksize != None`.
Examples
--------
Reading all CSV files under a prefix
>>> import awswrangler as wr
>>> df = wr.s3.read_csv(path='s3://bucket/prefix/')
Reading all CSV files under a prefix encrypted with a KMS key
>>> import awswrangler as wr
>>> df = wr.s3.read_csv(
... path='s3://bucket/prefix/',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
... 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'
... }
... )
Reading all CSV files from a list
>>> import awswrangler as wr
>>> df = wr.s3.read_csv(path=['s3://bucket/filename0.csv', 's3://bucket/filename1.csv'])
Reading in chunks of 100 lines
>>> import awswrangler as wr
>>> dfs = wr.s3.read_csv(path=['s3://bucket/filename0.csv', 's3://bucket/filename1.csv'], chunksize=100)
>>> for df in dfs:
>>> print(df) # 100 lines Pandas DataFrame
"""
return _read_text(
parser_func=pd.read_csv,
path=path,
use_threads=use_threads,
boto3_session=boto3_session,
s3_additional_kwargs=s3_additional_kwargs,
chunksize=chunksize,
**pandas_kwargs,
)
def read_fwf(
path: Union[str, List[str]],
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
chunksize: Optional[int] = None,
**pandas_kwargs,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Read fixed-width formatted file(s) from from a received S3 prefix or list of S3 objects paths.
Note
----
For partial and gradual reading use the argument ``chunksize`` instead of ``iterator``.
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. ``[s3://bucket/key0, s3://bucket/key1]``).
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to s3fs, useful for server side encryption
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
chunksize: int, optional
If specified, return a generator where chunksize is the number of rows to include in each chunk.
pandas_kwargs:
keyword arguments forwarded to pandas.read_fwf().
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_fwf.html
Returns
-------
Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]
Pandas DataFrame or a Generator in case of `chunksize != None`.
Examples
--------
Reading all fixed-width formatted (FWF) files under a prefix
>>> import awswrangler as wr
>>> df = wr.s3.read_fwf(path='s3://bucket/prefix/')
Reading all fixed-width formatted (FWF) files under a prefix encrypted with a KMS key
>>> import awswrangler as wr
>>> df = wr.s3.read_fwf(
... path='s3://bucket/prefix/',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
... 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'
... }
... )
Reading all fixed-width formatted (FWF) files from a list
>>> import awswrangler as wr
>>> df = wr.s3.read_fwf(path=['s3://bucket/filename0.txt', 's3://bucket/filename1.txt'])
Reading in chunks of 100 lines
>>> import awswrangler as wr
>>> dfs = wr.s3.read_fwf(path=['s3://bucket/filename0.txt', 's3://bucket/filename1.txt'], chunksize=100)
>>> for df in dfs:
>>> print(df) # 100 lines Pandas DataFrame
"""
return _read_text(
parser_func=pd.read_fwf,
path=path,
use_threads=use_threads,
boto3_session=boto3_session,
s3_additional_kwargs=s3_additional_kwargs,
chunksize=chunksize,
**pandas_kwargs,
)
def read_json(
path: Union[str, List[str]],
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
chunksize: Optional[int] = None,
**pandas_kwargs,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Read JSON file(s) from from a received S3 prefix or list of S3 objects paths.
Note
----
For partial and gradual reading use the argument ``chunksize`` instead of ``iterator``.
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. ``[s3://bucket/key0, s3://bucket/key1]``).
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to s3fs, useful for server side encryption
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
chunksize: int, optional
If specified, return a generator where chunksize is the number of rows to include in each chunk.
pandas_kwargs:
keyword arguments forwarded to pandas.read_json().
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_json.html
Returns
-------
Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]
Pandas DataFrame or a Generator in case of `chunksize != None`.
Examples
--------
Reading all JSON files under a prefix
>>> import awswrangler as wr
>>> df = wr.s3.read_json(path='s3://bucket/prefix/')
Reading all JSON files under a prefix encrypted with a KMS key
>>> import awswrangler as wr
>>> df = wr.s3.read_json(
... path='s3://bucket/prefix/',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
... 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'
... }
... )
Reading all JSON files from a list
>>> import awswrangler as wr
>>> df = wr.s3.read_json(path=['s3://bucket/filename0.json', 's3://bucket/filename1.json'])
Reading in chunks of 100 lines
>>> import awswrangler as wr
>>> dfs = wr.s3.read_json(path=['s3://bucket/filename0.json', 's3://bucket/filename1.json'], chunksize=100)
>>> for df in dfs:
>>> print(df) # 100 lines Pandas DataFrame
"""
return _read_text(
parser_func=pd.read_json,
path=path,
use_threads=use_threads,
boto3_session=boto3_session,
s3_additional_kwargs=s3_additional_kwargs,
chunksize=chunksize,
**pandas_kwargs,
)
def _read_text(
parser_func: Callable,
path: Union[str, List[str]],
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
chunksize: Optional[int] = None,
**pandas_kwargs,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
if "iterator" in pandas_kwargs:
raise exceptions.InvalidArgument("Please, use chunksize instead of iterator.")
paths: List[str] = _path2list(path=path, boto3_session=boto3_session)
if chunksize is not None:
dfs: Iterator[pd.DataFrame] = _read_text_chunksize(
parser_func=parser_func,
paths=paths,
boto3_session=boto3_session,
chunksize=chunksize,
pandas_args=pandas_kwargs,
s3_additional_kwargs=s3_additional_kwargs,
)
return dfs
if use_threads is False:
df: pd.DataFrame = pd.concat(
objs=[
_read_text_full(
parser_func=parser_func,
path=p,
boto3_session=boto3_session,
pandas_args=pandas_kwargs,
s3_additional_kwargs=s3_additional_kwargs,
)
for p in paths
],
ignore_index=True,
sort=False,
)
else:
cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:
df = pd.concat(
objs=executor.map(
_read_text_full,
repeat(parser_func),
paths,
repeat(boto3_session),
repeat(pandas_kwargs),
repeat(s3_additional_kwargs),
),
ignore_index=True,
sort=False,
)
return df
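# Illustrative sketch (not part of the original module): executor.map() consumes its iterables
# positionally, so itertools.repeat() pins the arguments shared by every path while the paths
# vary, as in the threaded branch above. A minimal standalone equivalent:
def _example_fan_out(func, paths, shared_arg, max_workers=4):
    import concurrent.futures
    from itertools import repeat
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        return list(executor.map(func, paths, repeat(shared_arg)))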
def _read_text_chunksize(
parser_func: Callable,
paths: List[str],
boto3_session: boto3.Session,
chunksize: int,
pandas_args: Dict[str, Any],
s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> Iterator[pd.DataFrame]:
fs: s3fs.S3FileSystem = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)
for path in paths:
_logger.debug(f"path: {path}")
if pandas_args.get("compression", "infer") == "infer":
pandas_args["compression"] = infer_compression(path, compression="infer")
with fs.open(path, "rb") as f:
reader: pandas.io.parsers.TextFileReader = parser_func(f, chunksize=chunksize, **pandas_args)
for df in reader:
yield df
def _read_text_full(
parser_func: Callable,
path: str,
boto3_session: boto3.Session,
pandas_args: Dict[str, Any],
s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> pd.DataFrame:
fs: s3fs.S3FileSystem = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)
if pandas_args.get("compression", "infer") == "infer":
pandas_args["compression"] = infer_compression(path, compression="infer")
with fs.open(path, "rb") as f:
return parser_func(f, **pandas_args)
def _read_parquet_init(
path: Union[str, List[str]],
filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
categories: List[str] = None,
validate_schema: bool = True,
dataset: bool = False,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> pyarrow.parquet.ParquetDataset:
"""Encapsulate all initialization before the use of the pyarrow.parquet.ParquetDataset."""
if dataset is False:
path_or_paths: Union[str, List[str]] = _path2list(path=path, boto3_session=boto3_session)
elif isinstance(path, str):
path_or_paths = path[:-1] if path.endswith("/") else path
else:
path_or_paths = path
_logger.debug(f"path_or_paths: {path_or_paths}")
fs: s3fs.S3FileSystem = _utils.get_fs(session=boto3_session, s3_additional_kwargs=s3_additional_kwargs)
cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
data: pyarrow.parquet.ParquetDataset = pyarrow.parquet.ParquetDataset(
path_or_paths=path_or_paths,
filesystem=fs,
metadata_nthreads=cpus,
filters=filters,
read_dictionary=categories,
validate_schema=validate_schema,
)
return data
def read_parquet(
path: Union[str, List[str]],
filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
columns: Optional[List[str]] = None,
validate_schema: bool = True,
chunked: bool = False,
dataset: bool = False,
categories: List[str] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Read Apache Parquet file(s) from from a received S3 prefix or list of S3 objects paths.
The concept of Dataset goes beyond the simple idea of files and enables more
complex features like partitioning and catalog integration (AWS Glue Catalog).
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
filters: Union[List[Tuple], List[List[Tuple]]], optional
List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.
columns : List[str], optional
Names of columns to read from the file(s).
validate_schema:
Check that individual file schemas are all the same / compatible. Schemas within a
folder prefix should all be the same. Disable if your files have differing
schemas and you want to skip this check.
chunked : bool
If True will break the data in smaller DataFrames (Non deterministic number of lines).
Otherwise return a single DataFrame with the whole data.
dataset: bool
If True read a parquet dataset instead of simple file(s) loading all the related partitions as columns.
categories: List[str], optional
List of column names that should be returned as pandas.Categorical.
Recommended for memory restricted environments.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to s3fs, useful for server side encryption
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
Returns
-------
Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]
Pandas DataFrame or a Generator in case of `chunked=True`.
Examples
--------
Reading all Parquet files under a prefix
>>> import awswrangler as wr
>>> df = wr.s3.read_parquet(path='s3://bucket/prefix/')
Reading all Parquet files under a prefix encrypted with a KMS key
>>> import awswrangler as wr
>>> df = wr.s3.read_parquet(
... path='s3://bucket/prefix/',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
... 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'
... }
... )
Reading all Parquet files from a list
>>> import awswrangler as wr
>>> df = wr.s3.read_parquet(path=['s3://bucket/filename0.parquet', 's3://bucket/filename1.parquet'])
Reading in chunks
>>> import awswrangler as wr
>>> dfs = wr.s3.read_parquet(path=['s3://bucket/filename0.csv', 's3://bucket/filename1.csv'], chunked=True)
>>> for df in dfs:
>>> print(df) # Smaller Pandas DataFrame
"""
data: pyarrow.parquet.ParquetDataset = _read_parquet_init(
path=path,
filters=filters,
dataset=dataset,
categories=categories,
use_threads=use_threads,
boto3_session=boto3_session,
s3_additional_kwargs=s3_additional_kwargs,
validate_schema=validate_schema,
)
if chunked is False:
return _read_parquet(
data=data, columns=columns, categories=categories, use_threads=use_threads, validate_schema=validate_schema
)
return _read_parquet_chunked(data=data, columns=columns, categories=categories, use_threads=use_threads)
def _read_parquet(
data: pyarrow.parquet.ParquetDataset,
columns: Optional[List[str]] = None,
categories: List[str] = None,
use_threads: bool = True,
validate_schema: bool = True,
) -> pd.DataFrame:
tables: List[pa.Table] = []
for piece in data.pieces:
table: pa.Table = piece.read(
columns=columns, use_threads=use_threads, partitions=data.partitions, use_pandas_metadata=False
)
tables.append(table)
promote: bool = not validate_schema
table = pa.lib.concat_tables(tables, promote=promote)
return table.to_pandas(
use_threads=use_threads,
split_blocks=True,
self_destruct=True,
integer_object_nulls=False,
date_as_object=True,
ignore_metadata=True,
categories=categories,
types_mapper=_data_types.pyarrow2pandas_extension,
)
def _read_parquet_chunked(
data: pyarrow.parquet.ParquetDataset,
columns: Optional[List[str]] = None,
categories: List[str] = None,
use_threads: bool = True,
) -> Iterator[pd.DataFrame]:
for piece in data.pieces:
table: pa.Table = piece.read(
columns=columns, use_threads=use_threads, partitions=data.partitions, use_pandas_metadata=False
)
yield table.to_pandas(
use_threads=use_threads,
split_blocks=True,
self_destruct=True,
integer_object_nulls=False,
date_as_object=True,
ignore_metadata=True,
categories=categories,
types_mapper=_data_types.pyarrow2pandas_extension,
)
def read_parquet_metadata(
path: Union[str, List[str]],
filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
dataset: bool = False,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> Tuple[Dict[str, str], Optional[Dict[str, str]]]:
"""Read Apache Parquet file(s) metadata from from a received S3 prefix or list of S3 objects paths.
The concept of Dataset goes beyond the simple idea of files and enables more
complex features like partitioning and catalog integration (AWS Glue Catalog).
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
filters: Union[List[Tuple], List[List[Tuple]]], optional
List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.
dataset: bool
If True read a parquet dataset instead of simple file(s) loading all the related partitions as columns.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
Tuple[Dict[str, str], Optional[Dict[str, str]]]
columns_types: Dictionary with keys as column names and values as
data types (e.g. {'col0': 'bigint', 'col1': 'double'}). /
partitions_types: Dictionary with keys as partition names
and values as data types (e.g. {'col2': 'date'}).
Examples
--------
Reading all Parquet files (with partitions) metadata under a prefix
>>> import awswrangler as wr
>>> columns_types, partitions_types = wr.s3.read_parquet_metadata(path='s3://bucket/prefix/', dataset=True)
Reading all Parquet files metadata from a list
>>> import awswrangler as wr
>>> columns_types, partitions_types = wr.s3.read_parquet_metadata(path=[
... 's3://bucket/filename0.parquet',
... 's3://bucket/filename1.parquet'
... ])
"""
data: pyarrow.parquet.ParquetDataset = _read_parquet_init(
path=path, filters=filters, dataset=dataset, use_threads=use_threads, boto3_session=boto3_session
)
return _data_types.athena_types_from_pyarrow_schema(
schema=data.schema.to_arrow_schema(), partitions=data.partitions
)
def store_parquet_metadata(
path: str,
database: str,
table: str,
filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
dataset: bool = False,
use_threads: bool = True,
description: Optional[str] = None,
parameters: Optional[Dict[str, str]] = None,
columns_comments: Optional[Dict[str, str]] = None,
compression: Optional[str] = None,
boto3_session: Optional[boto3.Session] = None,
) -> Tuple[Dict[str, str], Optional[Dict[str, str]], Optional[Dict[str, List[str]]]]:
"""Infer and store parquet metadata on AWS Glue Catalog.
Infer Apache Parquet file(s) metadata from a received S3 prefix or list of S3 objects paths,
and then store it on the AWS Glue Catalog, including all inferred partitions
(no need for 'MSCK REPAIR TABLE').
The concept of Dataset goes beyond the simple idea of files and enables more
complex features like partitioning and catalog integration (AWS Glue Catalog).
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
path : Union[str, List[str]]
S3 prefix (e.g. s3://bucket/prefix) or list of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
database : str
Glue/Athena catalog: Database name.
table : str
Glue/Athena catalog: Table name.
filters: Union[List[Tuple], List[List[Tuple]]], optional
List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.
dataset: bool
If True read a parquet dataset instead of simple file(s) loading all the related partitions as columns.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
description: str, optional
Glue/Athena catalog: Table description
parameters: Dict[str, str], optional
Glue/Athena catalog: Key/value pairs to tag the table.
columns_comments: Dict[str, str], optional
Glue/Athena catalog:
Column names and the related comments (e.g. {'col0': 'Column 0.', 'col1': 'Column 1.', 'col2': 'Partition.'}).
compression: str, optional
Compression style (``None``, ``snappy``, ``gzip``, etc).
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
Tuple[Dict[str, str], Optional[Dict[str, str]], Optional[Dict[str, List[str]]]]
The metadata used to create the Glue Table.
columns_types: Dictionary with keys as column names and values as
data types (e.g. {'col0': 'bigint', 'col1': 'double'}). /
partitions_types: Dictionary with keys as partition names
and values as data types (e.g. {'col2': 'date'}). /
partitions_values: Dictionary with keys as S3 path locations and values as a
list of partitions values as str (e.g. {'s3://bucket/prefix/y=2020/m=10/': ['2020', '10']}).
Examples
--------
Reading all Parquet files metadata under a prefix
>>> import awswrangler as wr
>>> columns_types, partitions_types, partitions_values = wr.s3.store_parquet_metadata(
... path='s3://bucket/prefix/',
... database='...',
... table='...',
... dataset=True
... )
"""
session: boto3.Session = _utils.ensure_session(session=boto3_session)
data: pyarrow.parquet.ParquetDataset = _read_parquet_init(
path=path, filters=filters, dataset=dataset, use_threads=use_threads, boto3_session=session
)
partitions: Optional[pyarrow.parquet.ParquetPartitions] = data.partitions
columns_types, partitions_types = _data_types.athena_types_from_pyarrow_schema(
schema=data.schema.to_arrow_schema(), partitions=partitions
)
catalog.create_parquet_table(
database=database,
table=table,
path=path,
columns_types=columns_types,
partitions_types=partitions_types,
description=description,
parameters=parameters,
columns_comments=columns_comments,
boto3_session=session,
)
partitions_values: Dict[str, List[str]] = _data_types.athena_partitions_from_pyarrow_partitions(
path=path, partitions=partitions
)
catalog.add_parquet_partitions(
database=database,
table=table,
partitions_values=partitions_values,
compression=compression,
boto3_session=session,
)
return columns_types, partitions_types, partitions_values
def wait_objects_exist(
paths: List[str],
delay: Optional[Union[int, float]] = None,
max_attempts: Optional[int] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> None:
"""Wait Amazon S3 objects exist.
Polls S3.Client.head_object() every 5 seconds (default) until a successful
state is reached. An error is returned after 20 (default) failed checks.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Waiter.ObjectExists
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
paths : List[str]
List of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
delay : Union[int,float], optional
The amount of time in seconds to wait between attempts. Default: 5
max_attempts : int, optional
The maximum number of attempts to be made. Default: 20
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
None
None.
Examples
--------
>>> import awswrangler as wr
>>> wr.s3.wait_objects_exist(['s3://bucket/key0', 's3://bucket/key1']) # wait both objects
"""
return _wait_objects(
waiter_name="object_exists",
paths=paths,
delay=delay,
max_attempts=max_attempts,
use_threads=use_threads,
boto3_session=boto3_session,
)
def wait_objects_not_exist(
paths: List[str],
delay: Optional[Union[int, float]] = None,
max_attempts: Optional[int] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> None:
"""Wait Amazon S3 objects not exist.
Polls S3.Client.head_object() every 5 seconds (default) until a successful
state is reached. An error is returned after 20 (default) failed checks.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Waiter.ObjectNotExists
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
paths : List[str]
List of S3 objects paths (e.g. [s3://bucket/key0, s3://bucket/key1]).
delay : Union[int,float], optional
The amount of time in seconds to wait between attempts. Default: 5
max_attempts : int, optional
The maximum number of attempts to be made. Default: 20
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
None
None.
Examples
--------
>>> import awswrangler as wr
>>> wr.s3.wait_objects_not_exist(['s3://bucket/key0', 's3://bucket/key1']) # wait both objects not exist
"""
return _wait_objects(
waiter_name="object_not_exists",
paths=paths,
delay=delay,
max_attempts=max_attempts,
use_threads=use_threads,
boto3_session=boto3_session,
)
def _wait_objects(
waiter_name: str,
paths: List[str],
delay: Optional[Union[int, float]] = None,
max_attempts: Optional[int] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> None:
delay = 5 if delay is None else delay
max_attempts = 20 if max_attempts is None else max_attempts
_delay: int = int(delay) if isinstance(delay, float) else delay
if len(paths) < 1:
return None
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
waiter = client_s3.get_waiter(waiter_name)
_paths: List[Tuple[str, str]] = [_utils.parse_path(path=p) for p in paths]
if use_threads is False:
for bucket, key in _paths:
waiter.wait(Bucket=bucket, Key=key, WaiterConfig={"Delay": _delay, "MaxAttempts": max_attempts})
else:
cpus: int = _utils.ensure_cpu_count(use_threads=use_threads)
with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as executor:
futures: List[concurrent.futures.Future] = []
for bucket, key in _paths:
future: concurrent.futures.Future = executor.submit(
fn=waiter.wait, Bucket=bucket, Key=key, WaiterConfig={"Delay": _delay, "MaxAttempts": max_attempts}
)
futures.append(future)
for future in futures:
future.result()
return None
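# Illustrative sketch (assumes default AWS credentials; bucket/key values are placeholders):
# the single-object boto3 waiter call that _wait_objects issues per path, without the thread pool.
def _example_wait_single_object(bucket="my-bucket", key="my/key"):
    import boto3
    waiter = boto3.client("s3").get_waiter("object_exists")
    waiter.wait(Bucket=bucket, Key=key, WaiterConfig={"Delay": 5, "MaxAttempts": 20})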
def read_parquet_table(
table: str,
database: str,
filters: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
columns: Optional[List[str]] = None,
categories: List[str] = None,
chunked: bool = False,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Read Apache Parquet table registered on AWS Glue Catalog.
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
table : str
AWS Glue Catalog table name.
database : str
AWS Glue Catalog database name.
filters: Union[List[Tuple], List[List[Tuple]]], optional
List of filters to apply, like ``[[('x', '=', 0), ...], ...]``.
columns : List[str], optional
Names of columns to read from the file(s).
categories: List[str], optional
List of column names that should be returned as pandas.Categorical.
Recommended for memory restricted environments.
chunked : bool
If True will break the data in smaller DataFrames (Non deterministic number of lines).
Otherwise return a single DataFrame with the whole data.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to s3fs, useful for server side encryption
https://s3fs.readthedocs.io/en/latest/#serverside-encryption
Returns
-------
Union[pandas.DataFrame, Generator[pandas.DataFrame, None, None]]
Pandas DataFrame or a Generator in case of `chunked=True`.
Examples
--------
Reading Parquet Table
>>> import awswrangler as wr
>>> df = wr.s3.read_parquet_table(database='...', table='...')
Reading Parquet Table encrypted
>>> import awswrangler as wr
>>> df = wr.s3.read_parquet_table(
... database='...',
... table='...',
... s3_additional_kwargs={
... 'ServerSideEncryption': 'aws:kms',
... 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'
... }
... )
Reading Parquet Table in chunks
>>> import awswrangler as wr
>>> dfs = wr.s3.read_parquet_table(database='...', table='...', chunked=True)
>>> for df in dfs:
>>> print(df) # Smaller Pandas DataFrame
"""
path: str = catalog.get_table_location(database=database, table=table, boto3_session=boto3_session)
return read_parquet(
path=path,
filters=filters,
columns=columns,
categories=categories,
chunked=chunked,
dataset=True,
use_threads=use_threads,
boto3_session=boto3_session,
s3_additional_kwargs=s3_additional_kwargs,
)
def merge_datasets(
source_path: str,
target_path: str,
mode: str = "append",
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> List[str]:
"""Merge a source dataset into a target dataset.
Note
----
If you are merging tables (S3 datasets + Glue Catalog metadata),
remember that you will also need to update your partitions metadata in some cases.
(e.g. wr.athena.repair_table(table='...', database='...'))
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
source_path : str,
S3 Path for the source directory.
target_path : str,
S3 Path for the target directory.
mode: str, optional
``append`` (Default), ``overwrite``, ``overwrite_partitions``.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
List[str]
List of new objects paths.
Examples
--------
>>> import awswrangler as wr
>>> wr.s3.merge_datasets(
... source_path="s3://bucket0/dir0/",
... target_path="s3://bucket1/dir1/",
... mode="append"
... )
["s3://bucket1/dir1/key0", "s3://bucket1/dir1/key1"]
"""
source_path = source_path[:-1] if source_path[-1] == "/" else source_path
target_path = target_path[:-1] if target_path[-1] == "/" else target_path
session: boto3.Session = _utils.ensure_session(session=boto3_session)
paths: List[str] = list_objects(path=f"{source_path}/", boto3_session=session)
_logger.debug(f"len(paths): {len(paths)}")
if len(paths) < 1:
return []
if mode == "overwrite":
_logger.debug(f"Deleting to overwrite: {target_path}/")
delete_objects(path=f"{target_path}/", use_threads=use_threads, boto3_session=session)
elif mode == "overwrite_partitions":
paths_wo_prefix: List[str] = [x.replace(f"{source_path}/", "") for x in paths]
paths_wo_filename: List[str] = [f"{x.rpartition('/')[0]}/" for x in paths_wo_prefix]
partitions_paths: List[str] = list(set(paths_wo_filename))
target_partitions_paths = [f"{target_path}/{x}" for x in partitions_paths]
for path in target_partitions_paths:
_logger.debug(f"Deleting to overwrite_partitions: {path}")
delete_objects(path=path, use_threads=use_threads, boto3_session=session)
elif mode != "append":
raise exceptions.InvalidArgumentValue(f"{mode} is a invalid mode option.")
new_objects: List[str] = copy_objects(
paths=paths, source_path=source_path, target_path=target_path, use_threads=use_threads, boto3_session=session
)
_logger.debug(f"len(new_objects): {len(new_objects)}")
return new_objects
def copy_objects(
paths: List[str],
source_path: str,
target_path: str,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
) -> List[str]:
"""Copy a list of S3 objects to another S3 directory.
Note
----
In case of `use_threads=True` the number of threads that will be spawned will be obtained from os.cpu_count().
Parameters
----------
paths : List[str]
List of S3 objects paths (e.g. [s3://bucket/dir0/key0, s3://bucket/dir0/key1]).
source_path : str,
S3 Path for the source directory.
target_path : str,
S3 Path for the target directory.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
List[str]
List of new objects paths.
Examples
--------
>>> import awswrangler as wr
>>> wr.s3.copy_objects(
... paths=["s3://bucket0/dir0/key0", "s3://bucket0/dir0/key1"])
... source_path="s3://bucket0/dir0/",
... target_path="s3://bucket1/dir1/",
... )
["s3://bucket1/dir1/key0", "s3://bucket1/dir1/key1"]
"""
_logger.debug(f"len(paths): {len(paths)}")
if len(paths) < 1:
return []
source_path = source_path[:-1] if source_path[-1] == "/" else source_path
target_path = target_path[:-1] if target_path[-1] == "/" else target_path
session: boto3.Session = _utils.ensure_session(session=boto3_session)
batch: List[Tuple[str, str]] = []
new_objects: List[str] = []
for path in paths:
path_wo_prefix: str = path.replace(f"{source_path}/", "")
path_final: str = f"{target_path}/{path_wo_prefix}"
new_objects.append(path_final)
batch.append((path, path_final))
_logger.debug(f"len(new_objects): {len(new_objects)}")
_copy_objects(batch=batch, use_threads=use_threads, boto3_session=session)
return new_objects
def _copy_objects(batch: List[Tuple[str, str]], use_threads: bool, boto3_session: boto3.Session) -> None:
_logger.debug(f"len(batch): {len(batch)}")
client_s3: boto3.client = _utils.client(service_name="s3", session=boto3_session)
resource_s3: boto3.resource = _utils.resource(service_name="s3", session=boto3_session)
for source, target in batch:
source_bucket, source_key = _utils.parse_path(path=source)
copy_source: Dict[str, str] = {"Bucket": source_bucket, "Key": source_key}
target_bucket, target_key = _utils.parse_path(path=target)
resource_s3.meta.client.copy(
CopySource=copy_source,
Bucket=target_bucket,
Key=target_key,
SourceClient=client_s3,
Config=TransferConfig(num_download_attempts=15, use_threads=use_threads),
)
import random
import string
from typing import Dict
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import sys
import traceback
import json
import os
import hashlib
from datetime import timedelta
from io import StringIO
import logging
import warnings
import email
from requests.exceptions import ConnectionError
from collections import deque
from multiprocessing import Process
import exchangelib
from exchangelib.errors import (
ErrorItemNotFound,
ResponseMessageError,
RateLimitError,
ErrorInvalidIdMalformed,
ErrorFolderNotFound,
ErrorMailboxStoreUnavailable,
ErrorMailboxMoveInProgress,
ErrorNameResolutionNoResults,
MalformedResponseError,
)
from exchangelib.items import Item, Message, Contact
from exchangelib.services.common import EWSService, EWSAccountService
from exchangelib.util import create_element, add_xml_child, MNS, TNS
from exchangelib import (
IMPERSONATION,
Account,
EWSDateTime,
EWSTimeZone,
Configuration,
FileAttachment,
Version,
Folder,
HTMLBody,
Body,
ItemAttachment,
OAUTH2,
OAuth2AuthorizationCodeCredentials,
Identity,
ExtendedProperty
)
from oauthlib.oauth2 import OAuth2Token
from exchangelib.version import EXCHANGE_O365
from exchangelib.protocol import BaseProtocol, NoVerifyHTTPAdapter
# Ignore warnings printed to stdout
warnings.filterwarnings("ignore")
""" Constants """
APP_NAME = "ms-ews-o365"
FOLDER_ID_LEN = 120
MAX_INCIDENTS_PER_FETCH = 50
# move results
MOVED_TO_MAILBOX = "movedToMailbox"
MOVED_TO_FOLDER = "movedToFolder"
# item types
FILE_ATTACHMENT_TYPE = "FileAttachment"
ITEM_ATTACHMENT_TYPE = "ItemAttachment"
ATTACHMENT_TYPE = "attachmentType"
TOIS_PATH = "/root/Top of Information Store/"
# context keys
ATTACHMENT_ID = "attachmentId"
ATTACHMENT_ORIGINAL_ITEM_ID = "originalItemId"
NEW_ITEM_ID = "newItemId"
MESSAGE_ID = "messageId"
ITEM_ID = "itemId"
ACTION = "action"
MAILBOX = "mailbox"
MAILBOX_ID = "mailboxId"
FOLDER_ID = "id"
TARGET_MAILBOX = 'receivedBy'
# context paths
CONTEXT_UPDATE_EWS_ITEM = f"EWS.Items((val.{ITEM_ID} === obj.{ITEM_ID} || " \
f"(val.{MESSAGE_ID} && obj.{MESSAGE_ID} && val.{MESSAGE_ID} === obj.{MESSAGE_ID}))" \
f" && val.{TARGET_MAILBOX} === obj.{TARGET_MAILBOX})"
CONTEXT_UPDATE_EWS_ITEM_FOR_ATTACHMENT = "EWS.Items(val.{0} == obj.{1})".format(
ITEM_ID, ATTACHMENT_ORIGINAL_ITEM_ID
)
CONTEXT_UPDATE_ITEM_ATTACHMENT = ".ItemAttachments(val.{0} == obj.{0})".format(
ATTACHMENT_ID
)
CONTEXT_UPDATE_FILE_ATTACHMENT = ".FileAttachments(val.{0} == obj.{0})".format(
ATTACHMENT_ID
)
CONTEXT_UPDATE_FOLDER = "EWS.Folders(val.{0} == obj.{0})".format(FOLDER_ID)
# fetch params
LAST_RUN_TIME = "lastRunTime"
LAST_RUN_IDS = "ids"
LAST_RUN_FOLDER = "folderName"
ERROR_COUNTER = "errorCounter"
# headers
ITEMS_RESULTS_HEADERS = [
"sender",
"subject",
"hasAttachments",
"datetimeReceived",
"receivedBy",
"author",
"toRecipients",
"textBody",
]
UTF_8 = 'utf-8'
""" Classes """
class ProxyAdapter(requests.adapters.HTTPAdapter):
"""
Proxy Adapter used to add PROXY to requests
"""
def send(self, *args, **kwargs):
kwargs['proxies'] = handle_proxy()
return super().send(*args, **kwargs)
class InsecureProxyAdapter(NoVerifyHTTPAdapter):
"""
Insecure Proxy Adapter used to add PROXY and INSECURE to requests
NoVerifyHTTPAdapter is a built-in insecure HTTPAdapter class
"""
def send(self, *args, **kwargs):
kwargs['proxies'] = handle_proxy()
return super().send(*args, **kwargs)
class EWSClient:
def __init__(
self,
default_target_mailbox,
client_id,
client_secret,
tenant_id,
folder="Inbox",
is_public_folder=False,
request_timeout="120",
max_fetch=MAX_INCIDENTS_PER_FETCH,
self_deployed=True,
insecure=True,
proxy=False,
**kwargs,
):
"""
Client used to communicate with EWS
:param default_target_mailbox: Email address from which to fetch incidents
:param client_id: Application client ID
:param client_secret: Application client secret
:param tenant_id: Microsoft 365 tenant ID used for authentication
:param folder: Name of the folder from which to fetch incidents
:param is_public_folder: Public Folder flag
:param request_timeout: Timeout (in seconds) for HTTP requests to Exchange Server
:param max_fetch: Max incidents per fetch
:param insecure: Trust any certificate (not secure)
"""
BaseProtocol.TIMEOUT = int(request_timeout)
self.ews_server = "https://outlook.office365.com/EWS/Exchange.asmx/"
self.ms_client = MicrosoftClient(
tenant_id=tenant_id,
auth_id=client_id,
enc_key=client_secret,
app_name=APP_NAME,
base_url=self.ews_server,
verify=not insecure,
proxy=proxy,
self_deployed=self_deployed,
scope="https://outlook.office.com/.default",
)
self.folder_name = folder
self.is_public_folder = is_public_folder
self.access_type = kwargs.get('access_type') or IMPERSONATION
self.max_fetch = min(MAX_INCIDENTS_PER_FETCH, int(max_fetch))
self.last_run_ids_queue_size = 500
self.client_id = client_id
self.client_secret = client_secret
self.account_email = default_target_mailbox
self.config = self.__prepare(insecure)
self.protocol = BaseProtocol(self.config)
def __prepare(self, insecure):
"""
Prepares the client PROTOCOL, CREDENTIALS and CONFIGURATION
:param insecure: Trust any certificate (not secure)
:return: OAuth 2 Configuration
"""
BaseProtocol.HTTP_ADAPTER_CLS = InsecureProxyAdapter if insecure else ProxyAdapter
access_token = self.ms_client.get_access_token()
oauth2_token = OAuth2Token({"access_token": access_token})
self.credentials = credentials = OAuth2AuthorizationCodeCredentials(
client_id=self.client_id,
client_secret=self.client_secret,
access_token=oauth2_token,
)
# need to add identity for protocol OAuth header
self.credentials.identity = Identity(upn=self.account_email)
config_args = {
"credentials": credentials,
"auth_type": OAUTH2,
"version": Version(EXCHANGE_O365),
"service_endpoint": "https://outlook.office365.com/EWS/Exchange.asmx",
}
return Configuration(**config_args)
def get_account(self, target_mailbox=None):
"""
Request an account from EWS
:param (Optional) target_mailbox: Mailbox associated with the requested account
:return: exchangelib Account
"""
if not target_mailbox:
target_mailbox = self.account_email
return Account(
primary_smtp_address=target_mailbox,
autodiscover=False,
config=self.config,
access_type=self.access_type,
)
def get_items_from_mailbox(self, account, item_ids):
"""
Request specific items from a mailbox associated with an account
:param account: EWS account or target_mailbox associated with that account
:param item_ids: item_ids of the requested items
:return: list of exchangelib Items
"""
# allow user to pass target_mailbox as account
if isinstance(account, str):
account = self.get_account(account)
else:
account = self.get_account(self.account_email)
if type(item_ids) is not list:
item_ids = [item_ids]
items = [Item(id=x) for x in item_ids]
result = list(account.fetch(ids=items))
result = [x for x in result if not isinstance(x, ErrorItemNotFound)]
if len(result) != len(item_ids):
raise Exception(
"One or more items were not found. Check the input item ids"
)
return result
def get_item_from_mailbox(self, account, item_id):
"""
Request a single item from a mailbox associated with an account
:param account: EWS account or target_mailbox associated with that account
:param item_id: item_id of the requested item
:return: exchangelib Item
"""
result = self.get_items_from_mailbox(account, [item_id])
if len(result) == 0:
raise Exception(f"ItemId {str(item_id)} not found")
return result[0]
def get_attachments_for_item(self, item_id, account, attachment_ids=None):
"""
Request attachments for an item
:param item_id: item_id of the item to retrieve attachments from
:param account: EWS account or target_mailbox associated with that account
:param (Optional) attachment_ids: attachment_ids to retrieve
:return: list of exchangelib Item.attachments
"""
item = self.get_item_from_mailbox(account, item_id)
attachments = []
attachment_ids = argToList(attachment_ids)
if item:
if item.attachments:
for attachment in item.attachments:
if (
attachment_ids
and attachment.attachment_id.id not in attachment_ids
):
continue
attachments.append(attachment)
else:
raise Exception("Message item not found: " + item_id)
if attachment_ids and len(attachments) < len(attachment_ids):
raise Exception(
"Some attachment id did not found for message:" + str(attachment_ids)
)
return attachments
def is_default_folder(self, folder_path, is_public=None):
"""
Is the given folder_path public
:param folder_path: folder path to check if is public
:param is_public: (Optional) if provided, will return this value
:return: Boolean
"""
if is_public is not None:
return is_public
if folder_path == self.folder_name:
return self.is_public_folder
return False
def get_folder_by_path(self, path, account=None, is_public=False):
"""
Retrieve folder by path
:param path: path of the folder
:param account: account associated with the requested path
:param is_public: is the requested folder public
:return: exchangelib Folder
"""
if account is None:
account = self.get_account()
# handle exchange folder id
if len(path) == FOLDER_ID_LEN:
folders_map = account.root._folders_map
if path in folders_map:
return account.root._folders_map[path]
if is_public:
folder_result = account.public_folders_root
elif path == "AllItems":
folder_result = account.root
else:
folder_result = account.inbox.parent # Top of Information Store
path = path.replace("/", "\\")
path = path.split("\\")
for sub_folder_name in path:
folder_filter_by_name = [
x
for x in folder_result.children
if x.name.lower() == sub_folder_name.lower()
]
if len(folder_filter_by_name) == 0:
raise Exception(f"No such folder {path}")
folder_result = folder_filter_by_name[0]
return folder_result
def send_email(self, message: Message):
account = self.get_account()
message.account = account
message.send_and_save()
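# Illustrative, commented-out usage sketch. The literal values below are placeholders; in the
# integration itself the client is built from demisto.params() rather than hard-coded strings.
#
#   client = EWSClient(
#       default_target_mailbox="soc@example.com",
#       client_id="<app-client-id>",
#       client_secret="<app-client-secret>",
#       tenant_id="<tenant-id>",
#       folder="Inbox",
#   )
#   account = client.get_account()
#   item = client.get_item_from_mailbox(account, "<ews-item-id>")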
class MarkAsJunk(EWSAccountService):
"""
EWSAccountService class used for marking items as junk
"""
SERVICE_NAME = "MarkAsJunk"
def call(self, item_id, move_item):
elements = list(
self._get_elements(
payload=self.get_payload(item_id=item_id, move_item=move_item)
)
)
for element in elements:
if isinstance(element, ResponseMessageError):
return str(element)
return "Success"
def get_payload(self, item_id, move_item):
junk = create_element(
f"m:{self.SERVICE_NAME}",
{"IsJunk": "true", "MoveItem": "true" if move_item else "false"},
)
items_list = create_element("m:ItemIds")
item_element = create_element("t:ItemId", {"Id": item_id})
items_list.append(item_element)
junk.append(items_list)
return junk
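# For reference, get_payload() above produces a request body shaped roughly like:
#   <m:MarkAsJunk IsJunk="true" MoveItem="true|false">
#       <m:ItemIds>
#           <t:ItemId Id="..."/>
#       </m:ItemIds>
#   </m:MarkAsJunk>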
class GetSearchableMailboxes(EWSService):
"""
EWSService class used for getting Searchable Mailboxes
"""
SERVICE_NAME = "GetSearchableMailboxes"
element_container_name = f"{{{MNS}}}SearchableMailboxes"
@staticmethod
def parse_element(element):
return {
MAILBOX: element.find(f"{{{TNS}}}PrimarySmtpAddress").text
if element.find(f"{{{TNS}}}PrimarySmtpAddress") is not None
else None,
MAILBOX_ID: element.find(f"{{{TNS}}}ReferenceId").text
if element.find(f"{{{TNS}}}ReferenceId") is not None
else None,
"displayName": element.find(f"{{{TNS}}}DisplayName").text
if element.find(f"{{{TNS}}}DisplayName") is not None
else None,
"isExternal": element.find(f"{{{TNS}}}IsExternalMailbox").text
if element.find(f"{{{TNS}}}IsExternalMailbox") is not None
else None,
"externalEmailAddress": element.find(f"{{{TNS}}}ExternalEmailAddress").text
if element.find(f"{{{TNS}}}ExternalEmailAddress") is not None
else None,
}
def call(self):
elements = self._get_elements(payload=self.get_payload())
return [
self.parse_element(x)
for x in elements
if x.find(f"{{{TNS}}}ReferenceId").text
]
def get_payload(self):
element = create_element(f"m:{self.SERVICE_NAME}")
return element
class ExpandGroup(EWSService):
"""
EWSService class used for expanding groups
"""
SERVICE_NAME = "ExpandDL"
element_container_name = f"{{{MNS}}}DLExpansion"
@staticmethod
def parse_element(element):
return {
MAILBOX: element.find(f"{{{TNS}}}EmailAddress").text
if element.find(f"{{{TNS}}}EmailAddress") is not None
else None,
"displayName": element.find(f"{{{TNS}}}Name").text
if element.find(f"{{{TNS}}}Name") is not None
else None,
"mailboxType": element.find(f"{{{TNS}}}MailboxType").text
if element.find(f"{{{TNS}}}MailboxType") is not None
else None,
}
def call(self, email_address, recursive_expansion=False):
try:
if recursive_expansion == "True":
group_members: Dict = {}
self.expand_group_recursive(email_address, group_members)
return list(group_members.values())
else:
return self.expand_group(email_address)
except ErrorNameResolutionNoResults:
demisto.results("No results were found.")
sys.exit()
def get_payload(self, email_address):
element = create_element(f"m:{self.SERVICE_NAME}")
mailbox_element = create_element("m:Mailbox")
add_xml_child(mailbox_element, "t:EmailAddress", email_address)
element.append(mailbox_element)
return element
def expand_group(self, email_address):
"""
Expand given group
:param email_address: email address of the group to expand
:return: list of dicts with parsed expanded group data
"""
elements = self._get_elements(payload=self.get_payload(email_address))
return [self.parse_element(x) for x in elements]
def expand_group_recursive(self, email_address, non_dl_emails, dl_emails=None):
"""
Expand group recursively
:param email_address: email address of the group to expand
:param non_dl_emails: dict of non-distribution-list member emails, populated in place
:param dl_emails: (Optional) set of distribution-list emails that were already expanded
:return: None (results are returned via the non_dl_emails and dl_emails references)
"""
if dl_emails is None:
dl_emails = set()
if email_address in non_dl_emails or email_address in dl_emails:
return None
dl_emails.add(email_address)
for member in self.expand_group(email_address):
if (
member["mailboxType"] == "PublicDL"
or member["mailboxType"] == "PrivateDL"
):
self.expand_group_recursive(member.get("mailbox"), non_dl_emails, dl_emails)
else:
if member["mailbox"] not in non_dl_emails:
non_dl_emails[member["mailbox"]] = member
# If you are modifying this, you probably also need to modify it in other files
def exchangelib_cleanup():
key_protocols = list(exchangelib.protocol.CachingProtocol._protocol_cache.items())
try:
exchangelib.close_connections()
except Exception as ex:
demisto.error("Error was found in exchangelib cleanup, ignoring: {}".format(ex))
for key, protocol in key_protocols:
try:
if "thread_pool" in protocol.__dict__:
demisto.debug(
"terminating thread pool key{} id: {}".format(
key, id(protocol.thread_pool)
)
)
protocol.thread_pool.terminate()
del protocol.__dict__["thread_pool"]
else:
demisto.info(
"Thread pool not found (ignoring terminate) in protcol dict: {}".format(
dir(protocol.__dict__)
)
)
except Exception as ex:
demisto.error("Error with thread_pool.terminate, ignoring: {}".format(ex))
""" LOGGING """
log_stream = None
log_handler = None
def start_logging():
global log_stream
global log_handler
logging.raiseExceptions = False
if log_stream is None:
log_stream = StringIO()
log_handler = logging.StreamHandler(stream=log_stream)
log_handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
logger = logging.getLogger()
logger.addHandler(log_handler)
logger.setLevel(logging.DEBUG)
""" Helper Functions """
def get_attachment_name(attachment_name):
"""
Retrieve the attachment name, or a default name if none is provided
:param attachment_name: attachment name to retrieve
:return: string
"""
if attachment_name is None or attachment_name == "":
return "demisto_untitled_attachment"
return attachment_name
def get_entry_for_object(title, context_key, obj, headers=None):
"""
Create an entry for a given object
:param title: Title of the human readable
:param context_key: Context key used for entry context
:param obj: Object to create entry for
:param headers: (Optional) headers used in the tableToMarkDown
:return: Entry object to be used with demisto.results()
"""
if len(obj) == 0:
return "There is no output results"
if headers and isinstance(obj, dict):
headers = list(set(headers).intersection(set(obj.keys())))
return {
"Type": entryTypes["note"],
"Contents": obj,
"ContentsFormat": formats["json"],
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": tableToMarkdown(title, obj, headers),
"EntryContext": {context_key: obj},
}
def prepare_args(args):
"""
Prepare arguments to be used as the API expects it
:param args: demisto args
:return: transformed args
"""
args = dict((k.replace("-", "_"), v) for k, v in list(args.items()))
if "is_public" in args:
args["is_public"] = args["is_public"] == "True"
return args
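# Illustrative example (hypothetical demisto args):
#   prepare_args({"item-id": "AAMk", "is_public": "True"})
#   -> {"item_id": "AAMk", "is_public": True}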
def get_limited_number_of_messages_from_qs(qs, limit):
"""
Retrieve a limited number of messages from query search
:param qs: query search to execute
:param limit: limit on number of items to retrieve from search
:return: list of exchangelib.Message
"""
count = 0
results = []
for item in qs:
if count == limit:
break
if isinstance(item, Message):
count += 1
results.append(item)
return results
def keys_to_camel_case(value):
"""
Transform keys from snake to camel case (does nothing if no snakes are found)
:param value: value to transform
:return: transformed value
"""
def str_to_camel_case(snake_str):
components = snake_str.split("_")
return components[0] + "".join(x.title() for x in components[1:])
if value is None:
return None
if isinstance(value, (list, set)):
return list(map(keys_to_camel_case, value))
if isinstance(value, dict):
return dict(
(
keys_to_camel_case(k),
keys_to_camel_case(v) if isinstance(v, (list, dict)) else v,
)
for (k, v) in list(value.items())
)
return str_to_camel_case(value)
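# Illustrative example:
#   keys_to_camel_case({"message_id": "<id>", "to_recipients": ["a@b.c"]})
#   -> {"messageId": "<id>", "toRecipients": ["a@b.c"]}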
def get_last_run(client: EWSClient, last_run=None):
"""
Retrieve the last run time
:param client: EWS Client
:param last_run: (Optional) last run object
:return: last run dict
"""
if not last_run or last_run.get(LAST_RUN_FOLDER) != client.folder_name:
last_run = {
LAST_RUN_TIME: None,
LAST_RUN_FOLDER: client.folder_name,
LAST_RUN_IDS: [],
}
if LAST_RUN_TIME in last_run and last_run[LAST_RUN_TIME] is not None:
last_run[LAST_RUN_TIME] = EWSDateTime.from_string(last_run[LAST_RUN_TIME])
# In case we have existing last_run data
if last_run.get(LAST_RUN_IDS) is None:
last_run[LAST_RUN_IDS] = []
return last_run
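# The last-run object kept between fetches has this general shape (values are illustrative):
#   {"lastRunTime": EWSDateTime(...) or None, "folderName": "Inbox", "ids": ["<message-id>", ...]}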
def email_ec(item):
"""
Create entry context for an email
:param item: exchangelib.Item
:return: entry context dict
"""
return {
"CC": None
if not item.cc_recipients
else [mailbox.email_address for mailbox in item.cc_recipients],
"BCC": None
if not item.bcc_recipients
else [mailbox.email_address for mailbox in item.bcc_recipients],
"To": None
if not item.to_recipients
else [mailbox.email_address for mailbox in item.to_recipients],
"From": item.author.email_address,
"Subject": item.subject,
"Text": item.text_body,
"HTML": item.body,
"HeadersMap": {header.name: header.value for header in item.headers},
}
def parse_item_as_dict(item, email_address=None, camel_case=False, compact_fields=False):
"""
Parses an exchangelib item as a dict
:param item: exchangelib.Item to parse
:param (Optional) email_address: string mailbox
:param (Optional) camel_case: Is camel case
:param (Optional) compact_fields: Is compact fields
:return: Item as a dict
"""
def parse_object_as_dict(obj):
raw_dict = {}
if obj is not None:
for field in obj.FIELDS:
raw_dict[field.name] = getattr(obj, field.name, None)
return raw_dict
def parse_folder_as_json(folder):
raw_dict = parse_object_as_dict(folder)
if "parent_folder_id" in raw_dict:
raw_dict["parent_folder_id"] = parse_folder_as_json(
raw_dict["parent_folder_id"]
)
if "effective_rights" in raw_dict:
raw_dict["effective_rights"] = parse_object_as_dict(
raw_dict["effective_rights"]
)
return raw_dict
raw_dict = {}
for field, value in item._field_vals():
if type(value) in [str, int, float, bool, Body, HTMLBody]:
raw_dict[field] = value
raw_dict["id"] = item.id
if getattr(item, "attachments", None):
raw_dict["attachments"] = [
parse_attachment_as_dict(item.id, x) for x in item.attachments
]
for time_field in [
"datetime_sent",
"datetime_created",
"datetime_received",
"last_modified_time",
"reminder_due_by",
]:
value = getattr(item, time_field, None)
if value:
raw_dict[time_field] = value.ewsformat()
for dict_field in [
"effective_rights",
"parent_folder_id",
"conversation_id",
"author",
"extern_id",
"received_by",
"received_representing",
"reply_to",
"sender",
"folder",
]:
value = getattr(item, dict_field, None)
if value:
if isinstance(value, list):
raw_dict[dict_field] = []
for single_val in value:
raw_dict[dict_field].append(parse_object_as_dict(single_val))
else:
raw_dict[dict_field] = parse_object_as_dict(value)
for list_dict_field in ["headers", "cc_recipients", "to_recipients"]:
value = getattr(item, list_dict_field, None)
if value:
raw_dict[list_dict_field] = [parse_object_as_dict(x) for x in value]
if getattr(item, "folder", None):
raw_dict["folder"] = parse_folder_as_json(item.folder)
folder_path = (
item.folder.absolute[len(TOIS_PATH):]
if item.folder.absolute.startswith(TOIS_PATH)
else item.folder.absolute
)
raw_dict["folder_path"] = folder_path
if compact_fields:
new_dict = {}
# noinspection PyListCreation
fields_list = [
"datetime_created",
"datetime_received",
"datetime_sent",
"sender",
"has_attachments",
"importance",
"message_id",
"last_modified_time",
"size",
"subject",
"text_body",
"headers",
"body",
"folder_path",
"is_read",
]
if "id" in raw_dict:
new_dict["itemId"] = raw_dict["id"]
fields_list.append("itemId")
for field in fields_list:
if field in raw_dict:
new_dict[field] = raw_dict.get(field)
for field in ["received_by", "author", "sender"]:
if field in raw_dict:
new_dict[field] = raw_dict.get(field, {}).get("email_address")
for field in ["to_recipients"]:
if field in raw_dict:
new_dict[field] = [x.get("email_address") for x in raw_dict[field]]
attachments = raw_dict.get("attachments")
if attachments and len(attachments) > 0:
file_attachments = [
x for x in attachments if x[ATTACHMENT_TYPE] == FILE_ATTACHMENT_TYPE
]
if len(file_attachments) > 0:
new_dict["FileAttachments"] = file_attachments
item_attachments = [
x for x in attachments if x[ATTACHMENT_TYPE] == ITEM_ATTACHMENT_TYPE
]
if len(item_attachments) > 0:
new_dict["ItemAttachments"] = item_attachments
raw_dict = new_dict
if camel_case:
raw_dict = keys_to_camel_case(raw_dict)
if email_address:
raw_dict[MAILBOX] = email_address
return raw_dict
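# Note: when compact_fields is True the result is trimmed to the field list above,
# "id" is renamed to "itemId", received_by/author/sender are flattened to their email
# addresses, and attachments are split into "FileAttachments" / "ItemAttachments";
# when camel_case is True the keys are then converted via keys_to_camel_case.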
def get_entry_for_file_attachment(item_id, attachment):
"""
Creates a file entry for an attachment
:param item_id: item_id of the attachment
:param attachment: attachment dict
:return: file entry dict for attachment
"""
entry = fileResult(get_attachment_name(attachment.name), attachment.content)
entry["EntryContext"] = {
CONTEXT_UPDATE_EWS_ITEM_FOR_ATTACHMENT
+ CONTEXT_UPDATE_FILE_ATTACHMENT: parse_attachment_as_dict(item_id, attachment)
}
return entry
def parse_attachment_as_dict(item_id, attachment):
"""
Creates a note entry for an attachment
:param item_id: item_id of the attachment
:param attachment: attachment dict
:return: note entry dict for attachment
"""
try:
attachment_content = (
attachment.content
if isinstance(attachment, FileAttachment)
else attachment.item.mime_content
)
return {
ATTACHMENT_ORIGINAL_ITEM_ID: item_id,
ATTACHMENT_ID: attachment.attachment_id.id,
"attachmentName": get_attachment_name(attachment.name),
"attachmentSHA256": hashlib.sha256(attachment_content).hexdigest()
if attachment_content
else None,
"attachmentContentType": attachment.content_type,
"attachmentContentId": attachment.content_id,
"attachmentContentLocation": attachment.content_location,
"attachmentSize": attachment.size,
"attachmentLastModifiedTime": attachment.last_modified_time.ewsformat(),
"attachmentIsInline": attachment.is_inline,
ATTACHMENT_TYPE: FILE_ATTACHMENT_TYPE
if isinstance(attachment, FileAttachment)
else ITEM_ATTACHMENT_TYPE,
}
except TypeError as e:
if str(e) != "must be string or buffer, not None":
raise
return {
ATTACHMENT_ORIGINAL_ITEM_ID: item_id,
ATTACHMENT_ID: attachment.attachment_id.id,
"attachmentName": get_attachment_name(attachment.name),
"attachmentSHA256": None,
"attachmentContentType": attachment.content_type,
"attachmentContentId": attachment.content_id,
"attachmentContentLocation": attachment.content_location,
"attachmentSize": attachment.size,
"attachmentLastModifiedTime": attachment.last_modified_time.ewsformat(),
"attachmentIsInline": attachment.is_inline,
ATTACHMENT_TYPE: FILE_ATTACHMENT_TYPE
if isinstance(attachment, FileAttachment)
else ITEM_ATTACHMENT_TYPE,
}
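# Note: the except TypeError branch above handles attachments whose content cannot be
# read (the "must be string or buffer, not None" case) and returns the same metadata
# without a SHA256.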
def get_entry_for_item_attachment(item_id, attachment, target_email):
"""
Creates a note entry for an item attachment
:param item_id: Item id
:param attachment: exchangelib attachment
:param target_email: target email
:return: note entry dict for item attachment
"""
item = attachment.item
dict_result = parse_attachment_as_dict(item_id, attachment)
dict_result.update(
parse_item_as_dict(item, target_email, camel_case=True, compact_fields=True)
)
title = f'EWS get attachment got item for "{target_email}", "{get_attachment_name(attachment.name)}"'
return get_entry_for_object(
title,
CONTEXT_UPDATE_EWS_ITEM_FOR_ATTACHMENT + CONTEXT_UPDATE_ITEM_ATTACHMENT,
dict_result,
)
""" Command Functions """
def get_expanded_group(client: EWSClient, email_address, recursive_expansion=False):
"""
Retrieve expanded group command
:param client: EWS Client
:param email_address: Email address of the group to expand
:param (Optional) recursive_expansion: Whether to enable recursive expansion. Default is "False".
:return: Expanded groups output tuple
"""
group_members = ExpandGroup(protocol=client.protocol).call(
email_address, recursive_expansion
)
group_details = {"name": email_address, "members": group_members}
output = {"EWS.ExpandGroup": group_details}
readable_output = tableToMarkdown("Group Members", group_members)
return readable_output, output, group_details
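# Illustrative invocation (hypothetical group address), matching how the
# "ews-expand-group" command is wired in sub_main():
#   readable, outputs, raw = get_expanded_group(client, "group@example.com", recursive_expansion="True")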
def get_searchable_mailboxes(client: EWSClient):
"""
Retrieve searchable mailboxes command
:param client: EWS Client
:return: Searchable mailboxes output tuple
"""
searchable_mailboxes = GetSearchableMailboxes(protocol=client.protocol).call()
readable_output = tableToMarkdown(
"Searchable mailboxes", searchable_mailboxes, headers=["displayName", "mailbox"]
)
output = {"EWS.Mailboxes": searchable_mailboxes}
return readable_output, output, searchable_mailboxes
def delete_attachments_for_message(
client: EWSClient, item_id, target_mailbox=None, attachment_ids=None
):
"""
Deletes attachments for a given message
:param client: EWS Client
:param item_id: item id
:param (Optional) target_mailbox: target mailbox
:param (Optional) attachment_ids: attachment ids to delete
:return: entries that were deleted
"""
attachments = client.get_attachments_for_item(
item_id, target_mailbox, attachment_ids
)
deleted_file_attachments = []
deleted_item_attachments = [] # type: ignore
for attachment in attachments:
attachment_deleted_action = {
ATTACHMENT_ID: attachment.attachment_id.id,
ACTION: "deleted",
}
if isinstance(attachment, FileAttachment):
deleted_file_attachments.append(attachment_deleted_action)
else:
deleted_item_attachments.append(attachment_deleted_action)
attachment.detach()
entries = []
if len(deleted_file_attachments) > 0:
entry = get_entry_for_object(
"Deleted file attachments",
"EWS.Items" + CONTEXT_UPDATE_FILE_ATTACHMENT,
deleted_file_attachments,
)
entries.append(entry)
if len(deleted_item_attachments) > 0:
entry = get_entry_for_object(
"Deleted item attachments",
"EWS.Items" + CONTEXT_UPDATE_ITEM_ATTACHMENT,
deleted_item_attachments,
)
entries.append(entry)
return entries
def fetch_attachments_for_message(
client: EWSClient, item_id, target_mailbox=None, attachment_ids=None
):
"""
Fetches attachments for a message
:param client: EWS Client
:param item_id: item id
:param (Optional) target_mailbox: target mailbox
:param (Optional) attachment_ids: attachment ids
:return: list of parsed entries
"""
account = client.get_account(target_mailbox)
attachments = client.get_attachments_for_item(item_id, account, attachment_ids)
entries = []
for attachment in attachments:
if isinstance(attachment, FileAttachment):
try:
if attachment.content:
entries.append(get_entry_for_file_attachment(item_id, attachment))
except TypeError as e:
if str(e) != "must be string or buffer, not None":
raise
else:
entries.append(
get_entry_for_item_attachment(
item_id, attachment, account.primary_smtp_address
)
)
if attachment.item.mime_content:
entries.append(
fileResult(
get_attachment_name(attachment.name) + ".eml",
attachment.item.mime_content,
)
)
return entries
def move_item_between_mailboxes(
client: EWSClient,
item_id,
destination_mailbox,
destination_folder_path,
source_mailbox=None,
is_public=None,
):
"""
Moves item between mailboxes
:param client: EWS Client
:param item_id: item id
:param destination_mailbox: destination mailbox
:param destination_folder_path: destination folder path
:param (Optional) source_mailbox: source mailbox
:param (Optional) is_public: is the destination folder public
:return: Output tuple
"""
source_account = client.get_account(source_mailbox)
destination_account = client.get_account(destination_mailbox)
is_public = client.is_default_folder(destination_folder_path, is_public)
destination_folder = client.get_folder_by_path(
destination_folder_path, destination_account, is_public
)
item = client.get_item_from_mailbox(source_account, item_id)
exported_items = source_account.export([item])
destination_account.upload([(destination_folder, exported_items[0])])
source_account.bulk_delete([item])
move_result = {
MOVED_TO_MAILBOX: destination_mailbox,
MOVED_TO_FOLDER: destination_folder_path,
}
readable_output = "Item was moved successfully."
output = {f"EWS.Items(val.itemId === '{item_id}')": move_result}
return readable_output, output, move_result
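# Note: the cross-mailbox move above is implemented as export from the source account,
# upload of the exported item into the destination folder, and only then bulk_delete of
# the original item, so the source copy is removed only after the upload succeeds.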
def move_item(
client: EWSClient, item_id, target_folder_path, target_mailbox=None, is_public=None
):
"""
Moves an item within the same mailbox
:param client: EWS Client
:param item_id: item id
:param target_folder_path: target folder path
:param (Optional) target_mailbox: mailbox containing the item
:param (Optional) is_public: is the destination folder public
:return: Output tuple
"""
account = client.get_account(target_mailbox)
is_public = client.is_default_folder(target_folder_path, is_public)
target_folder = client.get_folder_by_path(target_folder_path, is_public=is_public)
item = client.get_item_from_mailbox(account, item_id)
if isinstance(item, ErrorInvalidIdMalformed):
raise Exception("Item not found")
item.move(target_folder)
move_result = {
NEW_ITEM_ID: item.id,
ITEM_ID: item_id,
MESSAGE_ID: item.message_id,
ACTION: "moved",
}
readable_output = tableToMarkdown("Moved items", move_result)
output = {CONTEXT_UPDATE_EWS_ITEM: move_result}
return readable_output, output, move_result
def delete_items(client: EWSClient, item_ids, delete_type, target_mailbox=None):
"""
Delete items in a mailbox
:param client: EWS Client
:param item_ids: item ids to delete
:param delete_type: delete type: "trash", "soft" or "hard"
:param (Optional) target_mailbox: mailbox containing the items
:return: Output tuple
"""
deleted_items = []
item_ids = argToList(item_ids)
items = client.get_items_from_mailbox(target_mailbox, item_ids)
delete_type = delete_type.lower()
for item in items:
item_id = item.id
if delete_type == "trash":
item.move_to_trash()
elif delete_type == "soft":
item.soft_delete()
elif delete_type == "hard":
item.delete()
else:
raise Exception(
f'Invalid delete type: {delete_type}. Use "trash", "soft" or "hard"'
)
deleted_items.append(
{
ITEM_ID: item_id,
MESSAGE_ID: item.message_id,
ACTION: f"{delete_type}-deleted",
}
)
readable_output = tableToMarkdown(
f"Deleted items ({delete_type} delete type)", deleted_items
)
output = {CONTEXT_UPDATE_EWS_ITEM: deleted_items}
return readable_output, output, deleted_items
def search_items_in_mailbox(
client: EWSClient,
query=None,
message_id=None,
folder_path="",
limit=100,
target_mailbox=None,
is_public=None,
selected_fields="all",
):
"""
Search items in mailbox
:param client: EWS Client
:param (Optional) query: query to execute
:param (Optional) message_id: message ids to search
:param (Optional) folder_path: folder path to search
:param (Optional) limit: max amount of items to fetch
:param (Optional) target_mailbox: mailbox containing the items
:param (Optional) is_public: is the targeted folder public
:param (Optional) selected_fields: Selected fields
:return: Output tuple
"""
if not query and not message_id:
return_error("Missing required argument. Provide query or message-id")
if message_id and message_id[0] != "<" and message_id[-1] != ">":
message_id = "<{}>".format(message_id)
account = client.get_account(target_mailbox)
limit = int(limit)
if folder_path.lower() == "inbox":
folders = [account.inbox]
elif folder_path:
is_public = client.is_default_folder(folder_path, is_public)
folders = [client.get_folder_by_path(folder_path, account, is_public)]
else:
folders = account.inbox.parent.walk() # pylint: disable=E1101
items = [] # type: ignore
selected_all_fields = selected_fields == "all"
if selected_all_fields:
restricted_fields = list([x.name for x in Message.FIELDS]) # type: ignore
else:
restricted_fields = set(argToList(selected_fields)) # type: ignore
restricted_fields.update(["id", "message_id"]) # type: ignore
for folder in folders:
if Message not in folder.supported_item_models:
continue
if query:
items_qs = folder.filter(query).only(*restricted_fields)
else:
items_qs = folder.filter(message_id=message_id).only(*restricted_fields)
items += get_limited_number_of_messages_from_qs(items_qs, limit)
if len(items) >= limit:
break
items = items[:limit]
searched_items_result = [
parse_item_as_dict(
item,
account.primary_smtp_address,
camel_case=True,
compact_fields=selected_all_fields,
)
for item in items
]
if not selected_all_fields:
searched_items_result = [
{k: v for (k, v) in i.items() if k in keys_to_camel_case(restricted_fields)}
for i in searched_items_result
]
for item in searched_items_result:
item["itemId"] = item.pop("id", "")
readable_output = tableToMarkdown(
"Searched items",
searched_items_result,
headers=ITEMS_RESULTS_HEADERS if selected_all_fields else None,
)
output = {CONTEXT_UPDATE_EWS_ITEM: searched_items_result}
return readable_output, output, searched_items_result
def get_out_of_office_state(client: EWSClient, target_mailbox=None):
"""
Retrieve get out of office state of the targeted mailbox
:param client: EWS Client
:param (Optional) target_mailbox: target mailbox
:return: Output tuple
"""
account = client.get_account(target_mailbox)
oof = account.oof_settings
oof_dict = {
"state": oof.state, # pylint: disable=E1101
"externalAudience": getattr(oof, "external_audience", None),
"start": oof.start.ewsformat() if oof.start else None, # pylint: disable=E1101
"end": oof.end.ewsformat() if oof.end else None, # pylint: disable=E1101
"internalReply": getattr(oof, "internal_replay", None),
"externalReply": getattr(oof, "external_replay", None),
MAILBOX: account.primary_smtp_address,
}
readable_output = tableToMarkdown(
f"Out of office state for {account.primary_smtp_address}", oof_dict
)
output = {f"Account.Email(val.Address == obj.{MAILBOX}).OutOfOffice": oof_dict}
return readable_output, output, oof_dict
def recover_soft_delete_item(
client: EWSClient,
message_ids,
target_folder_path="Inbox",
target_mailbox=None,
is_public=None,
):
"""
Recovers soft deleted items
:param client: EWS Client
:param message_ids: Message ids to recover
:param (Optional) target_folder_path: target folder path
:param (Optional) target_mailbox: target mailbox
:param (Optional) is_public: is the target folder public
:return:
"""
account = client.get_account(target_mailbox)
is_public = client.is_default_folder(target_folder_path, is_public)
target_folder = client.get_folder_by_path(target_folder_path, account, is_public)
recovered_messages = []
message_ids = argToList(message_ids)
items_to_recover = account.recoverable_items_deletions.filter( # pylint: disable=E1101
message_id__in=message_ids
).all() # pylint: disable=E1101
recovered_items = set()
for item in items_to_recover:
recovered_items.add(item)
if len(recovered_items) != len(message_ids):
missing_items = set(message_ids).difference({item.message_id for item in recovered_items})
raise Exception(
f"Some message ids are missing in recoverable items directory: {missing_items}"
)
for item in recovered_items:
item.move(target_folder)
recovered_messages.append(
{ITEM_ID: item.id, MESSAGE_ID: item.message_id, ACTION: "recovered"}
)
readable_output = tableToMarkdown("Recovered messages", recovered_messages)
output = {CONTEXT_UPDATE_EWS_ITEM: recovered_messages}
return readable_output, output, recovered_messages
def get_contacts(client: EWSClient, limit, target_mailbox=None):
"""
Retrieve contacts of the target mailbox or client mailbox
:param client: EWS Client
:param limit: max amount of contacts to retrieve
:param (Optional) target_mailbox: Target mailbox
:return:
"""
def parse_physical_address(address):
result = {}
for attr in ["city", "country", "label", "state", "street", "zipcode"]:
result[attr] = getattr(address, attr, None)
return result
def parse_phone_number(phone_number):
result = {}
for attr in ["label", "phone_number"]:
result[attr] = getattr(phone_number, attr, None)
return result
def parse_contact(contact):
contact_dict = dict(
(k, v if not isinstance(v, EWSDateTime) else v.ewsformat())
for k, v in list(contact._field_vals())
if isinstance(v, str) or isinstance(v, EWSDateTime)
)
if isinstance(contact, Contact) and contact.physical_addresses:
contact_dict["physical_addresses"] = list(
map(parse_physical_address, contact.physical_addresses)
)
if isinstance(contact, Contact) and contact.phone_numbers:
contact_dict["phone_numbers"] = list(
map(parse_phone_number, contact.phone_numbers)
)
if (
isinstance(contact, Contact)
and contact.email_addresses
and len(contact.email_addresses) > 0
):
contact_dict["emailAddresses"] = [x.email for x in contact.email_addresses]
contact_dict = keys_to_camel_case(contact_dict)
contact_dict = dict((k, v) for k, v in list(contact_dict.items()) if v)
contact_dict.pop("mimeContent", None)
contact_dict["originMailbox"] = target_mailbox
return contact_dict
account = client.get_account(target_mailbox)
contacts = []
for contact in account.contacts.all()[: int(limit)]: # pylint: disable=E1101
contacts.append(parse_contact(contact))
readable_output = tableToMarkdown(f"Email contacts for {target_mailbox}", contacts)
output = {"Account.Email(val.Address == obj.originMailbox).EwsContacts": contacts}
return readable_output, output, contacts
def create_folder(client: EWSClient, new_folder_name, folder_path, target_mailbox=None):
"""
Creates a folder in the target mailbox or the client mailbox
:param client: EWS Client
:param new_folder_name: new folder name
:param folder_path: path of the new folder
:param (Optional) target_mailbox: target mailbox
:return: Output tuple
"""
account = client.get_account(target_mailbox)
full_path = os.path.join(folder_path, new_folder_name)
try:
if client.get_folder_by_path(full_path, account):
return f"Folder {full_path} already exists",
except Exception:
pass
parent_folder = client.get_folder_by_path(folder_path, account)
f = Folder(parent=parent_folder, name=new_folder_name)
f.save()
client.get_folder_by_path(full_path, account)
return f"Folder {full_path} created successfully",
def find_folders(client: EWSClient, target_mailbox=None):
"""
Finds folders in the mailbox
:param client: EWS Client
:param (Optional) target_mailbox: target mailbox
:return: Output tuple
"""
account = client.get_account(target_mailbox)
root = account.root
if client.is_public_folder:
root = account.public_folders_root
folders = []
for f in root.walk(): # pylint: disable=E1101
folder = folder_to_context_entry(f)
folders.append(folder)
folders_tree = root.tree() # pylint: disable=E1101
readable_output = folders_tree
output = {"EWS.Folders(val.id == obj.id)": folders}
return readable_output, output, folders
def mark_item_as_junk(client: EWSClient, item_id, move_items, target_mailbox=None):
"""
Marks item as junk in the target mailbox or client mailbox
:param client: EWS Client
:param item_id: item id to mark as junk
:param move_items: "yes" or "no" - to move or not to move to trash
:param (Optional) target_mailbox: target mailbox
:return:
"""
account = client.get_account(target_mailbox)
move_items = move_items.lower() == "yes"
ews_result = MarkAsJunk(account=account).call(item_id=item_id, move_item=move_items)
mark_as_junk_result = {
ITEM_ID: item_id,
}
if ews_result == "Success":
mark_as_junk_result[ACTION] = "marked-as-junk"
else:
raise Exception("Failed mark-item-as-junk with error: " + ews_result)
readable_output = tableToMarkdown("Mark item as junk", mark_as_junk_result)
output = {CONTEXT_UPDATE_EWS_ITEM: mark_as_junk_result}
return readable_output, output, mark_as_junk_result
def get_items_from_folder(
client: EWSClient,
folder_path,
limit=100,
target_mailbox=None,
is_public=None,
get_internal_item="no",
):
"""
Retrieve items from folder path
:param client: EWS Client
:param folder_path: folder path
:param (Optional) limit: max amount of items to retrieve
:param (Optional) target_mailbox: target mailbox
:param (Optional) is_public: is the folder public
:param (Optional) get_internal_item: should also retrieve internal items ("no" by default)
:return: Output tuple
"""
account = client.get_account(target_mailbox)
limit = int(limit)
get_internal_item = get_internal_item == "yes"
is_public = client.is_default_folder(folder_path, is_public)
folder = client.get_folder_by_path(folder_path, account, is_public)
qs = folder.filter().order_by("-datetime_created")[:limit]
items = get_limited_number_of_messages_from_qs(qs, limit)
items_result = []
for item in items:
item_attachment = parse_item_as_dict(
item, account.primary_smtp_address, camel_case=True, compact_fields=True
)
for attachment in item.attachments:
if (
get_internal_item
and isinstance(attachment, ItemAttachment)
and isinstance(attachment.item, Message)
):
# if an item attachment was found - switch the item to the attachment
item_attachment = parse_item_as_dict(
attachment.item,
account.primary_smtp_address,
camel_case=True,
compact_fields=True,
)
break
items_result.append(item_attachment)
hm_headers = [
"sender",
"subject",
"hasAttachments",
"datetimeReceived",
"receivedBy",
"author",
"toRecipients",
"id",
]
readable_output = tableToMarkdown(
"Items in folder " + folder_path, items_result, headers=hm_headers
)
output = {CONTEXT_UPDATE_EWS_ITEM: items_result}
return readable_output, output, items_result
def get_items(client: EWSClient, item_ids, target_mailbox=None):
"""
Get items from target mailbox or client mailbox
:param client: EWS Client
:param item_ids: item ids to retrieve
:param (Optional) target_mailbox: target mailbox to retrieve items from
:return:
"""
item_ids = argToList(item_ids)
account = client.get_account(target_mailbox)
items = client.get_items_from_mailbox(account, item_ids)
items = [x for x in items if isinstance(x, Message)]
items_as_incidents = [parse_incident_from_item(x) for x in items]
items_to_context = [
parse_item_as_dict(x, account.primary_smtp_address, True, True) for x in items
]
readable_output = tableToMarkdown(
"Get items", items_to_context, ITEMS_RESULTS_HEADERS
)
output = {
CONTEXT_UPDATE_EWS_ITEM: items_to_context,
"Email": [email_ec(item) for item in items],
}
return readable_output, output, items_as_incidents
def get_folder(client: EWSClient, folder_path, target_mailbox=None, is_public=None):
"""
Retrieve a folder from the target mailbox or client mailbox
:param client: EWS Client
:param folder_path: folder path to retrieve
:param (Optional) target_mailbox: target mailbox
:param (Optional) is_public: is the folder public
:return:
"""
account = client.get_account(target_mailbox)
is_public = client.is_default_folder(folder_path, is_public)
folder = folder_to_context_entry(
client.get_folder_by_path(folder_path, account=account, is_public=is_public)
)
readable_output = tableToMarkdown(f"Folder {folder_path}", folder)
output = {CONTEXT_UPDATE_FOLDER: folder}
return readable_output, output, folder
def folder_to_context_entry(f):
"""
Create a context entry from a folder response
:param f: folder response
:return: dict context entry
"""
try:
f_entry = {
"name": f.name,
"totalCount": f.total_count,
"id": f.id,
"childrenFolderCount": f.child_folder_count,
"changeKey": f.changekey,
}
if "unread_count" in [x.name for x in Folder.FIELDS]:
f_entry["unreadCount"] = f.unread_count
return f_entry
except AttributeError:
if isinstance(f, dict):
return {
"name": f.get("name"),
"totalCount": f.get("total_count"),
"id": f.get("id"),
"childrenFolderCount": f.get("child_folder_count"),
"changeKey": f.get("changekey"),
"unreadCount": f.get("unread_count"),
}
def mark_item_as_read(
client: EWSClient, item_ids, operation="read", target_mailbox=None
):
"""
Marks item as read
:param client: EWS Client
:param item_ids: item ids to mark as read
:param (Optional) operation: operation to execute
:param (Optional) target_mailbox: target mailbox
:return: Output tuple
"""
marked_items = []
item_ids = argToList(item_ids)
items = client.get_items_from_mailbox(target_mailbox, item_ids)
items = [x for x in items if isinstance(x, Message)]
for item in items:
item.is_read = operation == "read"
item.save()
marked_items.append(
{
ITEM_ID: item.id,
MESSAGE_ID: item.message_id,
ACTION: "marked-as-{}".format(operation),
}
)
readable_output = tableToMarkdown(
f"Marked items ({operation} marked operation)", marked_items
)
output = {CONTEXT_UPDATE_EWS_ITEM: marked_items}
return readable_output, output, marked_items
def random_word_generator(length):
"""Generate a random string of given length
"""
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(length))
def handle_html(html_body):
"""
Extract all data-url content from within the html and return as separate attachments.
Due to security implications, we support only images here
We might not have Beautiful Soup so just do regex search
"""
attachments = []
clean_body = ''
last_index = 0
for i, m in enumerate(
re.finditer(r'<img.+?src=\"(data:(image\/.+?);base64,([a-zA-Z0-9+/=\r\n]+?))\"', html_body, re.I)):
attachment = {
'data': base64.b64decode(m.group(3)),
'name': f'image{i}'
}
attachment['cid'] = f"{attachment['name']}@{random_word_generator(8)}.{random_word_generator(8)}"
attachments.append(attachment)
clean_body += html_body[last_index:m.start(1)] + 'cid:' + attachment['cid']
last_index = m.end() - 1
clean_body += html_body[last_index:]
return clean_body, attachments
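# Illustrative example (hypothetical, truncated base64 payload; the cid suffix is random):
#   handle_html('<img src="data:image/png;base64,iVBORw0KGgo=">')
#   -> ('<img src="cid:image0@xxxxxxxx.xxxxxxxx">',
#       [{'data': b'\x89PNG\r\n\x1a\n', 'name': 'image0', 'cid': 'image0@xxxxxxxx.xxxxxxxx'}])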
def collect_manual_attachments(manualAttachObj):
"""Collect all manual attachments' data
Args:
manualAttachObj (str): String representation of the manually attached files list.
Returns:
List[Dict]. List of the files data.
"""
manually_attached_objects = argToList(manualAttachObj)
attachments = []
for attachment in manually_attached_objects:
file_res = demisto.getFilePath(os.path.basename(attachment['RealFileName']))
path = file_res['path']
with open(path, 'rb') as fp:
data = fp.read()
attachments.append({
'name': attachment['FileName'],
'data': data,
'cid': ''
})
return attachments
def collect_attachments(attachments_ids, attachments_cids, attachments_names):
"""Collect all attachments' data
Args:
attachments_ids (str): String representation of the files ids list.
attachments_cids (str): String representation of the files content ids list.
attachments_names (str): String representation of the files names list.
Returns:
List[Dict]. List of the files data.
"""
attachments = []
files_ids = argToList(attachments_ids)
files_cids = argToList(attachments_cids)
files_names = argToList(attachments_names)
for index, file_id in enumerate(files_ids):
try:
file_res = demisto.getFilePath(file_id)
path = file_res['path']
if len(files_names) > index and files_names[index]:
filename = files_names[index]
else:
filename = file_res['name']
if len(files_cids) > index and files_cids[index]:
cid = files_cids[index]
else:
cid = ''
with open(path, 'rb') as fp:
data = fp.read()
attachments.append({
'name': filename,
'data': data,
'cid': cid
})
except Exception as e:
demisto.error(f'Invalid entry {file_id} with exception: {e}')
return_error(f'Entry {file_id} is not valid or is not a file entry')
return attachments
def handle_transient_files(transient_files, transient_files_contents, transient_files_cids):
"""Creates the transient attachments data
Args:
transient_files (str): String representation of the transient files names list.
transient_files_contents (str): String representation of the transient files content list.
transient_files_cids (str): String representation of the transient files content ids list.
Returns:
List[Dict]. List of the transient files data.
"""
transient_attachments = []
files_names = argToList(transient_files)
files_contents = argToList(transient_files_contents)
files_cids = argToList(transient_files_cids)
for index in range(len(files_names)):
file_name = files_names[index]
if index >= len(files_contents):
break
file_content = bytes(files_contents[index], UTF_8)
if index >= len(files_cids):
file_cid = ''
else:
file_cid = files_cids[index]
transient_attachments.append({
'name': file_name,
'data': file_content,
'cid': file_cid
})
return transient_attachments
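# Illustrative example (hypothetical values):
#   handle_transient_files("a.txt,b.txt", "hello,world", "cid1")
#   -> [{'name': 'a.txt', 'data': b'hello', 'cid': 'cid1'},
#       {'name': 'b.txt', 'data': b'world', 'cid': ''}]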
def handle_template_params(template_params):
"""Translates the template params if they exist from the context
Args:
template_params (str): JSON string that represents the variable names to be replaced and the desired values.
Each value can be either a real value or a context key to fetch the value from.
Returns:
Dict. `variable_name: value_to_use` of the templated parameters.
"""
actual_params = {}
if template_params:
try:
params = json.loads(template_params)
for p in params:
if params[p].get('value'):
actual_params[p] = params[p]['value']
elif params[p].get('key'):
actual_params[p] = demisto.dt(demisto.context(), params[p]['key'])
except ValueError as e:
return_error('Unable to parse template_params: %s' % (str(e)))
return actual_params
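# Illustrative example (hypothetical template_params JSON):
#   handle_template_params('{"name": {"value": "Alice"}, "id": {"key": "incident.id"}}')
#   -> {"name": "Alice", "id": <value of incident.id fetched from the context via demisto.dt>}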
def create_message_object(to, cc, bcc, subject, body, additional_headers):
"""Creates the message object according to the existence of additional custom headers.
"""
if additional_headers:
return Message(
to_recipients=to,
cc_recipients=cc,
bcc_recipients=bcc,
subject=subject,
body=body,
**additional_headers
)
return Message(
to_recipients=to,
cc_recipients=cc,
bcc_recipients=bcc,
subject=subject,
body=body
)
def create_message(to, subject='', body='', bcc=None, cc=None, html_body=None, attachments=None,
additional_headers=None):
"""Creates the Message object that will be sent.
Args:
to (list): Main recipients.
cc (list): CC recipients.
bcc (list): BCC recipients.
subject (str): Email's subject.
body (str): Email's simple text body.
html_body (str): Email's html body.
attachments (list): Files to be attached to the mail, both inline and as files.
additional_headers (Dict): Custom headers to be added to the message.
Returns:
Message. Message object ready to be sent.
"""
if not html_body:
# This is a simple text message - we cannot have CIDs here
message = create_message_object(to, cc, bcc, subject, body, additional_headers)
for attachment in attachments:
if not attachment.get('cid'):
new_attachment = FileAttachment(name=attachment.get('name'), content=attachment.get('data'))
message.attach(new_attachment)
else:
html_body, html_attachments = handle_html(html_body)
attachments += html_attachments
message = create_message_object(to, cc, bcc, subject, HTMLBody(html_body), additional_headers)
for attachment in attachments:
if not attachment.get('cid'):
new_attachment = FileAttachment(name=attachment.get('name'), content=attachment.get('data'))
else:
new_attachment = FileAttachment(name=attachment.get('name'), content=attachment.get('data'),
is_inline=True, content_id=attachment.get('cid'))
message.attach(new_attachment)
return message
def add_additional_headers(additional_headers):
"""Adds custom headers to the Message object
Args:
additional_headers (str): Headers list as string. Example: headerName1=headerValue1,headerName2=headerValue2
Returns:
Dict. Headers dictionary in the form of: `header_name: header value`
"""
headers = dict()
for header in argToList(additional_headers):
header_name, header_value = header.split('=', 1)
class TempClass(ExtendedProperty):
distinguished_property_set_id = 'InternetHeaders'
property_name = header_name
property_type = 'String'
try:
Message.register(header_name, TempClass)
headers[header_name] = header_value
except ValueError as e:
demisto.debug('EWSO365 - Header ' + header_name + ' could not be registered. ' + str(e))
return headers
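# Illustrative example (hypothetical header values):
#   add_additional_headers("X-Custom-Header=foo,X-Other=bar")
#   -> {"X-Custom-Header": "foo", "X-Other": "bar"}
# Each header is also registered on Message as an InternetHeaders extended property so
# exchangelib can include it in the outgoing message.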
def send_email(client: EWSClient, to, subject='', body="", bcc=None, cc=None, htmlBody=None,
attachIDs="", attachCIDs="", attachNames="", manualAttachObj=None,
transientFile=None, transientFileContent=None, transientFileCID=None, templateParams=None,
additionalHeader=None, raw_message=None):
to = argToList(to)
cc = argToList(cc)
bcc = argToList(bcc)
# Basic validation - we allow pretty much everything but you have to have at least a recipient
# We allow messages without subject and also without body
if not to and not cc and not bcc:
return_error('You must have at least one recipient')
if raw_message:
message = Message(
to_recipients=to,
cc_recipients=cc,
bcc_recipients=bcc,
body=raw_message
)
else:
if additionalHeader:
additionalHeader = add_additional_headers(additionalHeader)
# collect all types of attachments
attachments = collect_attachments(attachIDs, attachCIDs, attachNames)
attachments.extend(collect_manual_attachments(manualAttachObj))
attachments.extend(handle_transient_files(transientFile, transientFileContent, transientFileCID))
# update body and html_body with the templated params, if exists
template_params = handle_template_params(templateParams)
if template_params:
body = body.format(**template_params)
if htmlBody:
htmlBody = htmlBody.format(**template_params)
message = create_message(to, subject, body, bcc, cc, htmlBody, attachments, additionalHeader)
client.send_email(message)
return 'Mail sent successfully', {}, {}
def get_item_as_eml(client: EWSClient, item_id, target_mailbox=None):
"""
Retrieve item as an eml
:param client: EWS Client
:param item_id: Item id to retrieve
:param (Optional) target_mailbox: target mailbox
:return: Output tuple
"""
account = client.get_account(target_mailbox)
item = client.get_item_from_mailbox(account, item_id)
if item.mime_content:
mime_content = item.mime_content
if isinstance(mime_content, bytes):
email_content = email.message_from_bytes(mime_content)
else:
email_content = email.message_from_string(mime_content)
if item.headers:
attached_email_headers = [
(h, " ".join(map(str.strip, v.split("\r\n"))))
for (h, v) in list(email_content.items())
]
for header in item.headers:
if (
header.name,
header.value,
) not in attached_email_headers and header.name != "Content-Type":
email_content.add_header(header.name, header.value)
eml_name = item.subject if item.subject else "demisto_untitled_eml"
file_result = fileResult(eml_name + ".eml", email_content.as_string())
file_result = (
file_result if file_result else "Failed uploading eml file to war room"
)
return file_result
def parse_incident_from_item(item):
"""
Parses an incident from an item
:param item: item to parse
:return: Parsed item
"""
incident = {}
labels = []
try:
incident["details"] = item.text_body or item.body
except AttributeError:
incident["details"] = item.body
incident["name"] = item.subject
labels.append({"type": "Email/subject", "value": item.subject})
incident["occurred"] = item.datetime_created.ewsformat()
# handle recipients
if item.to_recipients:
for recipient in item.to_recipients:
labels.append({"type": "Email", "value": recipient.email_address})
# handle cc
if item.cc_recipients:
for recipient in item.cc_recipients:
labels.append({"type": "Email/cc", "value": recipient.email_address})
# handle email from
if item.sender:
labels.append({"type": "Email/from", "value": item.sender.email_address})
# email format
email_format = ""
try:
if item.text_body:
labels.append({"type": "Email/text", "value": item.text_body})
email_format = "text"
except AttributeError:
pass
if item.body:
labels.append({"type": "Email/html", "value": item.body})
email_format = "HTML"
labels.append({"type": "Email/format", "value": email_format})
# handle attachments
if item.attachments:
incident["attachment"] = []
for attachment in item.attachments:
file_result = None
label_attachment_type = None
label_attachment_id_type = None
if isinstance(attachment, FileAttachment):
try:
if attachment.content:
# file attachment
label_attachment_type = "attachments"
label_attachment_id_type = "attachmentId"
# save the attachment
file_name = get_attachment_name(attachment.name)
file_result = fileResult(file_name, attachment.content)
# check for error
if file_result["Type"] == entryTypes["error"]:
demisto.error(file_result["Contents"])
raise Exception(file_result["Contents"])
# save attachment to incident
incident["attachment"].append(
{
"path": file_result["FileID"],
"name": get_attachment_name(attachment.name),
}
)
except TypeError as e:
if str(e) != "must be string or buffer, not None":
raise
continue
else:
# other item attachment
label_attachment_type = "attachmentItems"
label_attachment_id_type = "attachmentItemsId"
# save the attachment
if attachment.item.mime_content:
mime_content = attachment.item.mime_content
attached_email = email.message_from_bytes(mime_content) if isinstance(mime_content, bytes) \
else email.message_from_string(mime_content)
if attachment.item.headers:
attached_email_headers = [
(h, " ".join(map(str.strip, v.split("\r\n"))))
for (h, v) in list(attached_email.items())
]
for header in attachment.item.headers:
if (
(header.name, header.value)
not in attached_email_headers
and header.name != "Content-Type"
):
attached_email.add_header(header.name, header.value)
file_result = fileResult(
get_attachment_name(attachment.name) + ".eml",
attached_email.as_bytes().decode('utf-8'),
)
if file_result:
# check for error
if file_result["Type"] == entryTypes["error"]:
demisto.error(file_result["Contents"])
raise Exception(file_result["Contents"])
# save attachment to incident
incident["attachment"].append(
{
"path": file_result["FileID"],
"name": get_attachment_name(attachment.name) + ".eml",
}
)
labels.append(
{
"type": label_attachment_type,
"value": get_attachment_name(attachment.name),
}
)
labels.append(
{"type": label_attachment_id_type, "value": attachment.attachment_id.id}
)
# handle headers
if item.headers:
headers = []
for header in item.headers:
labels.append(
{
"type": "Email/Header/{}".format(header.name),
"value": str(header.value),
}
)
headers.append("{}: {}".format(header.name, header.value))
labels.append({"type": "Email/headers", "value": "\r\n".join(headers)})
# handle item id
if item.message_id:
labels.append({"type": "Email/MessageId", "value": str(item.message_id)})
if item.id:
labels.append({"type": "Email/ID", "value": item.id})
labels.append({"type": "Email/itemId", "value": item.id})
# handle conversation id
if item.conversation_id:
labels.append({"type": "Email/ConversionID", "value": item.conversation_id.id})
incident["labels"] = labels
incident["rawJSON"] = json.dumps(parse_item_as_dict(item, None), ensure_ascii=False)
return incident
def fetch_emails_as_incidents(client: EWSClient, last_run):
"""
Fetch incidents
:param client: EWS Client
:param last_run: last run dict
:return:
"""
last_run = get_last_run(client, last_run)
try:
last_emails = fetch_last_emails(
client,
client.folder_name,
last_run.get(LAST_RUN_TIME),
last_run.get(LAST_RUN_IDS),
)
ids = deque(
last_run.get(LAST_RUN_IDS, []), maxlen=client.last_run_ids_queue_size
)
incidents = []
incident: Dict[str, str] = {}
for item in last_emails:
if item.message_id:
ids.append(item.message_id)
incident = parse_incident_from_item(item)
incidents.append(incident)
if len(incidents) >= client.max_fetch:
break
last_run_time = incident.get("occurred", last_run.get(LAST_RUN_TIME))
if isinstance(last_run_time, EWSDateTime):
last_run_time = last_run_time.ewsformat()
new_last_run = {
LAST_RUN_TIME: last_run_time,
LAST_RUN_FOLDER: client.folder_name,
LAST_RUN_IDS: list(ids),
ERROR_COUNTER: 0,
}
demisto.setLastRun(new_last_run)
return incidents
except RateLimitError:
if isinstance(last_run.get(LAST_RUN_TIME), EWSDateTime):
last_run[LAST_RUN_TIME] = last_run[LAST_RUN_TIME].ewsformat()
if ERROR_COUNTER not in last_run:
last_run[ERROR_COUNTER] = 0
last_run[ERROR_COUNTER] += 1
demisto.setLastRun(last_run)
if last_run[ERROR_COUNTER] > 2:
raise
return []
def fetch_last_emails(
client: EWSClient, folder_name="Inbox", since_datetime=None, exclude_ids=None
):
"""
Fetches last emails
:param client: EWS client
:param (Optional) folder_name: folder name to pull from
:param (Optional) since_datetime: items will be searched after this datetime
:param (Optional) exclude_ids: exclude ids from fetch
:return: list of exchangelib.Items
"""
qs = client.get_folder_by_path(folder_name, is_public=client.is_public_folder)
if since_datetime:
qs = qs.filter(datetime_received__gte=since_datetime)
else:
last_10_min = EWSDateTime.now(tz=EWSTimeZone.timezone("UTC")) - timedelta(
minutes=10
)
qs = qs.filter(last_modified_time__gte=last_10_min)
qs = qs.filter().only(*[x.name for x in Message.FIELDS])
qs = qs.filter().order_by("datetime_received")
result = qs.all()
result = [x for x in result if isinstance(x, Message)]
if exclude_ids and len(exclude_ids) > 0:
exclude_ids = set(exclude_ids)
result = [x for x in result if x.message_id not in exclude_ids]
return result
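# Note: the queryset above is ordered by datetime_received (ascending) and the caller
# passes the message ids recorded in the previous run as exclude_ids, so items already
# fetched are filtered out and do not create duplicate incidents.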
def test_module(client: EWSClient, max_fetch):
"""
test-module
* Max incidents per fetch <= MAX_INCIDENTS_PER_FETCH
* Account can be retrieved
* Account has read rights
* Test access to fetch folder
:param client: EWS Client
:param max_fetch: Max incidents per fetch
:return: "ok"
"""
try:
if int(max_fetch) > MAX_INCIDENTS_PER_FETCH:
return_error(f'Error - Max incidents per fetch cannot be greater than {MAX_INCIDENTS_PER_FETCH}. '
f'You provided: {max_fetch}')
account = client.get_account()
if not account.root.effective_rights.read: # pylint: disable=E1101
raise Exception(
"Success to authenticate, but user has no permissions to read from the mailbox. "
"Need to delegate the user permissions to the mailbox - "
"please read integration documentation and follow the instructions"
)
client.get_folder_by_path(
client.folder_name, account, client.is_public_folder
).test_access()
except ErrorFolderNotFound as e:
if "Top of Information Store" in str(e):
raise Exception(
"Success to authenticate, but user probably has no permissions to read from the specific folder."
"Check user permissions. You can try !ews-find-folders command to "
"get all the folders structure that the user has permissions to"
)
return "ok"
def sub_main():
is_test_module = False
params = demisto.params()
args = prepare_args(demisto.args())
# client's default_target_mailbox is the authorization source for the instance
params['default_target_mailbox'] = args.get('target_mailbox',
args.get('source_mailbox', params['default_target_mailbox']))
client = EWSClient(**params)
start_logging()
try:
command = demisto.command()
# commands that return a single note result
normal_commands = {
"ews-get-searchable-mailboxes": get_searchable_mailboxes,
"ews-move-item-between-mailboxes": move_item_between_mailboxes,
"ews-move-item": move_item,
"ews-delete-items": delete_items,
"ews-search-mailbox": search_items_in_mailbox,
"ews-get-contacts": get_contacts,
"ews-get-out-of-office": get_out_of_office_state,
"ews-recover-messages": recover_soft_delete_item,
"ews-create-folder": create_folder,
"ews-mark-item-as-junk": mark_item_as_junk,
"ews-find-folders": find_folders,
"ews-get-items-from-folder": get_items_from_folder,
"ews-get-items": get_items,
"ews-get-folder": get_folder,
"ews-expand-group": get_expanded_group,
"ews-mark-items-as-read": mark_item_as_read,
"send-mail": send_email,
}
# commands that may return multiple results or non-note result
special_output_commands = {
"ews-get-attachment": fetch_attachments_for_message,
"ews-delete-attachment": delete_attachments_for_message,
"ews-get-items-as-eml": get_item_as_eml,
}
# system commands:
if command == "test-module":
is_test_module = True
demisto.results(test_module(client, params.get('max_fetch')))
elif command == "fetch-incidents":
last_run = demisto.getLastRun()
incidents = fetch_emails_as_incidents(client, last_run)
demisto.incidents(incidents)
# special outputs commands
elif command in special_output_commands:
demisto.results(special_output_commands[command](client, **args)) # type: ignore[operator]
# normal commands
else:
output = normal_commands[command](client, **args) # type: ignore[operator]
return_outputs(*output)
except Exception as e:
start_logging()
debug_log = log_stream.getvalue() # type: ignore[union-attr]
error_message_simple = ""
# Office365 regular maintenance case
if isinstance(e, (ErrorMailboxStoreUnavailable, ErrorMailboxMoveInProgress)):
log_message = (
"Office365 is undergoing load balancing operations. "
"As a result, the service is temporarily unavailable."
)
if demisto.command() == "fetch-incidents":
demisto.info(log_message)
demisto.incidents([])
sys.exit(0)
if is_test_module:
demisto.results(
log_message + " Please retry the instance configuration test."
)
sys.exit(0)
error_message_simple = log_message + " Please retry your request."
if isinstance(e, ConnectionError):
error_message_simple = (
"Could not connect to the server.\n"
f"Additional information: {str(e)}"
)
else:
if is_test_module and isinstance(e, MalformedResponseError):
error_message_simple = (
"Got invalid response from the server.\n"
)
# Legacy error handling
if "Status code: 401" in debug_log:
error_message_simple = (
"Got unauthorized from the server. "
)
if "Status code: 503" in debug_log:
error_message_simple = (
"Got timeout from the server. "
"Probably the server is not reachable with the current settings. "
)
if not error_message_simple:
error_message = error_message_simple = str(e)
else:
error_message = error_message_simple + "\n" + str(e)
stacktrace = traceback.format_exc()
if stacktrace:
error_message += "\nFull stacktrace:\n" + stacktrace
if debug_log:
error_message += "\nFull debug log:\n" + debug_log
if demisto.command() == "fetch-incidents":
raise
if demisto.command() == "ews-search-mailbox" and isinstance(e, ValueError):
return_error(
message="Selected invalid field, please specify valid field name.",
error=e,
)
if is_test_module:
demisto.results(error_message_simple)
else:
demisto.results(
{
"Type": entryTypes["error"],
"ContentsFormat": formats["text"],
"Contents": error_message_simple,
}
)
demisto.error(f"{e.__class__.__name__}: {error_message}")
finally:
exchangelib_cleanup()
if log_stream:
try:
logging.getLogger().removeHandler(log_handler) # type: ignore
log_stream.close()
except Exception as ex:
demisto.error(
"EWS: unexpected exception when trying to remove log handler: {}".format(
ex
)
)
def process_main():
"""setup stdin to fd=0 so we can read from the server"""
sys.stdin = os.fdopen(0, "r")
sub_main()
def main():
# When running big queries, like 'ews-search-mailbox', the memory might not be freed by the garbage
# collector. The `separate_process` flag runs the integration in a separate process, which prevents
# memory leakage.
separate_process = demisto.params().get("separate_process", False)
demisto.debug("Running as separate_process: {}".format(separate_process))
if separate_process:
try:
p = Process(target=process_main)
p.start()
p.join()
except Exception as ex:
demisto.error("Failed starting Process: {}".format(ex))
else:
sub_main()
from MicrosoftApiModule import * # noqa: E402
if __name__ in ("__main__", "__builtin__", "builtins"):
main()
|
import random
import string
from typing import Dict
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import sys
import traceback
import json
import os
import hashlib
from datetime import timedelta
from io import StringIO
import logging
import warnings
import email
from requests.exceptions import ConnectionError
from collections import deque
from multiprocessing import Process
import exchangelib
from exchangelib.errors import (
ErrorItemNotFound,
ResponseMessageError,
RateLimitError,
ErrorInvalidIdMalformed,
ErrorFolderNotFound,
ErrorMailboxStoreUnavailable,
ErrorMailboxMoveInProgress,
ErrorNameResolutionNoResults,
MalformedResponseError,
)
from exchangelib.items import Item, Message, Contact
from exchangelib.services.common import EWSService, EWSAccountService
from exchangelib.util import create_element, add_xml_child, MNS, TNS
from exchangelib import (
IMPERSONATION,
Account,
EWSDateTime,
EWSTimeZone,
Configuration,
FileAttachment,
Version,
Folder,
HTMLBody,
Body,
ItemAttachment,
OAUTH2,
OAuth2AuthorizationCodeCredentials,
Identity,
ExtendedProperty
)
from oauthlib.oauth2 import OAuth2Token
from exchangelib.version import EXCHANGE_O365
from exchangelib.protocol import BaseProtocol, NoVerifyHTTPAdapter
# Ignore warnings printed to stdout
warnings.filterwarnings("ignore")
""" Constants """
APP_NAME = "ms-ews-o365"
FOLDER_ID_LEN = 120
MAX_INCIDENTS_PER_FETCH = 50
# move results
MOVED_TO_MAILBOX = "movedToMailbox"
MOVED_TO_FOLDER = "movedToFolder"
# item types
FILE_ATTACHMENT_TYPE = "FileAttachment"
ITEM_ATTACHMENT_TYPE = "ItemAttachment"
ATTACHMENT_TYPE = "attachmentType"
TOIS_PATH = "/root/Top of Information Store/"
# context keys
ATTACHMENT_ID = "attachmentId"
ATTACHMENT_ORIGINAL_ITEM_ID = "originalItemId"
NEW_ITEM_ID = "newItemId"
MESSAGE_ID = "messageId"
ITEM_ID = "itemId"
ACTION = "action"
MAILBOX = "mailbox"
MAILBOX_ID = "mailboxId"
FOLDER_ID = "id"
TARGET_MAILBOX = 'receivedBy'
# context paths
CONTEXT_UPDATE_EWS_ITEM = f"EWS.Items((val.{ITEM_ID} === obj.{ITEM_ID} || " \
f"(val.{MESSAGE_ID} && obj.{MESSAGE_ID} && val.{MESSAGE_ID} === obj.{MESSAGE_ID}))" \
f" && val.{TARGET_MAILBOX} === obj.{TARGET_MAILBOX})"
CONTEXT_UPDATE_EWS_ITEM_FOR_ATTACHMENT = "EWS.Items(val.{0} == obj.{1})".format(
ITEM_ID, ATTACHMENT_ORIGINAL_ITEM_ID
)
CONTEXT_UPDATE_ITEM_ATTACHMENT = ".ItemAttachments(val.{0} == obj.{0})".format(
ATTACHMENT_ID
)
CONTEXT_UPDATE_FILE_ATTACHMENT = ".FileAttachments(val.{0} == obj.{0})".format(
ATTACHMENT_ID
)
CONTEXT_UPDATE_FOLDER = "EWS.Folders(val.{0} == obj.{0})".format(FOLDER_ID)
# fetch params
LAST_RUN_TIME = "lastRunTime"
LAST_RUN_IDS = "ids"
LAST_RUN_FOLDER = "folderName"
ERROR_COUNTER = "errorCounter"
# headers
ITEMS_RESULTS_HEADERS = [
"sender",
"subject",
"hasAttachments",
"datetimeReceived",
"receivedBy",
"author",
"toRecipients",
"textBody",
]
UTF_8 = 'utf-8'
""" Classes """
class ProxyAdapter(requests.adapters.HTTPAdapter):
"""
Proxy Adapter used to add PROXY to requests
"""
def send(self, *args, **kwargs):
kwargs['proxies'] = handle_proxy()
return super().send(*args, **kwargs)
class InsecureProxyAdapter(NoVerifyHTTPAdapter):
"""
Insecure Proxy Adapter used to add PROXY and INSECURE to requests
NoVerifyHTTPAdapter is a built-in insecure HTTPAdapter class
"""
def send(self, *args, **kwargs):
kwargs['proxies'] = handle_proxy()
return super().send(*args, **kwargs)
class EWSClient:
def __init__(
self,
default_target_mailbox,
client_id,
client_secret,
tenant_id,
folder="Inbox",
is_public_folder=False,
request_timeout="120",
max_fetch=MAX_INCIDENTS_PER_FETCH,
self_deployed=True,
insecure=True,
proxy=False,
**kwargs,
):
"""
Client used to communicate with EWS
:param default_target_mailbox: Email address from which to fetch incidents
:param client_id: Application client ID
        :param client_secret: Application client secret
        :param tenant_id: Azure Active Directory tenant ID
:param folder: Name of the folder from which to fetch incidents
:param is_public_folder: Public Folder flag
:param request_timeout: Timeout (in seconds) for HTTP requests to Exchange Server
:param max_fetch: Max incidents per fetch
:param insecure: Trust any certificate (not secure)
"""
BaseProtocol.TIMEOUT = int(request_timeout)
self.ews_server = "https://outlook.office365.com/EWS/Exchange.asmx/"
self.ms_client = MicrosoftClient(
tenant_id=tenant_id,
auth_id=client_id,
enc_key=client_secret,
app_name=APP_NAME,
base_url=self.ews_server,
verify=not insecure,
proxy=proxy,
self_deployed=self_deployed,
scope="https://outlook.office.com/.default",
)
self.folder_name = folder
self.is_public_folder = is_public_folder
self.access_type = kwargs.get('access_type') or IMPERSONATION
self.max_fetch = min(MAX_INCIDENTS_PER_FETCH, int(max_fetch))
self.last_run_ids_queue_size = 500
self.client_id = client_id
self.client_secret = client_secret
self.account_email = default_target_mailbox
self.config = self.__prepare(insecure)
self.protocol = BaseProtocol(self.config)
def __prepare(self, insecure):
"""
Prepares the client PROTOCOL, CREDENTIALS and CONFIGURATION
:param insecure: Trust any certificate (not secure)
:return: OAuth 2 Configuration
"""
BaseProtocol.HTTP_ADAPTER_CLS = InsecureProxyAdapter if insecure else ProxyAdapter
access_token = self.ms_client.get_access_token()
oauth2_token = OAuth2Token({"access_token": access_token})
self.credentials = credentials = OAuth2AuthorizationCodeCredentials(
client_id=self.client_id,
client_secret=self.client_secret,
access_token=oauth2_token,
)
# need to add identity for protocol OAuth header
self.credentials.identity = Identity(upn=self.account_email)
config_args = {
"credentials": credentials,
"auth_type": OAUTH2,
"version": Version(EXCHANGE_O365),
"service_endpoint": "https://outlook.office365.com/EWS/Exchange.asmx",
}
return Configuration(**config_args)
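        # Illustrative flow (assuming valid Azure app credentials): MicrosoftClient obtains
        # an access token, which is wrapped in an OAuth2Token and handed to exchangelib so
        # every EWS request to outlook.office365.com carries the bearer token.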
def get_account(self, target_mailbox=None):
"""
Request an account from EWS
:param (Optional) target_mailbox: Mailbox associated with the requested account
:return: exchangelib Account
"""
if not target_mailbox:
target_mailbox = self.account_email
return Account(
primary_smtp_address=target_mailbox,
autodiscover=False,
config=self.config,
access_type=self.access_type,
)
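    # Example (illustrative): client.get_account("shared@example.com") returns an
    # exchangelib Account bound to that mailbox via the configured access type
    # (IMPERSONATION by default); with no argument the default_target_mailbox is used.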
def get_items_from_mailbox(self, account, item_ids):
"""
Request specific items from a mailbox associated with an account
:param account: EWS account or target_mailbox associated with that account
:param item_ids: item_ids of the requested items
:return: list of exchangelib Items
"""
        # allow the caller to pass a target_mailbox string instead of an Account
        if isinstance(account, str):
            account = self.get_account(account)
        elif not isinstance(account, Account):
            # e.g. None was passed - fall back to the default mailbox
            account = self.get_account(self.account_email)
if type(item_ids) is not list:
item_ids = [item_ids]
items = [Item(id=x) for x in item_ids]
result = list(account.fetch(ids=items))
result = [x for x in result if not isinstance(x, ErrorItemNotFound)]
if len(result) != len(item_ids):
raise Exception(
"One or more items were not found. Check the input item ids"
)
return result
def get_item_from_mailbox(self, account, item_id):
"""
Request a single item from a mailbox associated with an account
:param account: EWS account or target_mailbox associated with that account
:param item_id: item_id of the requested item
:return: exchangelib Item
"""
result = self.get_items_from_mailbox(account, [item_id])
if len(result) == 0:
raise Exception(f"ItemId {str(item_id)} not found")
return result[0]
def get_attachments_for_item(self, item_id, account, attachment_ids=None):
"""
Request attachments for an item
:param item_id: item_id of the item to retrieve attachments from
:param account: EWS account or target_mailbox associated with that account
        :param (Optional) attachment_ids: attachment ids to retrieve
:return: list of exchangelib Item.attachments
"""
item = self.get_item_from_mailbox(account, item_id)
attachments = []
attachment_ids = argToList(attachment_ids)
if item:
if item.attachments:
for attachment in item.attachments:
if (
attachment_ids
and attachment.attachment_id.id not in attachment_ids
):
continue
attachments.append(attachment)
else:
raise Exception("Message item not found: " + item_id)
if attachment_ids and len(attachments) < len(attachment_ids):
raise Exception(
"Some attachment id did not found for message:" + str(attachment_ids)
)
return attachments
def is_default_folder(self, folder_path, is_public=None):
"""
        Determine whether the given folder_path should be treated as a public folder
        :param folder_path: folder path to check
:param is_public: (Optional) if provided, will return this value
:return: Boolean
"""
if is_public is not None:
return is_public
if folder_path == self.folder_name:
return self.is_public_folder
return False
def get_folder_by_path(self, path, account=None, is_public=False):
"""
Retrieve folder by path
:param path: path of the folder
:param account: account associated with the requested path
:param is_public: is the requested folder public
:return: exchangelib Folder
"""
if account is None:
account = self.get_account()
# handle exchange folder id
if len(path) == FOLDER_ID_LEN:
folders_map = account.root._folders_map
if path in folders_map:
                return folders_map[path]
if is_public:
folder_result = account.public_folders_root
elif path == "AllItems":
folder_result = account.root
else:
folder_result = account.inbox.parent # Top of Information Store
path = path.replace("/", "\\")
path = path.split("\\")
for sub_folder_name in path:
folder_filter_by_name = [
x
for x in folder_result.children
if x.name.lower() == sub_folder_name.lower()
]
if len(folder_filter_by_name) == 0:
raise Exception(f"No such folder {path}")
folder_result = folder_filter_by_name[0]
return folder_result
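    # Accepted path forms (illustrative): "Inbox/Phishing" or "Inbox\\Phishing" is walked
    # folder by folder under Top of Information Store, "AllItems" maps to the mailbox root,
    # and a 120-character value is treated as an Exchange folder id and looked up directly.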
def send_email(self, message: Message):
account = self.get_account()
message.account = account
message.send_and_save()
class MarkAsJunk(EWSAccountService):
"""
EWSAccountService class used for marking items as junk
"""
SERVICE_NAME = "MarkAsJunk"
def call(self, item_id, move_item):
elements = list(
self._get_elements(
payload=self.get_payload(item_id=item_id, move_item=move_item)
)
)
for element in elements:
if isinstance(element, ResponseMessageError):
return str(element)
return "Success"
def get_payload(self, item_id, move_item):
junk = create_element(
f"m:{self.SERVICE_NAME}",
{"IsJunk": "true", "MoveItem": "true" if move_item else "false"},
)
items_list = create_element("m:ItemIds")
item_element = create_element("t:ItemId", {"Id": item_id})
items_list.append(item_element)
junk.append(items_list)
return junk
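    # The payload built above roughly serializes to (illustrative):
    #   <m:MarkAsJunk IsJunk="true" MoveItem="true">
    #     <m:ItemIds><t:ItemId Id="..."/></m:ItemIds>
    #   </m:MarkAsJunk>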
class GetSearchableMailboxes(EWSService):
"""
    EWSService class used for getting searchable mailboxes
"""
SERVICE_NAME = "GetSearchableMailboxes"
element_container_name = f"{{{MNS}}}SearchableMailboxes"
@staticmethod
def parse_element(element):
return {
MAILBOX: element.find(f"{{{TNS}}}PrimarySmtpAddress").text
if element.find(f"{{{TNS}}}PrimarySmtpAddress") is not None
else None,
MAILBOX_ID: element.find(f"{{{TNS}}}ReferenceId").text
if element.find(f"{{{TNS}}}ReferenceId") is not None
else None,
"displayName": element.find(f"{{{TNS}}}DisplayName").text
if element.find(f"{{{TNS}}}DisplayName") is not None
else None,
"isExternal": element.find(f"{{{TNS}}}IsExternalMailbox").text
if element.find(f"{{{TNS}}}IsExternalMailbox") is not None
else None,
"externalEmailAddress": element.find(f"{{{TNS}}}ExternalEmailAddress").text
if element.find(f"{{{TNS}}}ExternalEmailAddress") is not None
else None,
}
def call(self):
elements = self._get_elements(payload=self.get_payload())
return [
self.parse_element(x)
for x in elements
if x.find(f"{{{TNS}}}ReferenceId").text
]
def get_payload(self):
element = create_element(f"m:{self.SERVICE_NAME}")
return element
class ExpandGroup(EWSService):
"""
    EWSService class used for expanding distribution groups
"""
SERVICE_NAME = "ExpandDL"
element_container_name = f"{{{MNS}}}DLExpansion"
@staticmethod
def parse_element(element):
return {
MAILBOX: element.find(f"{{{TNS}}}EmailAddress").text
if element.find(f"{{{TNS}}}EmailAddress") is not None
else None,
"displayName": element.find(f"{{{TNS}}}Name").text
if element.find(f"{{{TNS}}}Name") is not None
else None,
"mailboxType": element.find(f"{{{TNS}}}MailboxType").text
if element.find(f"{{{TNS}}}MailboxType") is not None
else None,
}
def call(self, email_address, recursive_expansion=False):
try:
if recursive_expansion == "True":
group_members: Dict = {}
self.expand_group_recursive(email_address, group_members)
return list(group_members.values())
else:
return self.expand_group(email_address)
except ErrorNameResolutionNoResults:
demisto.results("No results were found.")
sys.exit()
def get_payload(self, email_address):
element = create_element(f"m:{self.SERVICE_NAME}")
mailbox_element = create_element("m:Mailbox")
add_xml_child(mailbox_element, "t:EmailAddress", email_address)
element.append(mailbox_element)
return element
def expand_group(self, email_address):
"""
Expand given group
:param email_address: email address of the group to expand
:return: list dict with parsed expanded group data
"""
elements = self._get_elements(payload=self.get_payload(email_address))
return [self.parse_element(x) for x in elements]
def expand_group_recursive(self, email_address, non_dl_emails, dl_emails=None):
"""
Expand group recursively
:param email_address: email address of the group to expand
        :param non_dl_emails: dict of non distribution-list members, filled in place
        :param dl_emails: (Optional) set of distribution-list addresses already expanded
        :return: None - results are accumulated in non_dl_emails and dl_emails
"""
if dl_emails is None:
dl_emails = set()
if email_address in non_dl_emails or email_address in dl_emails:
return None
dl_emails.add(email_address)
for member in self.expand_group(email_address):
if (
member["mailboxType"] == "PublicDL"
or member["mailboxType"] == "PrivateDL"
):
self.expand_group_recursive(member.get("mailbox"), non_dl_emails, dl_emails)
else:
if member["mailbox"] not in non_dl_emails:
non_dl_emails[member["mailbox"]] = member
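# Illustrative usage: ExpandGroup(protocol=client.protocol).call("dl@example.com", "True")
# walks nested public/private distribution lists and returns the de-duplicated members
# collected in non_dl_emails.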
# If you are modifying this, you probably also need to modify it in other files
def exchangelib_cleanup():
key_protocols = list(exchangelib.protocol.CachingProtocol._protocol_cache.items())
try:
exchangelib.close_connections()
except Exception as ex:
demisto.error("Error was found in exchangelib cleanup, ignoring: {}".format(ex))
for key, protocol in key_protocols:
try:
if "thread_pool" in protocol.__dict__:
demisto.debug(
"terminating thread pool key{} id: {}".format(
key, id(protocol.thread_pool)
)
)
protocol.thread_pool.terminate()
del protocol.__dict__["thread_pool"]
else:
demisto.info(
"Thread pool not found (ignoring terminate) in protcol dict: {}".format(
dir(protocol.__dict__)
)
)
except Exception as ex:
demisto.error("Error with thread_pool.terminate, ignoring: {}".format(ex))
""" LOGGING """
log_stream = None
log_handler = None
def start_logging():
global log_stream
global log_handler
logging.raiseExceptions = False
if log_stream is None:
log_stream = StringIO()
log_handler = logging.StreamHandler(stream=log_stream)
log_handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
logger = logging.getLogger()
logger.addHandler(log_handler)
logger.setLevel(logging.DEBUG)
""" Helper Functions """
def get_attachment_name(attachment_name):
"""
    Retrieve the attachment name, or a default placeholder name if none is provided
:param attachment_name: attachment name to retrieve
:return: string
"""
if attachment_name is None or attachment_name == "":
return "demisto_untitled_attachment"
return attachment_name
def get_entry_for_object(title, context_key, obj, headers=None):
"""
Create an entry for a given object
:param title: Title of the human readable
:param context_key: Context key used for entry context
:param obj: Object to create entry for
:param headers: (Optional) headers used in the tableToMarkDown
:return: Entry object to be used with demisto.results()
"""
if len(obj) == 0:
return "There is no output results"
if headers and isinstance(obj, dict):
headers = list(set(headers).intersection(set(obj.keys())))
return {
"Type": entryTypes["note"],
"Contents": obj,
"ContentsFormat": formats["json"],
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": tableToMarkdown(title, obj, headers),
"EntryContext": {context_key: obj},
}
def prepare_args(args):
"""
    Prepare arguments in the form the API expects
:param args: demisto args
:return: transformed args
"""
args = dict((k.replace("-", "_"), v) for k, v in list(args.items()))
if "is_public" in args:
args["is_public"] = args["is_public"] == "True"
return args
def get_limited_number_of_messages_from_qs(qs, limit):
"""
Retrieve a limited number of messages from query search
:param qs: query search to execute
:param limit: limit on number of items to retrieve from search
:return: list of exchangelib.Message
"""
count = 0
results = []
for item in qs:
if count == limit:
break
if isinstance(item, Message):
count += 1
results.append(item)
return results
def keys_to_camel_case(value):
"""
Transform keys from snake to camel case (does nothing if no snakes are found)
:param value: value to transform
:return: transformed value
"""
def str_to_camel_case(snake_str):
components = snake_str.split("_")
return components[0] + "".join(x.title() for x in components[1:])
if value is None:
return None
if isinstance(value, (list, set)):
return list(map(keys_to_camel_case, value))
if isinstance(value, dict):
return dict(
(
keys_to_camel_case(k),
keys_to_camel_case(v) if isinstance(v, (list, dict)) else v,
)
for (k, v) in list(value.items())
)
return str_to_camel_case(value)
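# For example, keys_to_camel_case({"message_id": "x", "to_recipients": []}) returns
# {"messageId": "x", "toRecipients": []}; keys of nested dicts are converted as well.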
def get_last_run(client: EWSClient, last_run=None):
"""
Retrieve the last run time
:param client: EWS Client
:param last_run: (Optional) last run object
:return: last run dict
"""
if not last_run or last_run.get(LAST_RUN_FOLDER) != client.folder_name:
last_run = {
LAST_RUN_TIME: None,
LAST_RUN_FOLDER: client.folder_name,
LAST_RUN_IDS: [],
}
if LAST_RUN_TIME in last_run and last_run[LAST_RUN_TIME] is not None:
last_run[LAST_RUN_TIME] = EWSDateTime.from_string(last_run[LAST_RUN_TIME])
# In case we have existing last_run data
if last_run.get(LAST_RUN_IDS) is None:
last_run[LAST_RUN_IDS] = []
return last_run
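# A fresh last-run object therefore looks like {"lastRunTime": None, "folderName": "Inbox",
# "ids": []}; on later runs the stored lastRunTime string is rehydrated into an EWSDateTime.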
def email_ec(item):
"""
Create entry context for an email
:param item: exchangelib.Item
:return: entry context dict
"""
return {
"CC": None
if not item.cc_recipients
else [mailbox.email_address for mailbox in item.cc_recipients],
"BCC": None
if not item.bcc_recipients
else [mailbox.email_address for mailbox in item.bcc_recipients],
"To": None
if not item.to_recipients
else [mailbox.email_address for mailbox in item.to_recipients],
"From": item.author.email_address,
"Subject": item.subject,
"Text": item.text_body,
"HTML": item.body,
"HeadersMap": {header.name: header.value for header in item.headers},
}
def parse_item_as_dict(item, email_address=None, camel_case=False, compact_fields=False):
"""
Parses an exchangelib item as a dict
:param item: exchangelib.Item to parse
:param (Optional) email_address: string mailbox
    :param (Optional) camel_case: whether to convert dict keys to camelCase
    :param (Optional) compact_fields: whether to trim the result to a compact set of fields
:return: Item as a dict
"""
def parse_object_as_dict(obj):
raw_dict = {}
if obj is not None:
for field in obj.FIELDS:
raw_dict[field.name] = getattr(obj, field.name, None)
return raw_dict
def parse_folder_as_json(folder):
raw_dict = parse_object_as_dict(folder)
if "parent_folder_id" in raw_dict:
raw_dict["parent_folder_id"] = parse_folder_as_json(
raw_dict["parent_folder_id"]
)
if "effective_rights" in raw_dict:
raw_dict["effective_rights"] = parse_object_as_dict(
raw_dict["effective_rights"]
)
return raw_dict
raw_dict = {}
for field, value in item._field_vals():
        if type(value) in [str, int, float, bool, Body, HTMLBody]:
raw_dict[field] = value
raw_dict["id"] = item.id
if getattr(item, "attachments", None):
raw_dict["attachments"] = [
parse_attachment_as_dict(item.id, x) for x in item.attachments
]
for time_field in [
"datetime_sent",
"datetime_created",
"datetime_received",
"last_modified_time",
"reminder_due_by",
]:
value = getattr(item, time_field, None)
if value:
raw_dict[time_field] = value.ewsformat()
for dict_field in [
"effective_rights",
"parent_folder_id",
"conversation_id",
"author",
"extern_id",
"received_by",
"received_representing",
"reply_to",
"sender",
"folder",
]:
value = getattr(item, dict_field, None)
if value:
if isinstance(value, list):
raw_dict[dict_field] = []
for single_val in value:
raw_dict[dict_field].append(parse_object_as_dict(single_val))
else:
raw_dict[dict_field] = parse_object_as_dict(value)
for list_dict_field in ["headers", "cc_recipients", "to_recipients"]:
value = getattr(item, list_dict_field, None)
if value:
raw_dict[list_dict_field] = [parse_object_as_dict(x) for x in value]
if getattr(item, "folder", None):
raw_dict["folder"] = parse_folder_as_json(item.folder)
folder_path = (
item.folder.absolute[len(TOIS_PATH):]
if item.folder.absolute.startswith(TOIS_PATH)
else item.folder.absolute
)
raw_dict["folder_path"] = folder_path
if compact_fields:
new_dict = {}
# noinspection PyListCreation
fields_list = [
"datetime_created",
"datetime_received",
"datetime_sent",
"sender",
"has_attachments",
"importance",
"message_id",
"last_modified_time",
"size",
"subject",
"text_body",
"headers",
"body",
"folder_path",
"is_read",
]
if "id" in raw_dict:
new_dict["itemId"] = raw_dict["id"]
fields_list.append("itemId")
for field in fields_list:
if field in raw_dict:
new_dict[field] = raw_dict.get(field)
for field in ["received_by", "author", "sender"]:
if field in raw_dict:
new_dict[field] = raw_dict.get(field, {}).get("email_address")
for field in ["to_recipients"]:
if field in raw_dict:
new_dict[field] = [x.get("email_address") for x in raw_dict[field]]
attachments = raw_dict.get("attachments")
if attachments and len(attachments) > 0:
file_attachments = [
x for x in attachments if x[ATTACHMENT_TYPE] == FILE_ATTACHMENT_TYPE
]
if len(file_attachments) > 0:
new_dict["FileAttachments"] = file_attachments
item_attachments = [
x for x in attachments if x[ATTACHMENT_TYPE] == ITEM_ATTACHMENT_TYPE
]
if len(item_attachments) > 0:
new_dict["ItemAttachments"] = item_attachments
raw_dict = new_dict
if camel_case:
raw_dict = keys_to_camel_case(raw_dict)
if email_address:
raw_dict[MAILBOX] = email_address
return raw_dict
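# With camel_case=True and compact_fields=True the result is trimmed to a fixed allow-list
# (sender, subject, body, folder_path, itemId, attachment groups, etc.) and keys are
# camelCased - this is the shape the commands below write to the EWS.Items context.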
def get_entry_for_file_attachment(item_id, attachment):
"""
Creates a file entry for an attachment
    :param item_id: item_id of the item that contains the attachment
    :param attachment: exchangelib FileAttachment
    :return: file entry dict for the attachment
"""
entry = fileResult(get_attachment_name(attachment.name), attachment.content)
entry["EntryContext"] = {
CONTEXT_UPDATE_EWS_ITEM_FOR_ATTACHMENT
+ CONTEXT_UPDATE_FILE_ATTACHMENT: parse_attachment_as_dict(item_id, attachment)
}
return entry
def parse_attachment_as_dict(item_id, attachment):
"""
    Parses an attachment into a dict
    :param item_id: item_id of the item that contains the attachment
    :param attachment: exchangelib attachment
    :return: attachment data dict
"""
try:
attachment_content = (
attachment.content
if isinstance(attachment, FileAttachment)
else attachment.item.mime_content
)
return {
ATTACHMENT_ORIGINAL_ITEM_ID: item_id,
ATTACHMENT_ID: attachment.attachment_id.id,
"attachmentName": get_attachment_name(attachment.name),
"attachmentSHA256": hashlib.sha256(attachment_content).hexdigest()
if attachment_content
else None,
"attachmentContentType": attachment.content_type,
"attachmentContentId": attachment.content_id,
"attachmentContentLocation": attachment.content_location,
"attachmentSize": attachment.size,
"attachmentLastModifiedTime": attachment.last_modified_time.ewsformat(),
"attachmentIsInline": attachment.is_inline,
ATTACHMENT_TYPE: FILE_ATTACHMENT_TYPE
if isinstance(attachment, FileAttachment)
else ITEM_ATTACHMENT_TYPE,
}
except TypeError as e:
if str(e) != "must be string or buffer, not None":
raise
return {
ATTACHMENT_ORIGINAL_ITEM_ID: item_id,
ATTACHMENT_ID: attachment.attachment_id.id,
"attachmentName": get_attachment_name(attachment.name),
"attachmentSHA256": None,
"attachmentContentType": attachment.content_type,
"attachmentContentId": attachment.content_id,
"attachmentContentLocation": attachment.content_location,
"attachmentSize": attachment.size,
"attachmentLastModifiedTime": attachment.last_modified_time.ewsformat(),
"attachmentIsInline": attachment.is_inline,
ATTACHMENT_TYPE: FILE_ATTACHMENT_TYPE
if isinstance(attachment, FileAttachment)
else ITEM_ATTACHMENT_TYPE,
}
def get_entry_for_item_attachment(item_id, attachment, target_email):
"""
Creates a note entry for an item attachment
:param item_id: Item id
:param attachment: exchangelib attachment
:param target_email: target email
:return: note entry dict for item attachment
"""
item = attachment.item
dict_result = parse_attachment_as_dict(item_id, attachment)
dict_result.update(
parse_item_as_dict(item, target_email, camel_case=True, compact_fields=True)
)
title = f'EWS get attachment got item for "{target_email}", "{get_attachment_name(attachment.name)}"'
return get_entry_for_object(
title,
CONTEXT_UPDATE_EWS_ITEM_FOR_ATTACHMENT + CONTEXT_UPDATE_ITEM_ATTACHMENT,
dict_result,
)
""" Command Functions """
def get_expanded_group(client: EWSClient, email_address, recursive_expansion=False):
"""
Retrieve expanded group command
:param client: EWS Client
:param email_address: Email address of the group to expand
:param (Optional) recursive_expansion: Whether to enable recursive expansion. Default is "False".
:return: Expanded groups output tuple
"""
group_members = ExpandGroup(protocol=client.protocol).call(
email_address, recursive_expansion
)
group_details = {"name": email_address, "members": group_members}
output = {"EWS.ExpandGroup": group_details}
readable_output = tableToMarkdown("Group Members", group_members)
return readable_output, output, group_details
def get_searchable_mailboxes(client: EWSClient):
"""
Retrieve searchable mailboxes command
:param client: EWS Client
:return: Searchable mailboxes output tuple
"""
searchable_mailboxes = GetSearchableMailboxes(protocol=client.protocol).call()
readable_output = tableToMarkdown(
"Searchable mailboxes", searchable_mailboxes, headers=["displayName", "mailbox"]
)
output = {"EWS.Mailboxes": searchable_mailboxes}
return readable_output, output, searchable_mailboxes
def delete_attachments_for_message(
client: EWSClient, item_id, target_mailbox=None, attachment_ids=None
):
"""
Deletes attachments for a given message
:param client: EWS Client
:param item_id: item id
:param (Optional) target_mailbox: target mailbox
:param (Optional) attachment_ids: attachment ids to delete
    :return: entries describing the deleted attachments
"""
attachments = client.get_attachments_for_item(
item_id, target_mailbox, attachment_ids
)
deleted_file_attachments = []
deleted_item_attachments = [] # type: ignore
for attachment in attachments:
attachment_deleted_action = {
ATTACHMENT_ID: attachment.attachment_id.id,
ACTION: "deleted",
}
if isinstance(attachment, FileAttachment):
deleted_file_attachments.append(attachment_deleted_action)
else:
deleted_item_attachments.append(attachment_deleted_action)
attachment.detach()
entries = []
if len(deleted_file_attachments) > 0:
entry = get_entry_for_object(
"Deleted file attachments",
"EWS.Items" + CONTEXT_UPDATE_FILE_ATTACHMENT,
deleted_file_attachments,
)
entries.append(entry)
if len(deleted_item_attachments) > 0:
entry = get_entry_for_object(
"Deleted item attachments",
"EWS.Items" + CONTEXT_UPDATE_ITEM_ATTACHMENT,
deleted_item_attachments,
)
entries.append(entry)
return entries
def fetch_attachments_for_message(
client: EWSClient, item_id, target_mailbox=None, attachment_ids=None
):
"""
Fetches attachments for a message
:param client: EWS Client
:param item_id: item id
:param (Optional) target_mailbox: target mailbox
:param (Optional) attachment_ids: attachment ids
:return: list of parsed entries
"""
account = client.get_account(target_mailbox)
attachments = client.get_attachments_for_item(item_id, account, attachment_ids)
entries = []
for attachment in attachments:
if isinstance(attachment, FileAttachment):
try:
if attachment.content:
entries.append(get_entry_for_file_attachment(item_id, attachment))
except TypeError as e:
if str(e) != "must be string or buffer, not None":
raise
else:
entries.append(
get_entry_for_item_attachment(
item_id, attachment, account.primary_smtp_address
)
)
if attachment.item.mime_content:
entries.append(
fileResult(
get_attachment_name(attachment.name) + ".eml",
attachment.item.mime_content,
)
)
return entries
def move_item_between_mailboxes(
client: EWSClient,
item_id,
destination_mailbox,
destination_folder_path,
source_mailbox=None,
is_public=None,
):
"""
Moves item between mailboxes
:param client: EWS Client
:param item_id: item id
:param destination_mailbox: destination mailbox
:param destination_folder_path: destination folder path
:param (Optional) source_mailbox: source mailbox
:param (Optional) is_public: is the destination folder public
:return: Output tuple
"""
source_account = client.get_account(source_mailbox)
destination_account = client.get_account(destination_mailbox)
is_public = client.is_default_folder(destination_folder_path, is_public)
destination_folder = client.get_folder_by_path(
destination_folder_path, destination_account, is_public
)
item = client.get_item_from_mailbox(source_account, item_id)
exported_items = source_account.export([item])
destination_account.upload([(destination_folder, exported_items[0])])
source_account.bulk_delete([item])
move_result = {
MOVED_TO_MAILBOX: destination_mailbox,
MOVED_TO_FOLDER: destination_folder_path,
}
readable_output = "Item was moved successfully."
output = {f"EWS.Items(val.itemId === '{item_id}')": move_result}
return readable_output, output, move_result
def move_item(
client: EWSClient, item_id, target_folder_path, target_mailbox=None, is_public=None
):
"""
Moves an item within the same mailbox
:param client: EWS Client
:param item_id: item id
:param target_folder_path: target folder path
:param (Optional) target_mailbox: mailbox containing the item
:param (Optional) is_public: is the destination folder public
:return: Output tuple
"""
account = client.get_account(target_mailbox)
is_public = client.is_default_folder(target_folder_path, is_public)
target_folder = client.get_folder_by_path(target_folder_path, is_public=is_public)
item = client.get_item_from_mailbox(account, item_id)
if isinstance(item, ErrorInvalidIdMalformed):
raise Exception("Item not found")
item.move(target_folder)
move_result = {
NEW_ITEM_ID: item.id,
ITEM_ID: item_id,
MESSAGE_ID: item.message_id,
ACTION: "moved",
}
readable_output = tableToMarkdown("Moved items", move_result)
output = {CONTEXT_UPDATE_EWS_ITEM: move_result}
return readable_output, output, move_result
def delete_items(client: EWSClient, item_ids, delete_type, target_mailbox=None):
"""
Delete items in a mailbox
:param client: EWS Client
:param item_ids: items ids to delete
    :param delete_type: delete type: trash, soft or hard
    :param (Optional) target_mailbox: mailbox containing the items
:return: Output tuple
"""
deleted_items = []
item_ids = argToList(item_ids)
items = client.get_items_from_mailbox(target_mailbox, item_ids)
delete_type = delete_type.lower()
for item in items:
item_id = item.id
if delete_type == "trash":
item.move_to_trash()
elif delete_type == "soft":
item.soft_delete()
elif delete_type == "hard":
item.delete()
else:
raise Exception(
f'invalid delete type: {delete_type}. Use "trash" \\ "soft" \\ "hard"'
)
deleted_items.append(
{
ITEM_ID: item_id,
MESSAGE_ID: item.message_id,
ACTION: f"{delete_type}-deleted",
}
)
readable_output = tableToMarkdown(
f"Deleted items ({delete_type} delete type)", deleted_items
)
output = {CONTEXT_UPDATE_EWS_ITEM: deleted_items}
return readable_output, output, deleted_items
def search_items_in_mailbox(
client: EWSClient,
query=None,
message_id=None,
folder_path="",
limit=100,
target_mailbox=None,
is_public=None,
selected_fields="all",
):
"""
Search items in mailbox
:param client: EWS Client
:param (Optional) query: query to execute
:param (Optional) message_id: message ids to search
:param (Optional) folder_path: folder path to search
:param (Optional) limit: max amount of items to fetch
:param (Optional) target_mailbox: mailbox containing the items
:param (Optional) is_public: is the targeted folder public
:param (Optional) selected_fields: Selected fields
:return: Output tuple
"""
if not query and not message_id:
return_error("Missing required argument. Provide query or message-id")
if message_id and message_id[0] != "<" and message_id[-1] != ">":
message_id = "<{}>".format(message_id)
account = client.get_account(target_mailbox)
limit = int(limit)
if folder_path.lower() == "inbox":
folders = [account.inbox]
elif folder_path:
is_public = client.is_default_folder(folder_path, is_public)
folders = [client.get_folder_by_path(folder_path, account, is_public)]
else:
folders = account.inbox.parent.walk() # pylint: disable=E1101
items = [] # type: ignore
selected_all_fields = selected_fields == "all"
if selected_all_fields:
restricted_fields = list([x.name for x in Message.FIELDS]) # type: ignore
else:
restricted_fields = set(argToList(selected_fields)) # type: ignore
restricted_fields.update(["id", "message_id"]) # type: ignore
for folder in folders:
if Message not in folder.supported_item_models:
continue
if query:
items_qs = folder.filter(query).only(*restricted_fields)
else:
items_qs = folder.filter(message_id=message_id).only(*restricted_fields)
items += get_limited_number_of_messages_from_qs(items_qs, limit)
if len(items) >= limit:
break
items = items[:limit]
searched_items_result = [
parse_item_as_dict(
item,
account.primary_smtp_address,
camel_case=True,
compact_fields=selected_all_fields,
)
for item in items
]
if not selected_all_fields:
searched_items_result = [
{k: v for (k, v) in i.items() if k in keys_to_camel_case(restricted_fields)}
for i in searched_items_result
]
for item in searched_items_result:
item["itemId"] = item.pop("id", "")
readable_output = tableToMarkdown(
"Searched items",
searched_items_result,
headers=ITEMS_RESULTS_HEADERS if selected_all_fields else None,
)
output = {CONTEXT_UPDATE_EWS_ITEM: searched_items_result}
return readable_output, output, searched_items_result
def get_out_of_office_state(client: EWSClient, target_mailbox=None):
"""
Retrieve get out of office state of the targeted mailbox
:param client: EWS Client
:param (Optional) target_mailbox: target mailbox
:return: Output tuple
"""
account = client.get_account(target_mailbox)
oof = account.oof_settings
oof_dict = {
"state": oof.state, # pylint: disable=E1101
"externalAudience": getattr(oof, "external_audience", None),
"start": oof.start.ewsformat() if oof.start else None, # pylint: disable=E1101
"end": oof.end.ewsformat() if oof.end else None, # pylint: disable=E1101
"internalReply": getattr(oof, "internal_replay", None),
"externalReply": getattr(oof, "external_replay", None),
MAILBOX: account.primary_smtp_address,
}
readable_output = tableToMarkdown(
f"Out of office state for {account.primary_smtp_address}", oof_dict
)
output = {f"Account.Email(val.Address == obj.{MAILBOX}).OutOfOffice": oof_dict}
return readable_output, output, oof_dict
def recover_soft_delete_item(
client: EWSClient,
message_ids,
target_folder_path="Inbox",
target_mailbox=None,
is_public=None,
):
"""
Recovers soft deleted items
:param client: EWS Client
:param message_ids: Message ids to recover
:param (Optional) target_folder_path: target folder path
:param (Optional) target_mailbox: target mailbox
:param (Optional) is_public: is the target folder public
:return:
"""
account = client.get_account(target_mailbox)
is_public = client.is_default_folder(target_folder_path, is_public)
target_folder = client.get_folder_by_path(target_folder_path, account, is_public)
recovered_messages = []
message_ids = argToList(message_ids)
items_to_recover = account.recoverable_items_deletions.filter( # pylint: disable=E1101
message_id__in=message_ids
).all() # pylint: disable=E1101
recovered_items = set()
for item in items_to_recover:
recovered_items.add(item)
if len(recovered_items) != len(message_ids):
        missing_items = set(message_ids).difference(
            {item.message_id for item in recovered_items}
        )
raise Exception(
f"Some message ids are missing in recoverable items directory: {missing_items}"
)
for item in recovered_items:
item.move(target_folder)
recovered_messages.append(
{ITEM_ID: item.id, MESSAGE_ID: item.message_id, ACTION: "recovered"}
)
readable_output = tableToMarkdown("Recovered messages", recovered_messages)
output = {CONTEXT_UPDATE_EWS_ITEM: recovered_messages}
return readable_output, output, recovered_messages
def get_contacts(client: EWSClient, limit, target_mailbox=None):
"""
Retrieve contacts of the target mailbox or client mailbox
:param client: EWS Client
:param limit: max amount of contacts to retrieve
:param (Optional) target_mailbox: Target mailbox
:return:
"""
def parse_physical_address(address):
result = {}
for attr in ["city", "country", "label", "state", "street", "zipcode"]:
result[attr] = getattr(address, attr, None)
return result
def parse_phone_number(phone_number):
result = {}
for attr in ["label", "phone_number"]:
result[attr] = getattr(phone_number, attr, None)
return result
def parse_contact(contact):
contact_dict = dict(
(k, v if not isinstance(v, EWSDateTime) else v.ewsformat())
for k, v in list(contact._field_vals())
if isinstance(v, str) or isinstance(v, EWSDateTime)
)
if isinstance(contact, Contact) and contact.physical_addresses:
contact_dict["physical_addresses"] = list(
map(parse_physical_address, contact.physical_addresses)
)
if isinstance(contact, Contact) and contact.phone_numbers:
contact_dict["phone_numbers"] = list(
map(parse_phone_number, contact.phone_numbers)
)
if (
isinstance(contact, Contact)
and contact.email_addresses
and len(contact.email_addresses) > 0
):
contact_dict["emailAddresses"] = [x.email for x in contact.email_addresses]
contact_dict = keys_to_camel_case(contact_dict)
contact_dict = dict((k, v) for k, v in list(contact_dict.items()) if v)
contact_dict.pop("mimeContent", None)
contact_dict["originMailbox"] = target_mailbox
return contact_dict
account = client.get_account(target_mailbox)
contacts = []
for contact in account.contacts.all()[: int(limit)]: # pylint: disable=E1101
contacts.append(parse_contact(contact))
readable_output = tableToMarkdown(f"Email contacts for {target_mailbox}", contacts)
output = {"Account.Email(val.Address == obj.originMailbox).EwsContacts": contacts}
return readable_output, output, contacts
def create_folder(client: EWSClient, new_folder_name, folder_path, target_mailbox=None):
"""
Creates a folder in the target mailbox or the client mailbox
:param client: EWS Client
:param new_folder_name: new folder name
:param folder_path: path of the new folder
:param (Optional) target_mailbox: target mailbox
:return: Output tuple
"""
account = client.get_account(target_mailbox)
full_path = os.path.join(folder_path, new_folder_name)
try:
if client.get_folder_by_path(full_path, account):
return f"Folder {full_path} already exists",
except Exception:
pass
parent_folder = client.get_folder_by_path(folder_path, account)
f = Folder(parent=parent_folder, name=new_folder_name)
f.save()
client.get_folder_by_path(full_path, account)
return f"Folder {full_path} created successfully",
def find_folders(client: EWSClient, target_mailbox=None):
"""
Finds folders in the mailbox
:param client: EWS Client
:param (Optional) target_mailbox: target mailbox
:return: Output tuple
"""
account = client.get_account(target_mailbox)
root = account.root
if client.is_public_folder:
root = account.public_folders_root
folders = []
for f in root.walk(): # pylint: disable=E1101
folder = folder_to_context_entry(f)
folders.append(folder)
folders_tree = root.tree() # pylint: disable=E1101
readable_output = folders_tree
output = {"EWS.Folders(val.id == obj.id)": folders}
return readable_output, output, folders
def mark_item_as_junk(client: EWSClient, item_id, move_items, target_mailbox=None):
"""
Marks item as junk in the target mailbox or client mailbox
:param client: EWS Client
:param item_id: item ids to mark as junk
:param move_items: "yes" or "no" - to move or not to move to trash
:param (Optional) target_mailbox: target mailbox
:return:
"""
account = client.get_account(target_mailbox)
move_items = move_items.lower() == "yes"
ews_result = MarkAsJunk(account=account).call(item_id=item_id, move_item=move_items)
mark_as_junk_result = {
ITEM_ID: item_id,
}
if ews_result == "Success":
mark_as_junk_result[ACTION] = "marked-as-junk"
else:
raise Exception("Failed mark-item-as-junk with error: " + ews_result)
readable_output = tableToMarkdown("Mark item as junk", mark_as_junk_result)
output = {CONTEXT_UPDATE_EWS_ITEM: mark_as_junk_result}
return readable_output, output, mark_as_junk_result
def get_items_from_folder(
client: EWSClient,
folder_path,
limit=100,
target_mailbox=None,
is_public=None,
get_internal_item="no",
):
"""
Retrieve items from folder path
:param client: EWS Client
:param folder_path: folder path
:param (Optional) limit: max amount of items to retrieve
:param (Optional) target_mailbox: target mailbox
:param (Optional) is_public: is the folder public
:param (Optional) get_internal_item: should also retrieve internal items ("no" by default)
:return: Output tuple
"""
account = client.get_account(target_mailbox)
limit = int(limit)
get_internal_item = get_internal_item == "yes"
is_public = client.is_default_folder(folder_path, is_public)
folder = client.get_folder_by_path(folder_path, account, is_public)
qs = folder.filter().order_by("-datetime_created")[:limit]
items = get_limited_number_of_messages_from_qs(qs, limit)
items_result = []
for item in items:
item_attachment = parse_item_as_dict(
item, account.primary_smtp_address, camel_case=True, compact_fields=True
)
for attachment in item.attachments:
if (
get_internal_item
and isinstance(attachment, ItemAttachment)
and isinstance(attachment.item, Message)
):
                # if an item attachment was found - switch the item to the attachment
item_attachment = parse_item_as_dict(
attachment.item,
account.primary_smtp_address,
camel_case=True,
compact_fields=True,
)
break
items_result.append(item_attachment)
hm_headers = [
"sender",
"subject",
"hasAttachments",
"datetimeReceived",
"receivedBy",
"author",
"toRecipients",
"id",
]
readable_output = tableToMarkdown(
"Items in folder " + folder_path, items_result, headers=hm_headers
)
output = {CONTEXT_UPDATE_EWS_ITEM: items_result}
return readable_output, output, items_result
def get_items(client: EWSClient, item_ids, target_mailbox=None):
"""
Get items from target mailbox or client mailbox
:param client: EWS Client
:param item_ids: item ids to retrieve
:param (Optional) target_mailbox: target mailbox to retrieve items from
:return:
"""
item_ids = argToList(item_ids)
account = client.get_account(target_mailbox)
items = client.get_items_from_mailbox(account, item_ids)
items = [x for x in items if isinstance(x, Message)]
items_as_incidents = [parse_incident_from_item(x) for x in items]
items_to_context = [
parse_item_as_dict(x, account.primary_smtp_address, True, True) for x in items
]
readable_output = tableToMarkdown(
"Get items", items_to_context, ITEMS_RESULTS_HEADERS
)
output = {
CONTEXT_UPDATE_EWS_ITEM: items_to_context,
"Email": [email_ec(item) for item in items],
}
return readable_output, output, items_as_incidents
def get_folder(client: EWSClient, folder_path, target_mailbox=None, is_public=None):
"""
Retrieve a folder from the target mailbox or client mailbox
:param client: EWS Client
:param folder_path: folder path to retrieve
:param (Optional) target_mailbox: target mailbox
:param (Optional) is_public: is the folder public
:return:
"""
account = client.get_account(target_mailbox)
is_public = client.is_default_folder(folder_path, is_public)
folder = folder_to_context_entry(
client.get_folder_by_path(folder_path, account=account, is_public=is_public)
)
readable_output = tableToMarkdown(f"Folder {folder_path}", folder)
output = {CONTEXT_UPDATE_FOLDER: folder}
return readable_output, output, folder
def folder_to_context_entry(f):
"""
Create a context entry from a folder response
:param f: folder response
:return: dict context entry
"""
try:
f_entry = {
"name": f.name,
"totalCount": f.total_count,
"id": f.id,
"childrenFolderCount": f.child_folder_count,
"changeKey": f.changekey,
}
if "unread_count" in [x.name for x in Folder.FIELDS]:
f_entry["unreadCount"] = f.unread_count
return f_entry
except AttributeError:
if isinstance(f, dict):
return {
"name": f.get("name"),
"totalCount": f.get("total_count"),
"id": f.get("id"),
"childrenFolderCount": f.get("child_folder_count"),
"changeKey": f.get("changekey"),
"unreadCount": f.get("unread_count"),
}
def mark_item_as_read(
client: EWSClient, item_ids, operation="read", target_mailbox=None
):
"""
Marks item as read
:param client: EWS Client
:param item_ids: items ids to mark as read
:param (Optional) operation: operation to execute
:param (Optional) target_mailbox: target mailbox
:return: Output tuple
"""
marked_items = []
item_ids = argToList(item_ids)
items = client.get_items_from_mailbox(target_mailbox, item_ids)
items = [x for x in items if isinstance(x, Message)]
for item in items:
item.is_read = operation == "read"
item.save()
marked_items.append(
{
ITEM_ID: item.id,
MESSAGE_ID: item.message_id,
ACTION: "marked-as-{}".format(operation),
}
)
readable_output = tableToMarkdown(
f"Marked items ({operation} marked operation)", marked_items
)
output = {CONTEXT_UPDATE_EWS_ITEM: marked_items}
return readable_output, output, marked_items
def random_word_generator(length):
"""Generate a random string of given length
"""
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(length))
def handle_html(html_body):
"""
Extract all data-url content from within the html and return as separate attachments.
Due to security implications, we support only images here
We might not have Beautiful Soup so just do regex search
"""
attachments = []
clean_body = ''
last_index = 0
for i, m in enumerate(
re.finditer(r'<img.+?src=\"(data:(image\/.+?);base64,([a-zA-Z0-9+/=\r\n]+?))\"', html_body, re.I)):
attachment = {
'data': base64.b64decode(m.group(3)),
'name': f'image{i}'
}
attachment['cid'] = f'{attachment["name"]}@{random_word_generator(8)}.{random_word_generator(8)}'
attachments.append(attachment)
clean_body += html_body[last_index:m.start(1)] + 'cid:' + attachment['cid']
last_index = m.end() - 1
clean_body += html_body[last_index:]
return clean_body, attachments
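# For example (illustrative), '<img src="data:image/png;base64,iVBORw...">' becomes
# '<img src="cid:image0@abcdefgh.ijklmnop">' in the returned body, and the decoded image
# bytes are returned as an inline attachment under that generated content id.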
def collect_manual_attachments(manualAttachObj):
"""Collect all manual attachments' data
Args:
manualAttachObj (str): String representation of the manually attached files list.
Returns:
List[Dict]. List of the files data.
"""
manually_attached_objects = argToList(manualAttachObj)
attachments = []
for attachment in manually_attached_objects:
file_res = demisto.getFilePath(os.path.basename(attachment['RealFileName']))
path = file_res['path']
with open(path, 'rb') as fp:
data = fp.read()
attachments.append({
'name': attachment['FileName'],
'data': data,
'cid': ''
})
return attachments
def collect_attachments(attachments_ids, attachments_cids, attachments_names):
"""Collect all attachments' data
Args:
attachments_ids (str): String representation of the files ids list.
attachments_cids (str): String representation of the files content ids list.
attachments_names (str): String representation of the files names list.
Returns:
List[Dict]. List of the files data.
"""
attachments = []
files_ids = argToList(attachments_ids)
files_cids = argToList(attachments_cids)
files_names = argToList(attachments_names)
for index, file_id in enumerate(files_ids):
try:
file_res = demisto.getFilePath(file_id)
path = file_res['path']
if len(files_names) > index and files_names[index]:
filename = files_names[index]
else:
filename = file_res['name']
if len(files_cids) > index and files_cids[index]:
cid = files_cids[index]
else:
cid = ''
with open(path, 'rb') as fp:
data = fp.read()
attachments.append({
'name': filename,
'data': data,
'cid': cid
})
except Exception as e:
demisto.error(f'Invalid entry {file_id} with exception: {e}')
return_error(f'Entry {file_id} is not valid or is not a file entry')
return attachments
def handle_transient_files(transient_files, transient_files_contents, transient_files_cids):
"""Creates the transient attachments data
Args:
transient_files (str): String representation of the transient files names list.
transient_files_contents (str): String representation of the transient files content list.
transient_files_cids (str): String representation of the transient files content ids list.
Returns:
List[Dict]. List of the transient files data.
"""
transient_attachments = []
files_names = argToList(transient_files)
files_contents = argToList(transient_files_contents)
files_cids = argToList(transient_files_cids)
for index in range(len(files_names)):
file_name = files_names[index]
if index >= len(files_contents):
break
file_content = bytes(files_contents[index], UTF_8)
if index >= len(files_cids):
file_cid = ''
else:
file_cid = files_cids[index]
transient_attachments.append({
'name': file_name,
'data': file_content,
'cid': file_cid
})
return transient_attachments
def handle_template_params(template_params):
"""Translates the template params if they exist from the context
Args:
template_params (str): JSON string that represent the variables names to be replaced and the desired value.
Value can be either real value or context key to fetch the value from.
Returns:
Dict. `variable_name: value_to_use` of the templated parameters.
"""
actual_params = {}
if template_params:
try:
params = json.loads(template_params)
for p in params:
if params[p].get('value'):
actual_params[p] = params[p]['value']
elif params[p].get('key'):
actual_params[p] = demisto.dt(demisto.context(), params[p]['key'])
except ValueError as e:
return_error('Unable to parse template_params: %s' % (str(e)))
return actual_params
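# Illustrative templateParams input:
#   {"name": {"value": "Alice"}, "id": {"key": "incident.id"}}
# "value" entries are used as-is, while "key" entries are resolved from the incident
# context via demisto.dt.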
def create_message_object(to, cc, bcc, subject, body, additional_headers):
"""Creates the message object according to the existence of additional custom headers.
"""
if additional_headers:
return Message(
to_recipients=to,
cc_recipients=cc,
bcc_recipients=bcc,
subject=subject,
body=body,
**additional_headers
)
return Message(
to_recipients=to,
cc_recipients=cc,
bcc_recipients=bcc,
subject=subject,
body=body
)
def create_message(to, subject='', body='', bcc=None, cc=None, html_body=None, attachments=None,
additional_headers=None):
"""Creates the Message object that will be sent.
Args:
to (list): Main recipients.
cc (list): CC recipients.
bcc (list): BCC recipients.
subject (str): Email's subject.
body (str): Email's simple text body.
html_body (str): Email's html body.
attachments (list): Files to be attached to the mail, both inline and as files.
additional_headers (Dict): Custom headers to be added to the message.
Returns:
Message. Message object ready to be sent.
"""
if not html_body:
# This is a simple text message - we cannot have CIDs here
message = create_message_object(to, cc, bcc, subject, body, additional_headers)
for attachment in attachments:
if not attachment.get('cid'):
new_attachment = FileAttachment(name=attachment.get('name'), content=attachment.get('data'))
message.attach(new_attachment)
else:
html_body, html_attachments = handle_html(html_body)
attachments += html_attachments
message = create_message_object(to, cc, bcc, subject, HTMLBody(html_body), additional_headers)
for attachment in attachments:
if not attachment.get('cid'):
new_attachment = FileAttachment(name=attachment.get('name'), content=attachment.get('data'))
else:
new_attachment = FileAttachment(name=attachment.get('name'), content=attachment.get('data'),
is_inline=True, content_id=attachment.get('cid'))
message.attach(new_attachment)
return message
def add_additional_headers(additional_headers):
"""Adds custom headers to the Message object
Args:
additional_headers (str): Headers list as string. Example: headerName1=headerValue1,headerName2=headerValue2
Returns:
Dict. Headers dictionary in the form of: `header_name: header value`
"""
headers = dict()
for header in argToList(additional_headers):
header_name, header_value = header.split('=', 1)
class TempClass(ExtendedProperty):
distinguished_property_set_id = 'InternetHeaders'
property_name = header_name
property_type = 'String'
try:
Message.register(header_name, TempClass)
headers[header_name] = header_value
except ValueError as e:
demisto.debug('EWSO365 - Header ' + header_name + ' could not be registered. ' + str(e))
return headers
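# For example, additional_headers="X-Case=1234,X-Team=SOC" registers each name as an
# InternetHeaders extended property on Message and returns
# {"X-Case": "1234", "X-Team": "SOC"} to be passed into create_message_object.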
def send_email(client: EWSClient, to, subject='', body="", bcc=None, cc=None, htmlBody=None,
attachIDs="", attachCIDs="", attachNames="", manualAttachObj=None,
transientFile=None, transientFileContent=None, transientFileCID=None, templateParams=None,
additionalHeader=None, raw_message=None):
to = argToList(to)
cc = argToList(cc)
bcc = argToList(bcc)
# Basic validation - we allow pretty much everything but you have to have at least a recipient
# We allow messages without subject and also without body
if not to and not cc and not bcc:
return_error('You must have at least one recipient')
if raw_message:
message = Message(
to_recipients=to,
cc_recipients=cc,
bcc_recipients=bcc,
body=raw_message
)
else:
if additionalHeader:
additionalHeader = add_additional_headers(additionalHeader)
# collect all types of attachments
attachments = collect_attachments(attachIDs, attachCIDs, attachNames)
attachments.extend(collect_manual_attachments(manualAttachObj))
attachments.extend(handle_transient_files(transientFile, transientFileContent, transientFileCID))
# update body and html_body with the templated params, if exists
template_params = handle_template_params(templateParams)
if template_params:
body = body.format(**template_params)
if htmlBody:
htmlBody = htmlBody.format(**template_params)
message = create_message(to, subject, body, bcc, cc, htmlBody, attachments, additionalHeader)
client.send_email(message)
return 'Mail sent successfully', {}, {}
def get_item_as_eml(client: EWSClient, item_id, target_mailbox=None):
"""
Retrieve item as an eml
:param client: EWS Client
:param item_id: Item id to retrieve
:param (Optional) target_mailbox: target mailbox
:return: Output tuple
"""
account = client.get_account(target_mailbox)
item = client.get_item_from_mailbox(account, item_id)
if item.mime_content:
mime_content = item.mime_content
if isinstance(mime_content, bytes):
email_content = email.message_from_bytes(mime_content)
else:
email_content = email.message_from_string(mime_content)
if item.headers:
attached_email_headers = [
(h, " ".join(map(str.strip, v.split("\r\n"))))
for (h, v) in list(email_content.items())
]
for header in item.headers:
if (
header.name,
header.value,
) not in attached_email_headers and header.name != "Content-Type":
email_content.add_header(header.name, header.value)
eml_name = item.subject if item.subject else "demisto_untitled_eml"
file_result = fileResult(eml_name + ".eml", email_content.as_string())
file_result = (
file_result if file_result else "Failed uploading eml file to war room"
)
return file_result
def parse_incident_from_item(item):
"""
Parses an incident from an item
:param item: item to parse
:return: Parsed item
"""
incident = {}
labels = []
try:
incident["details"] = item.text_body or item.body
except AttributeError:
incident["details"] = item.body
incident["name"] = item.subject
labels.append({"type": "Email/subject", "value": item.subject})
incident["occurred"] = item.datetime_created.ewsformat()
# handle recipients
if item.to_recipients:
for recipient in item.to_recipients:
labels.append({"type": "Email", "value": recipient.email_address})
# handle cc
if item.cc_recipients:
for recipient in item.cc_recipients:
labels.append({"type": "Email/cc", "value": recipient.email_address})
# handle email from
if item.sender:
labels.append({"type": "Email/from", "value": item.sender.email_address})
# email format
email_format = ""
try:
if item.text_body:
labels.append({"type": "Email/text", "value": item.text_body})
email_format = "text"
except AttributeError:
pass
if item.body:
labels.append({"type": "Email/html", "value": item.body})
email_format = "HTML"
labels.append({"type": "Email/format", "value": email_format})
# handle attachments
if item.attachments:
incident["attachment"] = []
for attachment in item.attachments:
file_result = None
label_attachment_type = None
label_attachment_id_type = None
if isinstance(attachment, FileAttachment):
try:
if attachment.content:
# file attachment
label_attachment_type = "attachments"
label_attachment_id_type = "attachmentId"
# save the attachment
file_name = get_attachment_name(attachment.name)
file_result = fileResult(file_name, attachment.content)
# check for error
if file_result["Type"] == entryTypes["error"]:
demisto.error(file_result["Contents"])
raise Exception(file_result["Contents"])
# save attachment to incident
incident["attachment"].append(
{
"path": file_result["FileID"],
"name": get_attachment_name(attachment.name),
}
)
except TypeError as e:
if str(e) != "must be string or buffer, not None":
raise
continue
else:
# other item attachment
label_attachment_type = "attachmentItems"
label_attachment_id_type = "attachmentItemsId"
# save the attachment
if attachment.item.mime_content:
mime_content = attachment.item.mime_content
attached_email = email.message_from_bytes(mime_content) if isinstance(mime_content, bytes) \
else email.message_from_string(mime_content)
if attachment.item.headers:
attached_email_headers = [
(h, " ".join(map(str.strip, v.split("\r\n"))))
for (h, v) in list(attached_email.items())
]
for header in attachment.item.headers:
if (
(header.name, header.value)
not in attached_email_headers
and header.name != "Content-Type"
):
attached_email.add_header(header.name, header.value)
file_result = fileResult(
get_attachment_name(attachment.name) + ".eml",
attached_email.as_bytes().decode('utf-8'),
)
if file_result:
# check for error
if file_result["Type"] == entryTypes["error"]:
demisto.error(file_result["Contents"])
raise Exception(file_result["Contents"])
# save attachment to incident
incident["attachment"].append(
{
"path": file_result["FileID"],
"name": get_attachment_name(attachment.name) + ".eml",
}
)
labels.append(
{
"type": label_attachment_type,
"value": get_attachment_name(attachment.name),
}
)
labels.append(
{"type": label_attachment_id_type, "value": attachment.attachment_id.id}
)
# handle headers
if item.headers:
headers = []
for header in item.headers:
labels.append(
{
"type": "Email/Header/{}".format(header.name),
"value": str(header.value),
}
)
headers.append("{}: {}".format(header.name, header.value))
labels.append({"type": "Email/headers", "value": "\r\n".join(headers)})
# handle item id
if item.message_id:
labels.append({"type": "Email/MessageId", "value": str(item.message_id)})
if item.id:
labels.append({"type": "Email/ID", "value": item.id})
labels.append({"type": "Email/itemId", "value": item.id})
    # handle conversation id (the "ConversionID" label name is kept for backward compatibility)
if item.conversation_id:
labels.append({"type": "Email/ConversionID", "value": item.conversation_id.id})
incident["labels"] = labels
incident["rawJSON"] = json.dumps(parse_item_as_dict(item, None), ensure_ascii=False)
return incident
def fetch_emails_as_incidents(client: EWSClient, last_run):
"""
Fetch incidents
:param client: EWS Client
:param last_run: last run dict
:return:
"""
last_run = get_last_run(client, last_run)
try:
last_emails = fetch_last_emails(
client,
client.folder_name,
last_run.get(LAST_RUN_TIME),
last_run.get(LAST_RUN_IDS),
)
ids = deque(
last_run.get(LAST_RUN_IDS, []), maxlen=client.last_run_ids_queue_size
)
incidents = []
incident: Dict[str, str] = {}
for item in last_emails:
if item.message_id:
ids.append(item.message_id)
incident = parse_incident_from_item(item)
incidents.append(incident)
if len(incidents) >= client.max_fetch:
break
last_run_time = incident.get("occurred", last_run.get(LAST_RUN_TIME))
if isinstance(last_run_time, EWSDateTime):
last_run_time = last_run_time.ewsformat()
new_last_run = {
LAST_RUN_TIME: last_run_time,
LAST_RUN_FOLDER: client.folder_name,
LAST_RUN_IDS: list(ids),
ERROR_COUNTER: 0,
}
demisto.setLastRun(new_last_run)
return incidents
except RateLimitError:
if LAST_RUN_TIME in last_run:
last_run[LAST_RUN_TIME] = last_run[LAST_RUN_TIME].ewsformat()
if ERROR_COUNTER not in last_run:
last_run[ERROR_COUNTER] = 0
last_run[ERROR_COUNTER] += 1
demisto.setLastRun(last_run)
if last_run[ERROR_COUNTER] > 2:
raise
return []
def fetch_last_emails(
client: EWSClient, folder_name="Inbox", since_datetime=None, exclude_ids=None
):
"""
Fetches last emails
:param client: EWS client
:param (Optional) folder_name: folder name to pull from
:param (Optional) since_datetime: items will be searched after this datetime
:param (Optional) exclude_ids: exclude ids from fetch
:return: list of exchangelib.Items
"""
qs = client.get_folder_by_path(folder_name, is_public=client.is_public_folder)
if since_datetime:
qs = qs.filter(datetime_received__gte=since_datetime)
else:
last_10_min = EWSDateTime.now(tz=EWSTimeZone.timezone("UTC")) - timedelta(
minutes=10
)
qs = qs.filter(last_modified_time__gte=last_10_min)
qs = qs.filter().only(*[x.name for x in Message.FIELDS])
qs = qs.filter().order_by("datetime_received")
result = qs.all()
result = [x for x in result if isinstance(x, Message)]
if exclude_ids and len(exclude_ids) > 0:
exclude_ids = set(exclude_ids)
result = [x for x in result if x.message_id not in exclude_ids]
return result
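# Usage sketch (illustrative only; assumes an already-initialized EWSClient named `client`):
# since = EWSDateTime.now(tz=EWSTimeZone.timezone("UTC")) - timedelta(days=1)
# messages = fetch_last_emails(client, folder_name="Inbox", since_datetime=since)
# demisto.debug("Fetched {} messages".format(len(messages)))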
def test_module(client: EWSClient, max_fetch):
"""
test-module
* Max incidents per fetch <= MAX_INCIDENTS_PER_FETCH
* Account can be retrieved
* Account has read rights
* Test access to fetch folder
:param client: EWS Client
:param max_fetch: Max incidents per fetch
:return: "ok"
"""
try:
if int(max_fetch) > MAX_INCIDENTS_PER_FETCH:
return_error(f'Error - Max incidents per fetch cannot be greater than {MAX_INCIDENTS_PER_FETCH}. '
f'You provided: {max_fetch}')
account = client.get_account()
if not account.root.effective_rights.read: # pylint: disable=E1101
raise Exception(
"Success to authenticate, but user has no permissions to read from the mailbox. "
"Need to delegate the user permissions to the mailbox - "
"please read integration documentation and follow the instructions"
)
client.get_folder_by_path(
client.folder_name, account, client.is_public_folder
).test_access()
except ErrorFolderNotFound as e:
if "Top of Information Store" in str(e):
raise Exception(
"Success to authenticate, but user probably has no permissions to read from the specific folder."
"Check user permissions. You can try !ews-find-folders command to "
"get all the folders structure that the user has permissions to"
)
return "ok"
def sub_main():
is_test_module = False
params = demisto.params()
args = prepare_args(demisto.args())
# client's default_target_mailbox is the authorization source for the instance
params['default_target_mailbox'] = args.get('target_mailbox',
args.get('source_mailbox', params['default_target_mailbox']))
client = EWSClient(**params)
start_logging()
try:
command = demisto.command()
# commands that return a single note result
normal_commands = {
"ews-get-searchable-mailboxes": get_searchable_mailboxes,
"ews-move-item-between-mailboxes": move_item_between_mailboxes,
"ews-move-item": move_item,
"ews-delete-items": delete_items,
"ews-search-mailbox": search_items_in_mailbox,
"ews-get-contacts": get_contacts,
"ews-get-out-of-office": get_out_of_office_state,
"ews-recover-messages": recover_soft_delete_item,
"ews-create-folder": create_folder,
"ews-mark-item-as-junk": mark_item_as_junk,
"ews-find-folders": find_folders,
"ews-get-items-from-folder": get_items_from_folder,
"ews-get-items": get_items,
"ews-get-folder": get_folder,
"ews-expand-group": get_expanded_group,
"ews-mark-items-as-read": mark_item_as_read,
"send-mail": send_email,
}
# commands that may return multiple results or non-note result
special_output_commands = {
"ews-get-attachment": fetch_attachments_for_message,
"ews-delete-attachment": delete_attachments_for_message,
"ews-get-items-as-eml": get_item_as_eml,
}
# system commands:
if command == "test-module":
is_test_module = True
demisto.results(test_module(client, params.get('max_fetch')))
elif command == "fetch-incidents":
last_run = demisto.getLastRun()
incidents = fetch_emails_as_incidents(client, last_run)
demisto.incidents(incidents)
# special outputs commands
elif command in special_output_commands:
demisto.results(special_output_commands[command](client, **args)) # type: ignore[operator]
# normal commands
else:
output = normal_commands[command](client, **args) # type: ignore[operator]
return_outputs(*output)
except Exception as e:
start_logging()
debug_log = log_stream.getvalue() # type: ignore[union-attr]
error_message_simple = ""
# Office365 regular maintenance case
if isinstance(e, ErrorMailboxStoreUnavailable) or isinstance(
e, ErrorMailboxMoveInProgress
):
log_message = (
"Office365 is undergoing load balancing operations. "
"As a result, the service is temporarily unavailable."
)
if demisto.command() == "fetch-incidents":
demisto.info(log_message)
demisto.incidents([])
sys.exit(0)
if is_test_module:
demisto.results(
log_message + " Please retry the instance configuration test."
)
sys.exit(0)
error_message_simple = log_message + " Please retry your request."
if isinstance(e, ConnectionError):
error_message_simple = (
"Could not connect to the server.\n"
f"Additional information: {str(e)}"
)
else:
if is_test_module and isinstance(e, MalformedResponseError):
error_message_simple = (
"Got invalid response from the server.\n"
)
# Legacy error handling
if "Status code: 401" in debug_log:
error_message_simple = (
"Got unauthorized from the server. "
)
if "Status code: 503" in debug_log:
error_message_simple = (
"Got timeout from the server. "
"Probably the server is not reachable with the current settings. "
)
if not error_message_simple:
error_message = error_message_simple = str(e)
else:
error_message = error_message_simple + "\n" + str(e)
stacktrace = traceback.format_exc()
if stacktrace:
error_message += "\nFull stacktrace:\n" + stacktrace
if debug_log:
error_message += "\nFull debug log:\n" + debug_log
if demisto.command() == "fetch-incidents":
raise
if demisto.command() == "ews-search-mailbox" and isinstance(e, ValueError):
return_error(
message="Selected invalid field, please specify valid field name.",
error=e,
)
if is_test_module:
demisto.results(error_message_simple)
else:
demisto.results(
{
"Type": entryTypes["error"],
"ContentsFormat": formats["text"],
"Contents": error_message_simple,
}
)
demisto.error(f"{e.__class__.__name__}: {error_message}")
finally:
exchangelib_cleanup()
if log_stream:
try:
logging.getLogger().removeHandler(log_handler) # type: ignore
log_stream.close()
except Exception as ex:
demisto.error(
"EWS: unexpected exception when trying to remove log handler: {}".format(
ex
)
)
def process_main():
"""setup stdin to fd=0 so we can read from the server"""
sys.stdin = os.fdopen(0, "r")
sub_main()
def main():
# When running big queries, like 'ews-search-mailbox', the memory might not be freed by the garbage
# collector. The `separate_process` flag runs the integration in a separate process, which prevents
# memory leakage.
separate_process = demisto.params().get("separate_process", False)
demisto.debug("Running as separate_process: {}".format(separate_process))
if separate_process:
try:
p = Process(target=process_main)
p.start()
p.join()
except Exception as ex:
demisto.error("Failed starting Process: {}".format(ex))
else:
sub_main()
from MicrosoftApiModule import * # noqa: E402
if __name__ in ("__main__", "__builtin__", "builtins"):
main()
|
#
# KTH Royal Institute of Technology
# DD2424: Deep Learning in Data Science
# Assignment 4
#
# Carlo Rapisarda ([email protected])
#
import numpy as np
import matplotlib.pyplot as plt
import dataset as dt
from os.path import exists
from model import RNNet
from utilities import compute_grads_numerical, compare_grads, unpickle, pickle, eprint, simple_smooth_1d
GOBLET_RESULTS_PATH = '../goblet_results.pkl'
def check_gradients():
book = dt.load_goblet_of_fire()
seq_len = 25
m = 5
X, Y, _ = book.get_labeled_data(0, seq_len)
h0 = np.zeros((m, 1))
np.random.seed(42)
net = RNNet(m=m, K=book.K)
print('===> Computing numerical gradients...')
num_grads = compute_grads_numerical(X, Y, h0, net)
print('===> Computing analytical gradients...')
grads = net._backward(X, Y, h0, *net._forward(X, h0))
errors = compare_grads(num_grads, grads, m, book.K)
errors_v = vars(errors)
for k in errors_v:
v = errors_v[k]
print(f'MSEs for {k} -> max: {v.max()},\t avg: {v.mean()},\t std: {v.std()}')
def train_with_goblet_of_fire(results_path=None):
book = dt.load_goblet_of_fire()
np.random.seed(42)
net = RNNet(m=100, K=book.K)
# optimizer = RNNet.AdaGrad(net, eta=0.1)
optimizer = RNNet.RMSProp(net, eta=0.001, gamma=0.9)
config = {
'epochs': 10,
'output_folder': '../out',
'optimizer': optimizer,
'sequence_length': 25,
'record_interval': 1_000,
'test_length': 200
}
res = net.train(book, config)
if results_path is not None:
pickle(res, results_path)
return res
def plot_results(res, fig_path=None):
interval = res['interval']
smooth_losses_by_interval = res['smooth_losses_by_interval']
smooth_losses_by_epoch = res['smooth_losses_by_epoch']
epochs = len(smooth_losses_by_epoch)
iters_per_epoch = 1.0 * len(smooth_losses_by_interval) * interval / epochs
smoother = np.array(smooth_losses_by_interval)
smoother = simple_smooth_1d(smoother, 0.95)
fig = plt.figure(figsize=(9, 4))
ax1 = fig.add_subplot(111)
ax1.plot(np.arange(len(smooth_losses_by_interval)) * interval, smooth_losses_by_interval)
ax1.plot(np.arange(smoother.size) * interval, smoother)
ax1.set_xlabel('step')
ax1.set_ylabel('loss')
ax2 = ax1.twiny()
ax2.set_xlabel('epoch')
ax2.set_xlim(ax1.get_xlim())
ax2.set_xticks(np.arange(1,epochs+1) * iters_per_epoch)
ax2.set_xticklabels(np.arange(1,epochs+1))
ax2.grid()
ax1.grid(axis='y')
fig.tight_layout()
fig.legend(['training loss', 'smoothed'], bbox_to_anchor=(0.98, 0.86), bbox_transform=fig.transFigure)
if fig_path is not None:
fig.savefig(fig_path, bbox_inches='tight')
fig.show()
def print_evolution(res, interval, limit=None):
smooth_losses = res['smooth_losses_by_interval']
synth_samples = res['synthesized_text_by_interval']
res_interval = res['interval']
assert interval % res_interval == 0, 'Print interval must be a multiple of the recorded interval'
selected_indexes = [x for x in range(0, len(synth_samples), interval // res_interval)]
if limit is not None:
selected_indexes = selected_indexes[:limit]
# last_step = selected_indexes[-1] * res_interval
# print(f'\nModel evolution from step 1 to {last_step}:\n')
print('\n')
for i in selected_indexes:
step = max(i * res_interval, 1)
text = synth_samples[i]
smooth_loss = smooth_losses[i]
print(f'===> Step: {step}, smooth_loss: {round(smooth_loss, 4)}, synthesized:\n{text}\n\n')
def synthesize_with_best_model():
model_path = '../trained_models/2018-06-12-2205-e10.pkl'
if exists(model_path):
book = dt.load_goblet_of_fire()
net = RNNet.import_model(model_path)
np.random.seed(50)
print(net.synthesize(1000, book.char_to_one_hot, book.index_to_char))
else:
eprint('Best trained model not found!')
def main():
check_gradients()
if not exists(GOBLET_RESULTS_PATH):
train_with_goblet_of_fire(GOBLET_RESULTS_PATH)
results = unpickle(GOBLET_RESULTS_PATH)
plot_results(results, '../Report/Figs/training_goblet.eps')
print_evolution(results, 10_000, 11)
print(f'===> Passage from the final model (smooth_loss: {results["smooth_losses_by_epoch"][-1]}):')
synthesize_with_best_model()
if __name__ == '__main__':
main()
|
#
# KTH Royal Institute of Technology
# DD2424: Deep Learning in Data Science
# Assignment 4
#
# Carlo Rapisarda ([email protected])
#
import numpy as np
import matplotlib.pyplot as plt
import dataset as dt
from os.path import exists
from model import RNNet
from utilities import compute_grads_numerical, compare_grads, unpickle, pickle, eprint, simple_smooth_1d
GOBLET_RESULTS_PATH = '../goblet_results.pkl'
def check_gradients():
book = dt.load_goblet_of_fire()
seq_len = 25
m = 5
X, Y, _ = book.get_labeled_data(0, seq_len)
h0 = np.zeros((m, 1))
np.random.seed(42)
net = RNNet(m=m, K=book.K)
print('===> Computing numerical gradients...')
num_grads = compute_grads_numerical(X, Y, h0, net)
print('===> Computing analytical gradients...')
grads = net._backward(X, Y, h0, *net._forward(X, h0))
errors = compare_grads(num_grads, grads, m, book.K)
errors_v = vars(errors)
for k in errors_v:
v = errors_v[k]
print(f'MSEs for {k} -> max: {v.max()},\t avg: {v.mean()},\t std: {v.std()}')
def train_with_goblet_of_fire(results_path=None):
book = dt.load_goblet_of_fire()
np.random.seed(42)
net = RNNet(m=100, K=book.K)
# optimizer = RNNet.AdaGrad(net, eta=0.1)
optimizer = RNNet.RMSProp(net, eta=0.001, gamma=0.9)
config = {
'epochs': 10,
'output_folder': '../out',
'optimizer': optimizer,
'sequence_length': 25,
'record_interval': 1_000,
'test_length': 200
}
res = net.train(book, config)
if results_path is not None:
pickle(res, results_path)
return res
def plot_results(res, fig_path=None):
interval = res['interval']
smooth_losses_by_interval = res['smooth_losses_by_interval']
smooth_losses_by_epoch = res['smooth_losses_by_epoch']
epochs = len(smooth_losses_by_epoch)
iters_per_epoch = 1.0 * len(smooth_losses_by_interval) * interval / epochs
smoother = np.array(smooth_losses_by_interval)
smoother = simple_smooth_1d(smoother, 0.95)
fig = plt.figure(figsize=(9, 4))
ax1 = fig.add_subplot(111)
ax1.plot(np.arange(len(smooth_losses_by_interval)) * interval, smooth_losses_by_interval)
ax1.plot(np.arange(smoother.size) * interval, smoother)
ax1.set_xlabel('step')
ax1.set_ylabel('loss')
ax2 = ax1.twiny()
ax2.set_xlabel('epoch')
ax2.set_xlim(ax1.get_xlim())
ax2.set_xticks(np.arange(1,epochs+1) * iters_per_epoch)
ax2.set_xticklabels(np.arange(1,epochs+1))
ax2.grid()
ax1.grid(axis='y')
fig.tight_layout()
fig.legend(['training loss', 'smoothed'], bbox_to_anchor=(0.98, 0.86), bbox_transform=fig.transFigure)
if fig_path is not None:
fig.savefig(fig_path, bbox_inches='tight')
fig.show()
def print_evolution(res, interval, limit=None):
smooth_losses = res['smooth_losses_by_interval']
synth_samples = res['synthesized_text_by_interval']
res_interval = res['interval']
assert interval % res_interval == 0, 'Print interval must be a multiple of the recorded interval'
selected_indexes = [x for x in range(0, len(synth_samples), interval // res_interval)]
if limit is not None:
selected_indexes = selected_indexes[:limit]
# last_step = selected_indexes[-1] * res_interval
# print(f'\nModel evolution from step 1 to {last_step}:\n')
print('\n')
for i in selected_indexes:
step = max(i * res_interval, 1)
text = synth_samples[i]
smooth_loss = smooth_losses[i]
print(f'===> Step: {step}, smooth_loss: {round(smooth_loss, 4)}, synthesized:\n{text}\n\n')
def synthesize_with_best_model():
model_path = '../trained_models/2018-06-12-2205-e10.pkl'
if exists(model_path):
book = dt.load_goblet_of_fire()
net = RNNet.import_model(model_path)
np.random.seed(50)
print(net.synthesize(1000, book.char_to_one_hot, book.index_to_char))
else:
eprint('Best trained model not found!')
def main():
check_gradients()
if not exists(GOBLET_RESULTS_PATH):
train_with_goblet_of_fire(GOBLET_RESULTS_PATH)
results = unpickle(GOBLET_RESULTS_PATH)
plot_results(results, '../Report/Figs/training_goblet.eps')
print_evolution(results, 10_000, 11)
print(f'===> Passage from the final model (smooth_loss: {results["smooth_losses_by_epoch"][-1]}):')
synthesize_with_best_model()
if __name__ == '__main__':
main()
|
#encoding:utf-8
import torch
import numpy as np
from ..utils.utils import model_device,load_bert
class Predicter(object):
def __init__(self,
model,
logger,
n_gpu,
model_path
):
self.model = model
self.logger = logger
self.width = 30
self.model, self.device = model_device(n_gpu= n_gpu, model=self.model, logger=self.logger)
loads = load_bert(model_path=model_path,model = self.model)
self.model = loads[0]
def show_info(self,batch_id,n_batch):
recv_per = int(100 * (batch_id + 1) / n_batch)
if recv_per >= 100:
recv_per = 100
# show bar
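# e.g. batch 5 of 100 renders roughly as "[predict]5/100[>   ...   ]5%" (one '>' padded with spaces to width 30)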
show_bar = f"\r[predict]{batch_id+1}/{n_batch}[{int(self.width * recv_per / 100) * ">":<{self.width}s}]{recv_per}%"
print(show_bar,end='')
def predict(self,data):
all_logits = None
self.model.eval()
n_batch = len(data)
with torch.no_grad():
for step, (input_ids, input_mask, segment_ids, label_ids) in enumerate(data):
input_ids = input_ids.to(self.device)
input_mask = input_mask.to(self.device)
segment_ids = segment_ids.to(self.device)
logits = self.model(input_ids, segment_ids, input_mask)
logits = logits.sigmoid()
self.show_info(step,n_batch)
if all_logits is None:
all_logits = logits.detach().cpu().numpy()
else:
all_logits = np.concatenate([all_logits,logits.detach().cpu().numpy()],axis = 0)
return all_logits
|
#encoding:utf-8
import torch
import numpy as np
from ..utils.utils import model_device,load_bert
class Predicter(object):
def __init__(self,
model,
logger,
n_gpu,
model_path
):
self.model = model
self.logger = logger
self.width = 30
self.model, self.device = model_device(n_gpu= n_gpu, model=self.model, logger=self.logger)
loads = load_bert(model_path=model_path,model = self.model)
self.model = loads[0]
def show_info(self,batch_id,n_batch):
recv_per = int(100 * (batch_id + 1) / n_batch)
if recv_per >= 100:
recv_per = 100
# show bar
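# e.g. batch 5 of 100 renders roughly as "[predict]5/100[>   ...   ]5%" (one '>' padded with spaces to width 30)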
show_bar = f"\r[predict]{batch_id+1}/{n_batch}[{int(self.width * recv_per / 100) * '>':<{self.width}s}]{recv_per}%"
print(show_bar,end='')
def predict(self,data):
all_logits = None
self.model.eval()
n_batch = len(data)
with torch.no_grad():
for step, (input_ids, input_mask, segment_ids, label_ids) in enumerate(data):
input_ids = input_ids.to(self.device)
input_mask = input_mask.to(self.device)
segment_ids = segment_ids.to(self.device)
logits = self.model(input_ids, segment_ids, input_mask)
logits = logits.sigmoid()
self.show_info(step,n_batch)
if all_logits is None:
all_logits = logits.detach().cpu().numpy()
else:
all_logits = np.concatenate([all_logits,logits.detach().cpu().numpy()],axis = 0)
return all_logits
|
# -*- coding: utf-8 -*-
"""
test_doc_table
~~~~~~~~~~~~~~
Test the Table Document element.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import unittest
from chemdataextractor.doc.table import Table, Cell
from chemdataextractor.doc.text import Caption
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
class TestTable(unittest.TestCase):
"""Simple Table instantiation tests."""
maxDiff = None
def test_uvvis_table(self):
""""""
t = Table(
caption=Caption('Selected photophysical properties of biarylsubstituted pyrazoles 5–8 and 1-methyl-3,5-diphenylpyrazole (9) at room temperature'),
headings=[
[
Cell('Compound'),
Cell('Absorption maxima λmax,abs (ε) [nm] (L cm−1 mol−1)'),
Cell('Emission maxima λmax,em (Φf) [nm] (a.u.)'),
Cell('Stokes-shift Δṽ [cm−1]')
]
],
rows=[
[Cell(' 5a '), Cell('273.5 (40 100)'), Cell('357.0 (0.77)'), Cell('9400')],
[Cell(' 5b '), Cell('268.5 (36 700)'), Cell('359.0 (0.77)'), Cell('8600')],
[Cell('Coumarin 343'), Cell('263.0 (38 400)'), Cell('344.5 (0.67)'), Cell('9000')],
[Cell(' 5d '), Cell('281.0 (34 200)'), Cell('351.5 (0.97)'), Cell('7100')],
[Cell(' 5e '), Cell('285.0 (44 000)'), Cell('382.0 (0.35)'), Cell('8900')],
[Cell(' 5f '), Cell('289.0 (43 300)'), Cell('363.0 (0.80)'), Cell('7100')],
[Cell(' 5g '), Cell('285.0 (42 000)'), Cell('343.5 (0.86)'), Cell('6000')],
[Cell(' 6a '), Cell('283.5 (35 600)'), Cell('344.5 (0.49)'), Cell('6300')],
[Cell(' 6b '), Cell('267.5 (35 800)'), Cell('338.5 (0.83)'), Cell('7800')],
[Cell(' 6c '), Cell('286.0 (33 000)'), Cell('347.0 (0.27)'), Cell('6200')],
[Cell(' 6d '), Cell('306.5 (36 600)'), Cell('384.0 (0.10)'), Cell('6600')],
[Cell(' 7 '), Cell('288.5 (62 500)'), Cell('367.0 (0.07)'), Cell('7400')],
[Cell('Compound 8a '), Cell('257.0 (36 300), 293.0 sh (25 000)'), Cell('385.0 (0.41)'), Cell('8200')],
[Cell(' 8b '), Cell('257.0 (32 000), 296.0 sh (23000)'), Cell('388.0 (0.33)'), Cell('8000')],
[Cell(' 8c '), Cell('257.0 (27 400), 307.5 (18900)'), Cell('387.0 (0.12)'), Cell('6700')],
[Cell(' 8d '), Cell('268.5 (29 500)'), Cell('385.0 (0.29)'), Cell('11 300')],
[Cell('Dye 8e '), Cell('261.5 (39 900), 288.0 sh (29 600), 311.0 sh (20 500)'), Cell('386.5 (0.37)'), Cell('6300')],
[Cell(' 8f '), Cell('256.5 (27 260), 296.0 (28404)'), Cell('388.5 (0.35)'), Cell('8000')],
[Cell(' 8g '), Cell('272.5 (39 600)'), Cell('394.0 (0.30)'), Cell('11 300')],
[Cell(' 8h '), Cell('286.0 (22 900)'), Cell('382.5 (0.33)'), Cell('8800')],
[Cell(' 9 '), Cell('254.0 (28 800)'), Cell('338.5 (0.40)'), Cell('9800')]]
)
gold = [
{'labels': [u'5a'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'40100', 'value': u'273.5'}]}], 'quantum_yields': [{'value': u'0.77'}]},
{'labels': [u'5b'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'36700', 'value': u'268.5'}]}], 'quantum_yields': [{'value': u'0.77'}]},
{'names': [u'Coumarin 343'], 'quantum_yields': [{'value': u'0.67'}], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'38400', 'value': u'263.0'}]}]},
{'labels': [u'5d'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'34200', 'value': u'281.0'}]}], 'quantum_yields': [{'value': u'0.97'}]},
{'labels': [u'5e'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'44000', 'value': u'285.0'}]}], 'quantum_yields': [{'value': u'0.35'}]},
{'labels': [u'5f'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'43300', 'value': u'289.0'}]}], 'quantum_yields': [{'value': u'0.80'}]},
{'labels': [u'5g'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'42000', 'value': u'285.0'}]}], 'quantum_yields': [{'value': u'0.86'}]},
{'labels': [u'6a'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'35600', 'value': u'283.5'}]}], 'quantum_yields': [{'value': u'0.49'}]},
{'labels': [u'6b'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'35800', 'value': u'267.5'}]}], 'quantum_yields': [{'value': u'0.83'}]},
{'labels': [u'6c'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'33000', 'value': u'286.0'}]}], 'quantum_yields': [{'value': u'0.27'}]},
{'labels': [u'6d'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'36600', 'value': u'306.5'}]}], 'quantum_yields': [{'value': u'0.10'}]},
{'labels': [u'7'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'62500', 'value': u'288.5'}]}], 'quantum_yields': [{'value': u'0.07'}]},
{'labels': [u'8a'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'36300', 'value': u'257.0'}, {'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'shape': u'sh', 'extinction': u'25000', 'value': u'293.0'}]}], 'quantum_yields': [{'value': u'0.41'}]},
{'labels': [u'8b'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'32000', 'value': u'257.0'}, {'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'shape': u'sh', 'extinction': u'23000', 'value': u'296.0'}]}], 'quantum_yields': [{'value': u'0.33'}]},
{'labels': [u'8c'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'27400', 'value': u'257.0'}, {'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'18900', 'value': u'307.5'}]}], 'quantum_yields': [{'value': u'0.12'}]},
{'labels': [u'8d'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'29500', 'value': u'268.5'}]}], 'quantum_yields': [{'value': u'0.29'}]},
{'labels': [u'8e'], 'quantum_yields': [{'value': u'0.37'}], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'39900', 'value': u'261.5'}, {'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'shape': u'sh', 'extinction': u'29600', 'value': u'288.0'}, {'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'shape': u'sh', 'extinction': u'20500', 'value': u'311.0'}]}]},
{'labels': [u'8f'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'27260', 'value': u'256.5'}, {'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'28404', 'value': u'296.0'}]}], 'quantum_yields': [{'value': u'0.35'}]},
{'labels': [u'8g'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'39600', 'value': u'272.5'}]}], 'quantum_yields': [{'value': u'0.30'}]},
{'labels': [u'8h'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'22900', 'value': u'286.0'}]}], 'quantum_yields': [{'value': u'0.33'}]},
{'labels': [u'9'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'28800', 'value': u'254.0'}]}], 'quantum_yields': [{'value': u'0.40'}]},
]
for record in t.records:
print(record.serialize())
self.assertEqual(gold, [record.serialize() for record in t.records])
def test_spectroscopic_table(self):
""""""
t = Table(
caption=Caption('Spectroscopic properties of Coumarins in acetonitrile at 298 K.'),
headings=[
[
Cell(''), # Blank compound heading
Cell('λmax (nm)'),
Cell('ε (M–1 cm–1)'),
Cell('λem (nm)'),
Cell('ϕ')
]
],
rows=[
[Cell('Coumarin 343'), Cell('398'), Cell('40 800'), Cell('492'), Cell('0.52')],
[Cell('C144'), Cell('429'), Cell('9500'), Cell('601'), Cell('N/A')],
[Cell('Coumarin 34'), Cell('269'), Cell('-'), Cell('435'), Cell('<0.01')],
]
)
# for record in t.caption.records:
# print(record.to_primitive())
# print(record.is_contextual)
gold = [
{'names': ['Coumarin 343'], 'quantum_yields': [{'type': '\u03d5', 'solvent': 'acetonitrile', 'value': '0.52', 'temperature': '298', 'temperature_units': 'K'}], 'uvvis_spectra': [{'temperature': '298', 'temperature_units': 'K', 'solvent': 'acetonitrile', 'peaks': [{'units': 'nm', 'value': '398'}]}, {'temperature': '298', 'temperature_units': 'K', 'solvent': 'acetonitrile', 'peaks': [{'extinction': '40800', 'extinction_units': 'M \u2013 1 cm \u2013 1'}]} ]},
{'labels': ['C144'], 'uvvis_spectra': [{'temperature': '298', 'temperature_units': 'K', 'solvent': 'acetonitrile', 'peaks': [{'units': 'nm', 'value': '429'}]}, {'temperature': '298', 'temperature_units': 'K', 'solvent': 'acetonitrile', 'peaks': [{'extinction': '9500', 'extinction_units': 'M \u2013 1 cm \u2013 1'}]}]},
{'names': ['Coumarin 34'], 'quantum_yields': [{'type': '\u03d5', 'solvent': 'acetonitrile', 'value': '<0.01', 'temperature': '298', 'temperature_units': 'K'}], 'uvvis_spectra': [{'temperature': '298', 'temperature_units': 'K', 'solvent': 'acetonitrile', 'peaks': [{'units': 'nm', 'value': '269'}]}]},
{'names': ['Coumarins']},
{'names': ['acetonitrile']}
]
# for record in t.records:
# print(record.to_primitive())
self.assertEqual(gold, [record.serialize() for record in t.records])
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
"""
test_doc_table
~~~~~~~~~~~~~~
Test the Table Document element.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import unittest
from chemdataextractor.doc.table import Table, Cell
from chemdataextractor.doc.text import Caption
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
class TestTable(unittest.TestCase):
"""Simple Table instantiation tests."""
maxDiff = None
def test_uvvis_table(self):
""""""
t = Table(
caption=Caption('Selected photophysical properties of biarylsubstituted pyrazoles 5–8 and 1-methyl-3,5-diphenylpyrazole (9) at room temperature'),
headings=[
[
Cell('Compound'),
Cell('Absorption maxima λmax,abs (ε) [nm] (L cm−1 mol−1)'),
Cell('Emission maxima λmax,em (Φf) [nm] (a.u.)'),
Cell('Stokes-shift Δṽ [cm−1]')
]
],
rows=[
[Cell(' 5a '), Cell('273.5 (40 100)'), Cell('357.0 (0.77)'), Cell('9400')],
[Cell(' 5b '), Cell('268.5 (36 700)'), Cell('359.0 (0.77)'), Cell('8600')],
[Cell('Coumarin 343'), Cell('263.0 (38 400)'), Cell('344.5 (0.67)'), Cell('9000')],
[Cell(' 5d '), Cell('281.0 (34 200)'), Cell('351.5 (0.97)'), Cell('7100')],
[Cell(' 5e '), Cell('285.0 (44 000)'), Cell('382.0 (0.35)'), Cell('8900')],
[Cell(' 5f '), Cell('289.0 (43 300)'), Cell('363.0 (0.80)'), Cell('7100')],
[Cell(' 5g '), Cell('285.0 (42 000)'), Cell('343.5 (0.86)'), Cell('6000')],
[Cell(' 6a '), Cell('283.5 (35 600)'), Cell('344.5 (0.49)'), Cell('6300')],
[Cell(' 6b '), Cell('267.5 (35 800)'), Cell('338.5 (0.83)'), Cell('7800')],
[Cell(' 6c '), Cell('286.0 (33 000)'), Cell('347.0 (0.27)'), Cell('6200')],
[Cell(' 6d '), Cell('306.5 (36 600)'), Cell('384.0 (0.10)'), Cell('6600')],
[Cell(' 7 '), Cell('288.5 (62 500)'), Cell('367.0 (0.07)'), Cell('7400')],
[Cell('Compound 8a '), Cell('257.0 (36 300), 293.0 sh (25 000)'), Cell('385.0 (0.41)'), Cell('8200')],
[Cell(' 8b '), Cell('257.0 (32 000), 296.0 sh (23000)'), Cell('388.0 (0.33)'), Cell('8000')],
[Cell(' 8c '), Cell('257.0 (27 400), 307.5 (18900)'), Cell('387.0 (0.12)'), Cell('6700')],
[Cell(' 8d '), Cell('268.5 (29 500)'), Cell('385.0 (0.29)'), Cell('11 300')],
[Cell('Dye 8e '), Cell('261.5 (39 900), 288.0 sh (29 600), 311.0 sh (20 500)'), Cell('386.5 (0.37)'), Cell('6300')],
[Cell(' 8f '), Cell('256.5 (27 260), 296.0 (28404)'), Cell('388.5 (0.35)'), Cell('8000')],
[Cell(' 8g '), Cell('272.5 (39 600)'), Cell('394.0 (0.30)'), Cell('11 300')],
[Cell(' 8h '), Cell('286.0 (22 900)'), Cell('382.5 (0.33)'), Cell('8800')],
[Cell(' 9 '), Cell('254.0 (28 800)'), Cell('338.5 (0.40)'), Cell('9800')]]
)
gold = [
{'labels': [u'5a'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'40100', 'value': u'273.5'}]}], 'quantum_yields': [{'value': u'0.77'}]},
{'labels': [u'5b'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'36700', 'value': u'268.5'}]}], 'quantum_yields': [{'value': u'0.77'}]},
{'names': [u'Coumarin 343'], 'quantum_yields': [{'value': u'0.67'}], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'38400', 'value': u'263.0'}]}]},
{'labels': [u'5d'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'34200', 'value': u'281.0'}]}], 'quantum_yields': [{'value': u'0.97'}]},
{'labels': [u'5e'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'44000', 'value': u'285.0'}]}], 'quantum_yields': [{'value': u'0.35'}]},
{'labels': [u'5f'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'43300', 'value': u'289.0'}]}], 'quantum_yields': [{'value': u'0.80'}]},
{'labels': [u'5g'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'42000', 'value': u'285.0'}]}], 'quantum_yields': [{'value': u'0.86'}]},
{'labels': [u'6a'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'35600', 'value': u'283.5'}]}], 'quantum_yields': [{'value': u'0.49'}]},
{'labels': [u'6b'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'35800', 'value': u'267.5'}]}], 'quantum_yields': [{'value': u'0.83'}]},
{'labels': [u'6c'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'33000', 'value': u'286.0'}]}], 'quantum_yields': [{'value': u'0.27'}]},
{'labels': [u'6d'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'36600', 'value': u'306.5'}]}], 'quantum_yields': [{'value': u'0.10'}]},
{'labels': [u'7'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'62500', 'value': u'288.5'}]}], 'quantum_yields': [{'value': u'0.07'}]},
{'labels': [u'8a'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'36300', 'value': u'257.0'}, {'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'shape': u'sh', 'extinction': u'25000', 'value': u'293.0'}]}], 'quantum_yields': [{'value': u'0.41'}]},
{'labels': [u'8b'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'32000', 'value': u'257.0'}, {'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'shape': u'sh', 'extinction': u'23000', 'value': u'296.0'}]}], 'quantum_yields': [{'value': u'0.33'}]},
{'labels': [u'8c'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'27400', 'value': u'257.0'}, {'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'18900', 'value': u'307.5'}]}], 'quantum_yields': [{'value': u'0.12'}]},
{'labels': [u'8d'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'29500', 'value': u'268.5'}]}], 'quantum_yields': [{'value': u'0.29'}]},
{'labels': [u'8e'], 'quantum_yields': [{'value': u'0.37'}], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'39900', 'value': u'261.5'}, {'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'shape': u'sh', 'extinction': u'29600', 'value': u'288.0'}, {'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'shape': u'sh', 'extinction': u'20500', 'value': u'311.0'}]}]},
{'labels': [u'8f'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'27260', 'value': u'256.5'}, {'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'28404', 'value': u'296.0'}]}], 'quantum_yields': [{'value': u'0.35'}]},
{'labels': [u'8g'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'39600', 'value': u'272.5'}]}], 'quantum_yields': [{'value': u'0.30'}]},
{'labels': [u'8h'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'22900', 'value': u'286.0'}]}], 'quantum_yields': [{'value': u'0.33'}]},
{'labels': [u'9'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'28800', 'value': u'254.0'}]}], 'quantum_yields': [{'value': u'0.40'}]},
]
for record in t.records:
print(record.serialize())
self.assertEqual(gold, [record.serialize() for record in t.records])
def test_spectroscopic_table(self):
""""""
t = Table(
caption=Caption('Spectroscopic properties of Coumarins in acetonitrile at 298 K.'),
headings=[
[
Cell(''), # Blank compound heading
Cell('λmax (nm)'),
Cell('ε (M–1 cm–1)'),
Cell('λem (nm)'),
Cell('ϕ')
]
],
rows=[
[Cell('Coumarin 343'), Cell('398'), Cell('40 800'), Cell('492'), Cell('0.52')],
[Cell('C144'), Cell('429'), Cell('9500'), Cell('601'), Cell('N/A')],
[Cell('Coumarin 34'), Cell('269'), Cell('-'), Cell('435'), Cell('<0.01')],
]
)
# for record in t.caption.records:
# print(record.to_primitive())
# print(record.is_contextual)
gold = [
{'names': ['Coumarin 343'], 'quantum_yields': [{'type': '\u03d5', 'solvent': 'acetonitrile', 'value': '0.52', 'temperature': '298', 'temperature_units': 'K'}], 'uvvis_spectra': [{'temperature': '298', 'temperature_units': 'K', 'solvent': 'acetonitrile', 'peaks': [{'units': 'nm', 'value': '398'}]}, {'temperature': '298', 'temperature_units': 'K', 'solvent': 'acetonitrile', 'peaks': [{'extinction': '40800', 'extinction_units': 'M \u2013 1 cm \u2013 1'}]} ]},
{'labels': ['C144'], 'uvvis_spectra': [{'temperature': '298', 'temperature_units': 'K', 'solvent': 'acetonitrile', 'peaks': [{'units': 'nm', 'value': '429'}]}, {'temperature': '298', 'temperature_units': 'K', 'solvent': 'acetonitrile', 'peaks': [{'extinction': '9500', 'extinction_units': 'M \u2013 1 cm \u2013 1'}]}]},
{'names': ['Coumarin 34'], 'quantum_yields': [{'type': '\u03d5', 'solvent': 'acetonitrile', 'value': '<0.01', 'temperature': '298', 'temperature_units': 'K'}], 'uvvis_spectra': [{'temperature': '298', 'temperature_units': 'K', 'solvent': 'acetonitrile', 'peaks': [{'units': 'nm', 'value': '269'}]}]},
{'names': ['Coumarins']},
{'names': ['acetonitrile']}
]
# for record in t.records:
# print(record.to_primitive())
self.assertEqual(gold, [record.serialize() for record in t.records])
if __name__ == '__main__':
unittest.main()
|
"""Portfolio View"""
__docformat__ = "numpy"
import logging
from typing import List, Optional
import os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from gamestonk_terminal.config_terminal import theme
from gamestonk_terminal.config_plot import PLOT_DPI
from gamestonk_terminal.portfolio import (
portfolio_model,
)
from gamestonk_terminal.helper_funcs import (
plot_autoscale,
export_data,
)
from gamestonk_terminal.decorators import log_start_end
from gamestonk_terminal.rich_config import console
# from reportlab.lib.pagesizes import letter
# from reportlab.pdfgen import canvas
# from reportlab.lib.utils import ImageReader
# from gamestonk_terminal.portfolio import reportlab_helpers
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def load_info():
"""Prints instructions to load a CSV
Returns
----------
text : str
Information on how to load a csv
"""
text = """
In order to load a CSV do the following:
1. Add headers to the first row, below is data for each column:\n
\t1. Identifier for the asset (such as a stock ticker)
\t2. Type of asset (stock, bond, option, crypto)
\t3. The volume of the asset transacted
\t4. The buy date in yyyy/mm/dd
\t5. The Price paid for the asset
\t6. Any fees paid during the transaction
\t7. A premium paid or received if this was an option
\t8. Whether the asset was bought (covered) or sold (shorted)\n
2. Place this file in gamestonk_terminal/portfolio/portfolios\n
"""
console.print(text)
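# A hypothetical row matching the columns described above (header names are illustrative,
# not required by the loader):
# Name,Type,Quantity,Date,Price,Fees,Premium,Side
# AAPL,stock,10,2021/01/04,129.41,0.00,0.00,Buy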
@log_start_end(log=logger)
def display_returns_vs_bench(
portfolio: portfolio_model.Portfolio,
benchmark: str = "SPY",
external_axes: Optional[plt.Axes] = None,
):
"""Display portfolio returns vs benchmark
Parameters
----------
portfolio: Portfolio
Custom portfolio object with trade list
benchmark: str
Symbol for benchmark. Defaults to SPY
external_axes: plt.Axes
Optional axes to display plot on
"""
if external_axes is None:
_, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
else:
ax = external_axes
portfolio.generate_holdings_from_trades()
portfolio.add_benchmark(benchmark)
cumulative_returns = (1 + portfolio.returns).cumprod()
benchmark_c_returns = (1 + portfolio.benchmark_returns).cumprod()
ax.plot(cumulative_returns.index, cumulative_returns, label="Portfolio")
ax.plot(benchmark_c_returns.index, benchmark_c_returns, label="Benchmark")
ax.set_ylabel("Cumulative Returns")
ax.legend(loc="upper left")
theme.style_primary_axis(ax)
if not external_axes:
theme.visualize_output()
@log_start_end(log=logger)
def display_allocation(
portfolio: portfolio_model.Portfolio,
export: str = "",
external_axes: Optional[plt.Axes] = None,
):
"""Display allocation of assets vs time
Parameters
----------
portfolio: Portfolio
Portfolio object with trades loaded
export: str
Format to export plot
external_axes: plt.Axes
Optional axes to display plot on
"""
portfolio.generate_holdings_from_trades()
all_holdings = pd.concat(
[
portfolio.portfolio["StockHoldings"],
portfolio.portfolio["ETFHoldings"],
portfolio.portfolio["CryptoHoldings"],
],
axis=1,
)
all_holdings = all_holdings.drop(columns=["temp"])
if external_axes is None:
_, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
else:
ax = external_axes
all_holdings.plot(ax=ax)
ax.set_title("Individual Asset Holdings")
ax.legend(loc="upper left")
ax.set_ylabel("Holdings ($)")
theme.style_primary_axis(ax)
if external_axes is None:
theme.visualize_output()
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"rolling",
)
@log_start_end(log=logger)
def display_rolling_stats(
portfolio: portfolio_model.Portfolio,
length: int = 60,
benchmark: str = "SPY",
risk_free_rate: float = 0,
external_axes: Optional[List[plt.Axes]] = None,
export: str = "",
):
"""Display portfolio returns vs benchmark
Parameters
----------
portfolio: Portfolio
Custom portfolio object with trade list
length: int
Length of rolling window
benchmark: str
Symbol for benchmark. Defaults to SPY
risk_free_rate: float
Value to use for risk free rate in sharpe/other calculations
external_axes: Optional[List[plt.Axes]]
Optional axes to display plot on
export: str
Export to file
"""
portfolio.generate_holdings_from_trades()
portfolio.add_benchmark(benchmark)
portfolio.add_rf(risk_free_rate)
if external_axes is None:
_, ax = plt.subplots(4, 1, figsize=(8, 8), dpi=PLOT_DPI, sharex=True)
else:
if len(external_axes) != 4:
console.print("[red]4 axes expected./n[/red]")
return
ax = external_axes
rolling_volatility = portfolio.returns.rolling(length).std()
rolling_volatility_bench = portfolio.benchmark_returns.rolling(length).std()
rolling_sharpe = portfolio.returns.rolling(length).apply(
lambda x: (x.mean() - risk_free_rate) / x.std()
)
rolling_sharpe_bench = portfolio.benchmark_returns.rolling(length).apply(
lambda x: (x.mean() - risk_free_rate) / x.std()
)
rolling_volatility.plot(ax=ax[1])
rolling_volatility_bench.plot(ax=ax[1])
ax[1].set_title("Rolling Volatility")
rolling_sharpe.plot(ax=ax[2])
rolling_sharpe_bench.plot(ax=ax[2])
ax[2].set_title("Rolling Sharpe Ratio")
# Rolling beta is defined as Cov(Port,Bench)/var(Bench)
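# e.g. with hypothetical numbers: if over a 60-day window Cov(Port, Bench) = 1.2e-4 and
# Var(Bench) = 1.0e-4, the rolling beta for that window is 1.2e-4 / 1.0e-4 = 1.2, i.e. the
# portfolio moved roughly 1.2x the benchmark.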
covs = (
pd.DataFrame(
{"Portfolio": portfolio.returns, "Benchmark": portfolio.benchmark_returns}
)
.dropna(axis=0)
.rolling(length)
.cov()
.unstack()
.dropna()
)
rolling_beta = covs["Portfolio"]["Benchmark"] / covs["Benchmark"]["Benchmark"]
rolling_beta.plot(ax=ax[3])
ax[3].set_title("Rolling Beta to Benchmark")
c_returns = (1 + portfolio.returns).cumprod()
bench_c_rets = (1 + portfolio.benchmark_returns).cumprod()
ax[0].plot(c_returns.index, c_returns)
ax[0].plot(bench_c_rets.index, bench_c_rets)
ax[0].set_title("Cumulative Returns")
if external_axes is None:
for a in ax[0], ax[1], ax[2]:
a.legend(["Portfolio", "Benchmark"], loc="upper left")
for a in ax[0], ax[1], ax[2], ax[3]:
a.set_xlim(portfolio.returns.index[0], portfolio.returns.index[-1])
a.set_xlabel("")
a.grid("on")
theme.style_primary_axis(a)
ax[3].set_xlabel("Date")
theme.visualize_output()
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"rolling",
)
@log_start_end(log=logger)
def display_drawdown(
holdings: pd.DataFrame,
export: str = "",
external_axes: Optional[List[plt.Axes]] = None,
):
"""Display drawdown curve
Parameters
----------
holdings: pd.DataFrame
Dataframe of holdings vs time
export: str
Format to export data
external_axes: Optional[List[plt.Axes]]
Optional axes to display plot on
"""
drawdown = portfolio_model.calculate_drawdown(holdings)
if external_axes is None:
_, ax = plt.subplots(2, 1, figsize=plot_autoscale(), dpi=PLOT_DPI, sharex=True)
else:
ax = external_axes
ax[0].plot(holdings.index, holdings)
ax[0].set_title("Holdings")
ax[1].plot(holdings.index, drawdown)
ax[1].fill_between(holdings.index, np.asarray(drawdown), alpha=0.4)
ax[1].set_title("Portfolio Drawdown")
theme.style_primary_axis(ax[1])
if external_axes is None:
theme.visualize_output()
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"dd",
)
#
# @log_start_end(log=logger)
# def plot_overall_return(
# comb: pd.DataFrame, m_tick: str, plot: bool = False
# ) -> ImageReader:
# """Generates overall return graph
#
# Parameters
# ----------
# comb : pd.DataFrame
# Dataframe with returns
# m_tick : str
# The ticker for the market asset
# plot : bool
# Whether to plot the graph or return it for PDF
#
# Returns
# ----------
# img : ImageReader
# Overall return graph
# """
# fig, ax = plt.subplots(figsize=(10, 5))
# ax.plot(comb.index, comb["return"], color="tab:blue", label="Portfolio")
# ax.plot(comb.index, comb[("Market", "Return")], color="orange", label=m_tick)
#
# ax.set_ylabel("", fontweight="bold", fontsize=12, color="black")
# ax.set_xlabel("")
# ax.yaxis.set_label_coords(-0.1, 0.5)
# ax.grid(True)
# ax.spines["top"].set_visible(False)
# ax.spines["right"].set_visible(False)
# ax.spines["bottom"].set_visible(False)
# ax.spines["left"].set_visible(False)
# fig.suptitle(
# "Cumulative Performance", y=0.99, fontweight="bold", fontsize=14, color="black"
# )
# ax.axhline(0, ls="-", lw=1, color="gray", zorder=1)
# ax.axhline(0, ls="--", lw=1, color="black", zorder=2)
# fig.set_facecolor("white")
# ax.set_title(
# f'{comb.index[:1][0].strftime('%Y/%m/%d')} - {comb.index[-1:][0].strftime('%Y/%m/%d')}',
# fontsize=12,
# color="gray",
# )
# ax.yaxis.set_major_formatter(mtick.PercentFormatter(1.0))
# ax.set_facecolor("white")
# ax.legend()
# fig.autofmt_xdate()
# if plot:
# plt.show()
# console.print("")
# return None
# imgdata = BytesIO()
# fig.savefig(imgdata, format="png")
# plt.close("all")
# imgdata.seek(0)
# return ImageReader(imgdata)
#
#
# @log_start_end(log=logger)
# def plot_rolling_beta(df: pd.DataFrame) -> ImageReader:
# """Returns a chart with the portfolio's rolling beta
#
# Parameters
# ----------
# df : pd.DataFrame
# The dataframe to be analyzed
#
# Returns
# ----------
# img : ImageReader
# Rolling beta graph
# """
#
# fig, ax = plt.subplots(figsize=(10, 5))
# ax.plot(
# df.index,
# df["total"],
# color="tab:blue",
# )
#
# ax.set_ylabel("", fontweight="bold", fontsize=12, color="black")
# ax.set_xlabel("")
# ax.yaxis.set_label_coords(-0.1, 0.5)
# ax.grid(True)
# ax.spines["top"].set_visible(False)
# ax.spines["right"].set_visible(False)
# ax.spines["bottom"].set_visible(False)
# ax.spines["left"].set_visible(False)
# fig.suptitle(
# "Rolling Beta of Stocks", y=0.99, fontweight="bold", fontsize=14, color="black"
# )
# ax.axhline(0, ls="-", lw=1, color="gray", zorder=1)
# ax.axhline(0, ls="--", lw=1, color="black", zorder=2)
# fig.set_facecolor("white")
# ax.set_title(
# f'{df.index[:1][0].strftime('%Y-%m-%d')} - {df.index[-1:][0].strftime('%Y-%m-%d')}',
# color="gray",
# )
# ax.set_facecolor("white")
# fig.autofmt_xdate()
# imgdata = BytesIO()
# fig.savefig(imgdata, format="png")
# plt.close("all")
# imgdata.seek(0)
# return ImageReader(imgdata)
#
#
# @log_start_end(log=logger)
# def plot_ef(
# stocks: List[str],
# variance: float,
# per_ret: float,
# rf_rate: float,
# period: str = "3mo",
# n_portfolios: int = 300,
# risk_free: bool = False,
# ):
# """Display efficient frontier
#
# Parameters
# ----------
# stocks : List[str]
# List of the stocks to be included in the weights
# variance : float
# The variance for the portfolio
# per_ret : float
# The portfolio's return for the portfolio
# rf_rate : float
# The risk free rate
# period : str
# The period to track
# n_portfolios : int
# The number of portfolios to generate
# risk_free : bool
# Include the risk-free asset
# """
# fig, ax = plt.subplots(figsize=(10, 5), dpi=PLOT_DPI)
# ef, rets, stds = optimizer_model.generate_random_portfolios(
# [x.upper() for x in stocks], period, n_portfolios
# )
# sharpes = rets / stds
# ax.scatter(stds, rets, marker=".", c=sharpes, cmap="viridis_r")
# plotting.plot_efficient_frontier(ef, ax=ax, show_assets=True)
# # Find the tangency portfolio
# ret_sharpe, std_sharpe, _ = ef.portfolio_performance(risk_free_rate=rf_rate)
# ax.scatter(std_sharpe, ret_sharpe, marker="*", s=100, c="r", label="Max Sharpe")
# plt.plot(variance, per_ret, "ro", label="Portfolio")
# # Add risk free line
# if risk_free:
# y = ret_sharpe * 1.2
# m = (ret_sharpe - rf_rate) / std_sharpe
# x2 = (y - rf_rate) / m
# x = [0, x2]
# y = [rf_rate, y]
# line = Line2D(x, y, color="#FF0000", label="Capital Allocation Line")
# ax.set_xlim(xmin=min(stds) * 0.8)
# ax.add_line(line)
# ax.set_title(f"Efficient Frontier simulating {n_portfolios} portfolios")
# ax.legend()
# fig.tight_layout()
# ax.grid(b=True, which="major", color="#666666", linestyle="-")
#
# if gtff.USE_ION:
# plt.ion()
#
# imgdata = BytesIO()
# fig.savefig(imgdata, format="png")
# plt.close("all")
# imgdata.seek(0)
# return ImageReader(imgdata)
# @log_start_end(log=logger)
# def display_allocation2(data: pd.DataFrame, graph: bool):
# """Displays allocation
# Parameters
# ----------
# data: pd.DataFrame
# The portfolio allocation dataframe
# graph: bool
# If pie chart shall be displayed with table"""
#
# print_rich_table(data, headers=list(data.columns), title="Allocation")
# console.print("")
#
# if graph:
# graph_data = data[data["pct_allocation"] >= 5].copy()
# if not graph_data.empty:
# graph_data.loc["Other"] = [
# "NA",
# data["value"].sum() - graph_data["value"].sum(),
# 100 - graph_data["value"].sum(),
# ]
# labels = graph_data.index.values
# sizes = graph_data["value"].to_list()
# else:
# labels = data.index.values
# sizes = data["value"].to_list()
# fig, ax = plt.subplots()
# ax.pie(sizes, labels=labels, autopct="%1.1f%%", startangle=90)
# ax.axis("equal")
# ax.set_title("Portfolio Allocation")
# fig.set_tight_layout(True)
#
# plt.show()
#
# class Report:
# @log_start_end(log=logger)
# def __init__(self, df: pd.DataFrame, hist: pd.DataFrame, m_tick: str):
# """Generate financial reports.
# Financial reports allow users to show the how they have been performing in
# trades. This allows for a simple way to show progress and analyze metrics
# that track portfolio performance
#
# Parameters
# ----------
# df : pd.DataFrame
# The dataframe with previous holdings information
# hist : pd.DataFrame
# The dataframe with previous prices for stocks in the portfolio
# df_m : pd.DataFrame
# Dataframe of benchmark
# n : int
# The number of days to analyze
#
# Attributes
# ----------
# generate_report : None
# Generates a report with the given parameters
# generate_pg1 : None
# Creates the first page of the PDF report
# generate_pg2 : None
# Creates the second page of the PDF report
#
# """
# self.df = df
# self.hist = hist
# self.m_tick = m_tick
# self.df_m = yfinance_model.get_market(self.df.index[0], self.m_tick)
# # self.returns, self.variance = portfolio_model.get_return(df, self.df_m, n)
# self.returns = pd.DataFrame()
# self.rf = get_rf()
# self.betas = portfolio_model.get_rolling_beta(
# self.df, self.hist, self.df_m, 365
# )
#
# @log_start_end(log=logger)
# def generate_report(self) -> None:
# d = path.dirname(path.abspath(__file__)).replace(
# "gamestonk_terminal", "exports"
# )
# loc = path.abspath(
# path.join(
# d,
# f"ar_{datetime.now().strftime("%Y%m%d_%H%M%S")}.pdf",
# )
# )
# report = canvas.Canvas(loc, pagesize=letter)
# reportlab_helpers.base_format(report, "Overview")
# self.generate_pg1(report)
# self.generate_pg2(report)
# report.save()
# console.print("File save in:\n", loc, "\n")
#
# @log_start_end(log=logger)
# def generate_pg1(self, report: canvas.Canvas) -> None:
# report.drawImage(
# plot_overall_return(self.returns, self.m_tick, False), 15, 400, 600, 300
# )
# main_text = portfolio_model.get_main_text(self.returns)
# reportlab_helpers.draw_paragraph(report, main_text, 30, 410, 550, 200)
# current_return = self.returns["return"][-1]
# beta = self.betas["total"][-1]
# market_return = self.returns[("Market", "Return")][-1]
# sharpe = f"{(current_return - self.rf)/ np.std(self.returns["return"]):.2f}"
# treynor = f"{(current_return - self.rf)/ beta:.2f}" if beta > 0 else "N/A"
# alpha = f"{current_return - (self.rf + beta * (market_return - self.rf)):.2f}"
# information = (
# f"{float(alpha)/ (np.std(self.returns["return"] - market_return)):.2f}"
# )
# perf = [
# ["Sharpe", sharpe],
# ["Treynor", treynor],
# ["Alpha", alpha],
# ["Information", information],
# ]
# reportlab_helpers.draw_table(report, "Performance", 540, 300, 30, perf)
# reportlab_helpers.draw_paragraph(
# report, portfolio_model.performance_text, 140, 290, 460, 200
# )
# report.showPage()
#
# @log_start_end(log=logger)
# def generate_pg2(self, report: canvas.Canvas) -> None:
# reportlab_helpers.base_format(report, "Portfolio Analysis")
# if "Holding" in self.df.columns:
# report.drawImage(plot_rolling_beta(self.betas), 15, 400, 600, 300)
# main_t = portfolio_model.get_beta_text(self.betas)
# reportlab_helpers.draw_paragraph(report, main_t, 30, 410, 550, 200)
# # report.drawImage(plot_ef(uniques, self.variance, self.returns["return"][-1], self.rf), 15, 65, 600, 300)
|
"""Portfolio View"""
__docformat__ = "numpy"
import logging
from typing import List, Optional
import os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from gamestonk_terminal.config_terminal import theme
from gamestonk_terminal.config_plot import PLOT_DPI
from gamestonk_terminal.portfolio import (
portfolio_model,
)
from gamestonk_terminal.helper_funcs import (
plot_autoscale,
export_data,
)
from gamestonk_terminal.decorators import log_start_end
from gamestonk_terminal.rich_config import console
# from reportlab.lib.pagesizes import letter
# from reportlab.pdfgen import canvas
# from reportlab.lib.utils import ImageReader
# from gamestonk_terminal.portfolio import reportlab_helpers
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def load_info():
"""Prints instructions to load a CSV
Returns
----------
text : str
Information on how to load a csv
"""
text = """
In order to load a CSV do the following:
1. Add headers to the first row, below is data for each column:\n
\t1. Identifier for the asset (such as a stock ticker)
\t2. Type of asset (stock, bond, option, crypto)
\t3. The volume of the asset transacted
\t4. The buy date in yyyy/mm/dd
\t5. The Price paid for the asset
\t6. Any fees paid during the transaction
\t7. A premium paid or received if this was an option
\t8. Whether the asset was bought (covered) or sold (shorted)\n
2. Place this file in gamestonk_terminal/portfolio/portfolios\n
"""
console.print(text)
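# A hypothetical row matching the columns described above (header names are illustrative,
# not required by the loader):
# Name,Type,Quantity,Date,Price,Fees,Premium,Side
# AAPL,stock,10,2021/01/04,129.41,0.00,0.00,Buy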
@log_start_end(log=logger)
def display_returns_vs_bench(
portfolio: portfolio_model.Portfolio,
benchmark: str = "SPY",
external_axes: Optional[plt.Axes] = None,
):
"""Display portfolio returns vs benchmark
Parameters
----------
portfolio: Portfolio
Custom portfolio object with trade list
benchmark: str
Symbol for benchmark. Defaults to SPY
external_axes: plt.Axes
Optional axes to display plot on
"""
if external_axes is None:
_, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
else:
ax = external_axes
portfolio.generate_holdings_from_trades()
portfolio.add_benchmark(benchmark)
cumulative_returns = (1 + portfolio.returns).cumprod()
benchmark_c_returns = (1 + portfolio.benchmark_returns).cumprod()
ax.plot(cumulative_returns.index, cumulative_returns, label="Portfolio")
ax.plot(benchmark_c_returns.index, benchmark_c_returns, label="Benchmark")
ax.set_ylabel("Cumulative Returns")
ax.legend(loc="upper left")
theme.style_primary_axis(ax)
if not external_axes:
theme.visualize_output()
@log_start_end(log=logger)
def display_allocation(
portfolio: portfolio_model.Portfolio,
export: str = "",
external_axes: Optional[plt.Axes] = None,
):
"""Display allocation of assets vs time
Parameters
----------
portfolio: Portfolio
Portfolio object with trades loaded
export: str
Format to export plot
external_axes: plt.Axes
Optional axes to display plot on
"""
portfolio.generate_holdings_from_trades()
all_holdings = pd.concat(
[
portfolio.portfolio["StockHoldings"],
portfolio.portfolio["ETFHoldings"],
portfolio.portfolio["CryptoHoldings"],
],
axis=1,
)
all_holdings = all_holdings.drop(columns=["temp"])
if external_axes is None:
_, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
else:
ax = external_axes
all_holdings.plot(ax=ax)
ax.set_title("Individual Asset Holdings")
ax.legend(loc="upper left")
ax.set_ylabel("Holdings ($)")
theme.style_primary_axis(ax)
if external_axes is None:
theme.visualize_output()
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"rolling",
)
@log_start_end(log=logger)
def display_rolling_stats(
portfolio: portfolio_model.Portfolio,
length: int = 60,
benchmark: str = "SPY",
risk_free_rate: float = 0,
external_axes: Optional[List[plt.Axes]] = None,
export: str = "",
):
"""Display portfolio returns vs benchmark
Parameters
----------
portfolio: Portfolio
Custom portfolio object with trade list
length: int
Length of rolling window
benchmark: str
Symbol for benchmark. Defaults to SPY
risk_free_rate: float
Value to use for risk free rate in sharpe/other calculations
external_axes: Optional[List[plt.Axes]]
Optional axes to display plot on
export: str
Export to file
"""
portfolio.generate_holdings_from_trades()
portfolio.add_benchmark(benchmark)
portfolio.add_rf(risk_free_rate)
if external_axes is None:
_, ax = plt.subplots(4, 1, figsize=(8, 8), dpi=PLOT_DPI, sharex=True)
else:
if len(external_axes) != 4:
            console.print("[red]4 axes expected.\n[/red]")
return
ax = external_axes
rolling_volatility = portfolio.returns.rolling(length).std()
rolling_volatility_bench = portfolio.benchmark_returns.rolling(length).std()
rolling_sharpe = portfolio.returns.rolling(length).apply(
lambda x: (x.mean() - risk_free_rate) / x.std()
)
rolling_sharpe_bench = portfolio.benchmark_returns.rolling(length).apply(
lambda x: (x.mean() - risk_free_rate) / x.std()
)
rolling_volatility.plot(ax=ax[1])
rolling_volatility_bench.plot(ax=ax[1])
ax[1].set_title("Rolling Volatility")
rolling_sharpe.plot(ax=ax[2])
rolling_sharpe_bench.plot(ax=ax[2])
ax[2].set_title("Rolling Sharpe Ratio")
# Rolling beta is defined as Cov(Port,Bench)/var(Bench)
covs = (
pd.DataFrame(
{"Portfolio": portfolio.returns, "Benchmark": portfolio.benchmark_returns}
)
.dropna(axis=0)
.rolling(length)
.cov()
.unstack()
.dropna()
)
rolling_beta = covs["Portfolio"]["Benchmark"] / covs["Benchmark"]["Benchmark"]
rolling_beta.plot(ax=ax[3])
ax[3].set_title("Rolling Beta to Benchmark")
c_returns = (1 + portfolio.returns).cumprod()
bench_c_rets = (1 + portfolio.benchmark_returns).cumprod()
ax[0].plot(c_returns.index, c_returns)
ax[0].plot(bench_c_rets.index, bench_c_rets)
ax[0].set_title("Cumulative Returns")
if external_axes is None:
for a in ax[0], ax[1], ax[2]:
a.legend(["Portfolio", "Benchmark"], loc="upper left")
for a in ax[0], ax[1], ax[2], ax[3]:
a.set_xlim(portfolio.returns.index[0], portfolio.returns.index[-1])
            a.set_xlabel("")
a.grid("on")
theme.style_primary_axis(a)
ax[3].set_xlabel("Date")
theme.visualize_output()
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"rolling",
)
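# A self-contained sketch (pandas/numpy only, synthetic data) of the rolling beta
# used above, i.e. Cov(Portfolio, Benchmark) / Var(Benchmark) over a rolling window.
# The window length and the simulated series are illustrative assumptions.
#
#   import numpy as np
#   import pandas as pd
#
#   rng = np.random.default_rng(0)
#   bench = pd.Series(rng.normal(0, 0.01, 250))
#   port = 1.2 * bench + pd.Series(rng.normal(0, 0.005, 250))
#   window = 60
#   rolling_beta = port.rolling(window).cov(bench) / bench.rolling(window).var()
#   # rolling_beta hovers around the simulated beta of ~1.2 once the window fills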
@log_start_end(log=logger)
def display_drawdown(
holdings: pd.DataFrame,
export: str = "",
external_axes: Optional[List[plt.Axes]] = None,
):
"""Display drawdown curve
Parameters
----------
holdings: pd.DataFrame
Dataframe of holdings vs time
export: str
Format to export data
    external_axes: Optional[List[plt.Axes]]
        Optional list of two axes to display plots on
"""
drawdown = portfolio_model.calculate_drawdown(holdings)
if external_axes is None:
_, ax = plt.subplots(2, 1, figsize=plot_autoscale(), dpi=PLOT_DPI, sharex=True)
else:
ax = external_axes
ax[0].plot(holdings.index, holdings)
ax[0].set_title("Holdings")
ax[1].plot(holdings.index, drawdown)
ax[1].fill_between(holdings.index, np.asarray(drawdown), alpha=0.4)
ax[1].set_title("Portfolio Drawdown")
theme.style_primary_axis(ax[1])
if external_axes is None:
theme.visualize_output()
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"dd",
)
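# `portfolio_model.calculate_drawdown` is defined elsewhere; a typical implementation
# (shown here only as an assumption of the standard running-maximum approach) would be:
#
#   import pandas as pd
#
#   def calculate_drawdown_sketch(holdings: pd.Series) -> pd.Series:
#       running_max = holdings.cummax()
#       return (holdings - running_max) / running_max  # <= 0, equal to 0 at new highs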
#
# @log_start_end(log=logger)
# def plot_overall_return(
# comb: pd.DataFrame, m_tick: str, plot: bool = False
# ) -> ImageReader:
# """Generates overall return graph
#
# Parameters
# ----------
# comb : pd.DataFrame
# Dataframe with returns
# m_tick : str
# The ticker for the market asset
# plot : bool
# Whether to plot the graph or return it for PDF
#
# Returns
# ----------
# img : ImageReader
#         Overall return graph
# """
# fig, ax = plt.subplots(figsize=(10, 5))
# ax.plot(comb.index, comb["return"], color="tab:blue", label="Portfolio")
# ax.plot(comb.index, comb[("Market", "Return")], color="orange", label=m_tick)
#
# ax.set_ylabel("", fontweight="bold", fontsize=12, color="black")
# ax.set_xlabel("")
# ax.yaxis.set_label_coords(-0.1, 0.5)
# ax.grid(True)
# ax.spines["top"].set_visible(False)
# ax.spines["right"].set_visible(False)
# ax.spines["bottom"].set_visible(False)
# ax.spines["left"].set_visible(False)
# fig.suptitle(
# "Cumulative Performance", y=0.99, fontweight="bold", fontsize=14, color="black"
# )
# ax.axhline(0, ls="-", lw=1, color="gray", zorder=1)
# ax.axhline(0, ls="--", lw=1, color="black", zorder=2)
# fig.set_facecolor("white")
# ax.set_title(
# f'{comb.index[:1][0].strftime("%Y/%m/%d")} - {comb.index[-1:][0].strftime("%Y/%m/%d")}',
# fontsize=12,
# color="gray",
# )
# ax.yaxis.set_major_formatter(mtick.PercentFormatter(1.0))
# ax.set_facecolor("white")
# ax.legend()
# fig.autofmt_xdate()
# if plot:
# plt.show()
# console.print("")
# return None
# imgdata = BytesIO()
# fig.savefig(imgdata, format="png")
# plt.close("all")
# imgdata.seek(0)
# return ImageReader(imgdata)
#
#
# @log_start_end(log=logger)
# def plot_rolling_beta(df: pd.DataFrame) -> ImageReader:
# """Returns a chart with the portfolio's rolling beta
#
# Parameters
# ----------
# df : pd.DataFrame
# The dataframe to be analyzed
#
# Returns
# ----------
# img : ImageReader
# Rolling beta graph
# """
#
# fig, ax = plt.subplots(figsize=(10, 5))
# ax.plot(
# df.index,
# df["total"],
# color="tab:blue",
# )
#
# ax.set_ylabel("", fontweight="bold", fontsize=12, color="black")
# ax.set_xlabel("")
# ax.yaxis.set_label_coords(-0.1, 0.5)
# ax.grid(True)
# ax.spines["top"].set_visible(False)
# ax.spines["right"].set_visible(False)
# ax.spines["bottom"].set_visible(False)
# ax.spines["left"].set_visible(False)
# fig.suptitle(
# "Rolling Beta of Stocks", y=0.99, fontweight="bold", fontsize=14, color="black"
# )
# ax.axhline(0, ls="-", lw=1, color="gray", zorder=1)
# ax.axhline(0, ls="--", lw=1, color="black", zorder=2)
# fig.set_facecolor("white")
# ax.set_title(
# f'{df.index[:1][0].strftime("%Y-%m-%d")} - {df.index[-1:][0].strftime("%Y-%m-%d")}',
# color="gray",
# )
# ax.set_facecolor("white")
# fig.autofmt_xdate()
# imgdata = BytesIO()
# fig.savefig(imgdata, format="png")
# plt.close("all")
# imgdata.seek(0)
# return ImageReader(imgdata)
#
#
# @log_start_end(log=logger)
# def plot_ef(
# stocks: List[str],
# variance: float,
# per_ret: float,
# rf_rate: float,
# period: str = "3mo",
# n_portfolios: int = 300,
# risk_free: bool = False,
# ):
# """Display efficient frontier
#
# Parameters
# ----------
# stocks : List[str]
# List of the stocks to be included in the weights
# variance : float
# The variance for the portfolio
# per_ret : float
# The portfolio's return for the portfolio
# rf_rate : float
# The risk free rate
# period : str
# The period to track
# n_portfolios : int
# The number of portfolios to generate
# risk_free : bool
# Include the risk-free asset
# """
# fig, ax = plt.subplots(figsize=(10, 5), dpi=PLOT_DPI)
# ef, rets, stds = optimizer_model.generate_random_portfolios(
# [x.upper() for x in stocks], period, n_portfolios
# )
# sharpes = rets / stds
# ax.scatter(stds, rets, marker=".", c=sharpes, cmap="viridis_r")
# plotting.plot_efficient_frontier(ef, ax=ax, show_assets=True)
# # Find the tangency portfolio
# ret_sharpe, std_sharpe, _ = ef.portfolio_performance(risk_free_rate=rf_rate)
# ax.scatter(std_sharpe, ret_sharpe, marker="*", s=100, c="r", label="Max Sharpe")
# plt.plot(variance, per_ret, "ro", label="Portfolio")
# # Add risk free line
# if risk_free:
# y = ret_sharpe * 1.2
# m = (ret_sharpe - rf_rate) / std_sharpe
# x2 = (y - rf_rate) / m
# x = [0, x2]
# y = [rf_rate, y]
# line = Line2D(x, y, color="#FF0000", label="Capital Allocation Line")
# ax.set_xlim(xmin=min(stds) * 0.8)
# ax.add_line(line)
# ax.set_title(f"Efficient Frontier simulating {n_portfolios} portfolios")
# ax.legend()
# fig.tight_layout()
# ax.grid(b=True, which="major", color="#666666", linestyle="-")
#
# if gtff.USE_ION:
# plt.ion()
#
# imgdata = BytesIO()
# fig.savefig(imgdata, format="png")
# plt.close("all")
# imgdata.seek(0)
# return ImageReader(imgdata)
# @log_start_end(log=logger)
# def display_allocation2(data: pd.DataFrame, graph: bool):
# """Displays allocation
# Parameters
# ----------
# data: pd.DataFrame
# The portfolio allocation dataframe
# graph: bool
# If pie chart shall be displayed with table"""
#
# print_rich_table(data, headers=list(data.columns), title="Allocation")
# console.print("")
#
# if graph:
# graph_data = data[data["pct_allocation"] >= 5].copy()
# if not graph_data.empty:
# graph_data.loc["Other"] = [
# "NA",
# data["value"].sum() - graph_data["value"].sum(),
# 100 - graph_data["value"].sum(),
# ]
# labels = graph_data.index.values
# sizes = graph_data["value"].to_list()
# else:
# labels = data.index.values
# sizes = data["value"].to_list()
# fig, ax = plt.subplots()
# ax.pie(sizes, labels=labels, autopct="%1.1f%%", startangle=90)
# ax.axis("equal")
# ax.set_title("Portfolio Allocation")
# fig.set_tight_layout(True)
#
# plt.show()
#
# class Report:
# @log_start_end(log=logger)
# def __init__(self, df: pd.DataFrame, hist: pd.DataFrame, m_tick: str):
# """Generate financial reports.
#         Financial reports allow users to show how they have been performing in
# trades. This allows for a simple way to show progress and analyze metrics
# that track portfolio performance
#
# Parameters
# ----------
# df : pd.DataFrame
# The dataframe with previous holdings information
# hist : pd.DataFrame
# The dataframe with previous prices for stocks in the portfolio
# df_m : pd.DataFrame
# Dataframe of benchmark
# n : int
# The number of days to analyze
#
# Attributes
# ----------
# generate_report : None
# Generates a report with the given parameters
# generate_pg1 : None
# Creates the first page of the PDF report
# generate_pg2 : None
# Creates the second page of the PDF report
#
# """
# self.df = df
# self.hist = hist
# self.m_tick = m_tick
# self.df_m = yfinance_model.get_market(self.df.index[0], self.m_tick)
# # self.returns, self.variance = portfolio_model.get_return(df, self.df_m, n)
# self.returns = pd.DataFrame()
# self.rf = get_rf()
# self.betas = portfolio_model.get_rolling_beta(
# self.df, self.hist, self.df_m, 365
# )
#
# @log_start_end(log=logger)
# def generate_report(self) -> None:
# d = path.dirname(path.abspath(__file__)).replace(
# "gamestonk_terminal", "exports"
# )
# loc = path.abspath(
# path.join(
# d,
# f"ar_{datetime.now().strftime('%Y%m%d_%H%M%S')}.pdf",
# )
# )
# report = canvas.Canvas(loc, pagesize=letter)
# reportlab_helpers.base_format(report, "Overview")
# self.generate_pg1(report)
# self.generate_pg2(report)
# report.save()
#         console.print("File saved in:\n", loc, "\n")
#
# @log_start_end(log=logger)
# def generate_pg1(self, report: canvas.Canvas) -> None:
# report.drawImage(
# plot_overall_return(self.returns, self.m_tick, False), 15, 400, 600, 300
# )
# main_text = portfolio_model.get_main_text(self.returns)
# reportlab_helpers.draw_paragraph(report, main_text, 30, 410, 550, 200)
# current_return = self.returns["return"][-1]
# beta = self.betas["total"][-1]
# market_return = self.returns[("Market", "Return")][-1]
# sharpe = f"{(current_return - self.rf)/ np.std(self.returns['return']):.2f}"
# treynor = f"{(current_return - self.rf)/ beta:.2f}" if beta > 0 else "N/A"
# alpha = f"{current_return - (self.rf + beta * (market_return - self.rf)):.2f}"
# information = (
# f"{float(alpha)/ (np.std(self.returns['return'] - market_return)):.2f}"
# )
# perf = [
# ["Sharpe", sharpe],
# ["Treynor", treynor],
# ["Alpha", alpha],
# ["Information", information],
# ]
# reportlab_helpers.draw_table(report, "Performance", 540, 300, 30, perf)
# reportlab_helpers.draw_paragraph(
# report, portfolio_model.performance_text, 140, 290, 460, 200
# )
# report.showPage()
#
# @log_start_end(log=logger)
# def generate_pg2(self, report: canvas.Canvas) -> None:
# reportlab_helpers.base_format(report, "Portfolio Analysis")
# if "Holding" in self.df.columns:
# report.drawImage(plot_rolling_beta(self.betas), 15, 400, 600, 300)
# main_t = portfolio_model.get_beta_text(self.betas)
# reportlab_helpers.draw_paragraph(report, main_t, 30, 410, 550, 200)
# # report.drawImage(plot_ef(uniques, self.variance, self.returns["return"][-1], self.rf), 15, 65, 600, 300)
|
##################################################
# Import Own Assets
##################################################
from hyperparameter_hunter import exceptions
from hyperparameter_hunter.settings import G
from hyperparameter_hunter.utils.general_utils import now_time, expand_mins_secs
##################################################
# Import Miscellaneous Assets
##################################################
from contextlib import suppress
from datetime import datetime
import inspect
import logging
import os.path
import sys
class ReportingHandler(object):
def __init__(
self,
heartbeat_path=None,
float_format="{:.5f}",
console_params=None,
heartbeat_params=None,
add_frame=False,
):
"""Class in control of logging methods, log formatting, and initializing Experiment logging
Parameters
----------
heartbeat_path: Str path, or None, default=None
If string and valid heartbeat path, logging messages will also be saved in this file
float_format: String, default='{:.5f}'
If not default, must be a valid formatting string for floating point values. If invalid,
default will be used
console_params: Dict, or None, default=None
Parameters passed to :meth:`_configure_console_handler`
heartbeat_params: Dict, or None, default=None
Parameters passed to :meth:`_configure_heartbeat_handler`
add_frame: Boolean, default=False
If True, whenever :meth:`log` is called, the source of the call will be prepended to
the content being logged"""
self.reporting_type = "logging" # TODO: Add `reporting_type` kwarg (logging, advanced)
self.heartbeat_path = heartbeat_path
self.float_format = float_format
self.console_params = console_params or {}
self.heartbeat_params = heartbeat_params or {}
self.add_frame = add_frame
self._validate_parameters()
self._configure_reporting_type()
def _validate_parameters(self):
"""Ensure all logging parameters are properly formatted"""
#################### reporting_type ####################
valid_types = ["logging", "standard", "advanced"]
if not isinstance(self.reporting_type, str):
raise TypeError(f"reporting_type must be a str. Received {self.reporting_type}")
if self.reporting_type not in valid_types:
raise ValueError(f"reporting_type must be in {valid_types}, not {self.reporting_type}")
#################### heartbeat_path ####################
if self.heartbeat_path is not None:
if not isinstance(self.heartbeat_path, str):
raise TypeError(f"heartbeat_path must be a str. Received {self.heartbeat_path}")
head, tail = os.path.split(self.heartbeat_path)
if not tail.endswith(".log"):
raise ValueError(f"heartbeat_path must end in '.log'. Given {self.heartbeat_path}")
if not os.path.exists(head):
raise FileNotFoundError(
f"heartbeat_path must start with an existing dir. Given {self.heartbeat_path}"
)
#################### float_format ####################
if not isinstance(self.float_format, str):
raise TypeError(f"float_format must be a format str. Received {self.float_format}")
if (not self.float_format.startswith("{")) or (not self.float_format.endswith("}")):
            raise ValueError(f"float_format must be inside '{{' and '}}'. Got {self.float_format}")
#################### console_params ####################
if not isinstance(self.console_params, dict):
raise TypeError(f"console_params must be dict or None. Given {self.console_params}")
#################### heartbeat_params ####################
if not isinstance(self.heartbeat_params, dict):
raise TypeError(f"heartbeat_params must be dict or None. Given {self.heartbeat_params}")
def _configure_reporting_type(self):
"""Set placeholder logging methods to :attr:`reporting_type` specs and initialize logging"""
if self.reporting_type == "standard":
raise ValueError("Standard logging is not yet implemented. Please choose 'logging'")
# setattr(self, 'log', self._standard_log)
# setattr(self, 'debug', self._standard_debug)
# setattr(self, 'warn', self._standard_warn)
elif self.reporting_type == "logging":
setattr(self, "log", self._logging_log)
setattr(self, "debug", self._logging_debug)
setattr(self, "warn", self._logging_warn)
self._initialize_logging_logging()
elif self.reporting_type == "advanced":
raise ValueError("Advanced logging unimplemented. Please use 'logging'")
def _initialize_logging_logging(self):
"""Initialize and configure logging to be handled by the `logging` library"""
#################### Clear Logging Configuration ####################
root = logging.getLogger()
list(map(root.removeHandler, root.handlers[:]))
list(map(root.removeFilter, root.filters[:]))
#################### Configure Logging ####################
exceptions.hook_exception_handler()
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.DEBUG)
handlers = [self._configure_console_handler(**self.console_params)]
# Suppress FileExistsError - Raised when self.heartbeat_path is None, meaning heartbeat blacklisted
with suppress(FileExistsError):
handlers.append(self._configure_heartbeat_handler(**self.heartbeat_params))
logging.basicConfig(handlers=handlers, level=logging.DEBUG)
self.debug("Logging Logging has been initialized!")
# noinspection PyUnusedLocal
@staticmethod
def _configure_console_handler(level="INFO", fmt=None, datefmt="%H:%M:%S", style="%", **kwargs):
"""Configure the console handler in charge of printing log messages
Parameters
----------
        level: String, or Int, default='INFO'
Minimum message level for the console. Passed to :meth:`logging.StreamHandler.setlevel`
fmt: String, or None, default=None
Message formatting string for the console. Passed to :meth:`logging.Formatter.__init__`
datefmt: String, or None, default="%H:%M:%S"
Date formatting string for the console. Passed to :meth:`logging.Formatter.__init__`.
For the `logging` library default, use `datefmt=None` ("%Y-%m-%d %H:%M:%S" + <ms>)
style: String, default='%'
Type of string formatting used. Passed to :meth:`logging.Formatter.__init__`
**kwargs: Dict
Extra keyword arguments
Returns
-------
console_handler: `logging.StreamHandler` instance
The instantiated handler for the console"""
console_handler = logging.StreamHandler(stream=sys.stdout)
console_handler.setLevel(level)
fmt = fmt or "<%(asctime)s> %(message)s"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt, style=style)
console_handler.setFormatter(formatter)
return console_handler
# noinspection PyUnusedLocal
def _configure_heartbeat_handler(
self, level="DEBUG", fmt=None, datefmt=None, style="%", **kwargs
):
"""Configure the file handler in charge of adding log messages to the heartbeat file
Parameters
----------
level: String, or Int, default='DEBUG'
Minimum message level for the heartbeat file. Passed to
:meth:`logging.FileHandler.setlevel`
fmt: String, or None, default=None
Message formatting string for the heartbeat file. Passed to
:meth:`logging.Formatter.__init__`
datefmt: String, or None, default=None
Date formatting string for the heartbeat file. Passed to
:meth:`logging.Formatter.__init__`
style: String, default='%'
Type of string formatting used. Passed to :meth:`logging.Formatter.__init__`
**kwargs: Dict
Extra keyword arguments
Returns
-------
file_handler: `logging.FileHandler` instance
The instantiated handler for the heartbeat file"""
if self.heartbeat_path is None:
raise FileExistsError
file_handler = logging.FileHandler(self.heartbeat_path, mode="w")
file_handler.setLevel(level)
fmt = fmt or "<%(asctime)s> %(levelname)-8s - %(message)s"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt, style=style)
file_handler.setFormatter(formatter)
return file_handler
##################################################
# Placeholder Methods:
##################################################
def log(self, content, **kwargs):
"""Placeholder method before proper initialization"""
def debug(self, content, **kwargs):
"""Placeholder method before proper initialization"""
def warn(self, content, **kwargs):
"""Placeholder method before proper initialization"""
##################################################
# Logging-Logging Methods:
##################################################
# noinspection PyUnusedLocal
def _logging_log(
self, content, verbose_threshold=None, previous_frame=None, add_time=False, **kwargs
):
"""Log an info message via the `logging` library
Parameters
----------
content: String
The message to log
verbose_threshold: Int, or None, default=None
If None, `content` logged normally. If int and `G.Env.verbose` >= `verbose_threshold`,
`content` is logged normally. Else if int and `G.Env.verbose` < `verbose_threshold`,
then `content` is logged on the `logging.debug` level, instead of `logging.info`
previous_frame: Frame, or None, default=None
The frame preceding the log call. If not provided, it will be inferred
add_time: Boolean, default=False
If True, the current time will be added to `content` before logging
**kwargs: Dict
Extra keyword arguments"""
if self.add_frame is True:
previous_frame = previous_frame or inspect.currentframe().f_back
try:
frame_source = format_frame_source(previous_frame)
finally:
del previous_frame
content = f"{frame_source} - {content}"
content = add_time_to_content(content, add_time=add_time)
if (verbose_threshold is None) or (G.Env.verbose >= verbose_threshold):
logging.info(content)
else:
logging.debug(content)
# noinspection PyUnusedLocal
def _logging_debug(self, content, previous_frame=None, add_time=False, **kwargs):
"""Log a debug message via the `logging` library
Parameters
----------
content: String
The message to log
previous_frame: Frame, or None, default=None
The frame preceding the debug call. If not provided, it will be inferred
add_time: Boolean, default=False
If True, the current time will be added to `content` before logging
**kwargs: Dict
Extra keyword arguments"""
if self.add_frame is True:
previous_frame = previous_frame or inspect.currentframe().f_back
try:
frame_source = format_frame_source(previous_frame)
finally:
del previous_frame
content = f"{frame_source} - {content}"
content = add_time_to_content(content, add_time=add_time)
logging.debug(content)
# noinspection PyUnusedLocal
def _logging_warn(self, content, **kwargs):
"""Log a warning message via the `logging` library
Parameters
----------
content: String
The message to log
**kwargs: Dict
Extra keyword arguments"""
if self.add_frame is True:
previous_frame = inspect.currentframe().f_back
try:
frame_source = format_frame_source(previous_frame)
finally:
del previous_frame
content = f"{frame_source} - {content}"
logging.warning(content)
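# Illustrative usage of ReportingHandler (not taken from the library docs): constructing
# the handler reconfigures the root logger, so it is normally created once per session.
# The heartbeat path below is a hypothetical example and its directory must exist.
#
#   reporter = ReportingHandler(heartbeat_path="HyperparameterHunterAssets/Heartbeat.log")
#   reporter.log("Experiment started", add_time=True)
#   reporter.debug("Written at DEBUG level (console shows INFO+, heartbeat shows DEBUG+)")
#   reporter.warn("Something looks off")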
class _Color:
"""Object defining color codes for use with logging"""
BLUE = "\033[34m"
CYAN = "\033[36m"
GREEN = "\033[32m"
MAGENTA = "\033[35m"
RED = "\033[31m"
STOP = "\033[0m"
class OptimizationReporter:
def __init__(self, parameter_names, verbose=1, show_experiment_id=8, do_maximize=True):
"""A MixIn class for reporting the results of hyperparameter optimization rounds
Parameters
----------
parameter_names: List
The names of the hyperparameters being evaluated and optimized
verbose: Int in [0, 1, 2], default=1
If 0, all but critical logging is silenced. If 1, normal logging is performed. If 2,
detailed logging is performed
show_experiment_id: Int, or Boolean, default=8
If True, the experiment_id will be printed in each result row. If False, it will not.
If int, the first `show_experiment_id`-many characters of each experiment_id will be
printed in each row
do_maximize: Boolean, default=True
If False, smaller metric values will be considered preferred and will be highlighted to
stand out. Else larger metric values will be treated as preferred"""
self.original_parameter_names = parameter_names
self.verbose = verbose
self.show_experiment_id = (
36 if (show_experiment_id is True or show_experiment_id > 36) else show_experiment_id
)
self.do_maximize = do_maximize
self.end = " | "
self.y_max = None
self.x_max = None
self.iteration = 0
self.start_time = datetime.now()
self.last_round = datetime.now()
skip = ("model_init_params", "model_extra_params", "feature_engineer", "feature_selector")
self.parameter_names = [_[1:] if _[0] in skip else _ for _ in self.original_parameter_names]
self.parameter_names = [_[1:] if _[0] == "params" else _ for _ in self.parameter_names]
self.parameter_names = [
_[0] if len(_) == 1 else str(_).replace("'", "").replace('"', "")
for _ in self.parameter_names
]
self.sizes = [max(len(_), 7) for _ in self.parameter_names]
self.sorted_indexes = sorted(
range(len(self.parameter_names)), key=self.parameter_names.__getitem__
)
def print_saved_results_header(self):
"""Print a header signifying that saved Experiment results are being read"""
header = f"{_Color.RED}Saved Result Files{_Color.STOP}"
self.print_header(header, (_Color.RED + "_" * self._line_len() + _Color.STOP))
def print_random_points_header(self):
"""Print a header signifying that random point evaluation rounds are starting"""
header = f"{_Color.RED}Random Point Evaluation{_Color.STOP}"
self.print_header(header, (_Color.RED + "_" * self._line_len() + _Color.STOP))
def print_optimization_header(self):
"""Print a header signifying that Optimization rounds are starting"""
header = f"{_Color.RED}Hyperparameter Optimization{_Color.STOP}"
self.print_header(header, (_Color.RED + "_" * self._line_len() + _Color.STOP))
def _line_len(self):
"""Calculate number of characters a header's underlining should span
Returns
-------
line_len: Int
The number of characters the line should span"""
line_len = 29
line_len += sum([_ + 5 for _ in self.sizes])
line_len += self.show_experiment_id + 3 if self.show_experiment_id else 0
return line_len
def print_header(self, header, line):
"""Utility to perform actual printing of headers given formatted inputs
Parameters
----------
header: String
Specifies the stage of optimization being entered, and the type of results to follow
line: String
The underlining to follow `header`"""
print(header)
print(line)
self._print_column_name("Step", 5)
if self.show_experiment_id:
self._print_column_name("ID", self.show_experiment_id)
self._print_column_name("Time", 6)
self._print_column_name("Value", 10)
for index in self.sorted_indexes:
self._print_column_name(self.parameter_names[index], self.sizes[index] + 2)
print("")
def _print_column_name(self, value, size):
"""Print a column name within a specified `size` constraint
Parameters
----------
value: String
The name of the column to print
size: Int
The number of characters that `value` should span"""
try:
print("{0:>{1}}".format(value, size), end=self.end)
except TypeError: # Probably given tuple including param origin (init_params, extra_params, etc.)
if len(value) == 1:
print("{0:>{1}}".format(value[0], size), end=self.end)
else:
print("{0:>{1}}".format(str(value), size), end=self.end)
def print_result(self, hyperparameters, evaluation, experiment_id=None):
"""Print a row containing the results of an Experiment just executed
Parameters
----------
hyperparameters: List
List of hyperparameter values in the same order as :attr:`parameter_names`
evaluation: Float
An evaluation of the performance of `hyperparameters`
experiment_id: Str, or None, default=None
If not None, should be a string that is the UUID of the Experiment"""
if not self.verbose:
return
print("{:>5d}".format(self.iteration), end=self.end)
#################### Experiment ID ####################
if self.show_experiment_id:
if experiment_id is not None:
print("{}".format(experiment_id[: self.show_experiment_id]), end=self.end)
else:
print(" " * self.show_experiment_id, end=self.end)
#################### Time Elapsed ####################
minutes, seconds = divmod((datetime.now() - self.last_round).total_seconds(), 60)
print(expand_mins_secs(minutes, seconds), end=self.end)
#################### Evaluation Result ####################
if (
(self.y_max is None) # First evaluation
or (self.do_maximize and self.y_max < evaluation) # Found new max (best)
or (not self.do_maximize and self.y_max > evaluation) # Found new min (best)
):
self.y_max, self.x_max = evaluation, hyperparameters
self._print_target_value(evaluation, pre=_Color.MAGENTA, post=_Color.STOP)
self._print_input_values(hyperparameters, pre=_Color.GREEN, post=_Color.STOP)
else:
self._print_target_value(evaluation)
self._print_input_values(hyperparameters)
print("")
self.last_round = datetime.now()
self.iteration += 1
def _print_target_value(self, value, pre="", post=""):
"""Print the utility of an Experiment
Parameters
----------
        value: Float
            The utility value to print
pre: String, default=''
Content to prepend to the formatted `value` string before printing
post: String, default=''
Content to append to the formatted `value` string before printing"""
content = pre + "{: >10.5f}".format(value) + post
print(content, end=self.end)
def _print_input_values(self, values, pre="", post=""):
"""Print the value of a hyperparameter used by an Experiment
Parameters
----------
        values: List
            The hyperparameter values to print
pre: String, default=''
Content to prepend to the formatted `value` string before printing
post: String, default=''
Content to append to the formatted `value` string before printing"""
for index in self.sorted_indexes:
if isinstance(values[index], float):
content = "{0: >{1}.{2}f}".format(
values[index], self.sizes[index] + 2, min(self.sizes[index] - 3, 6 - 2)
)
else:
content = "{0: >{1}}".format(values[index], self.sizes[index] + 2)
print(pre + content + post, end=self.end)
def reset_timer(self):
"""Set :attr:`start_time`, and :attr:`last_round` to the current time"""
self.start_time = datetime.now()
self.last_round = datetime.now()
def print_summary(self):
"""Print a summary of the results of hyperparameter optimization upon completion"""
# TODO: Finish this
if not self.verbose:
return
def format_frame_source(previous_frame, **kwargs):
"""Construct a string describing the location at which a call was made
Parameters
----------
previous_frame: Frame
A frame depicting the location at which a call was made
**kwargs: Dict
Any additional kwargs to supply to :func:`reporting.stringify_frame_source`
Returns
-------
The stringified frame source information of `previous_frame`"""
source = inspect.getframeinfo(previous_frame)
src_script, src_line_no, src_func, src_class = source[0], source[1], source[2], None
with suppress(AttributeError, KeyError):
src_class = type(previous_frame.f_locals["self"]).__name__
return stringify_frame_source(src_script, src_line_no, src_func, src_class, **kwargs)
def stringify_frame_source(
src_file,
src_line_no,
src_func,
src_class,
add_line_no=True,
max_line_no_size=4,
total_max_size=80,
):
"""Construct a string that neatly displays the location in the code at which a call was made
Parameters
----------
src_file: Str
A filepath
src_line_no: Int
The line number in `src_file` at which the call was made
src_func: Str
The name of the function in `src_file` in which the call was made
src_class: Str, or None
If not None, the class in `src_file` in which the call was made
    add_line_no: Boolean, default=True
If True, the line number will be included in the `source_content` result
max_line_no_size: Int, default=4
Total number (including padding) of characters to be occupied by `src_line_no`. For
example, if `src_line_no`=32, and `max_line_no_size`=4, `src_line_no` will be padded to
become '32 ' in order to occupy four characters
total_max_size: Int, default=80
Total number (including padding) of characters to be occupied by the `source_content` result
Returns
-------
source_content: Str
A formatted string containing the location in the code at which a call was made
Examples
--------
>>> stringify_frame_source("reporting.py", 570, "stringify_frame_source", None)
'570 - reporting.stringify_frame_source() '
>>> stringify_frame_source("reporting.py", 12, "bar", "Foo")
'12 - reporting.Foo.bar() '
>>> stringify_frame_source("reporting.py", 12, "bar", "Foo", add_line_no=False)
'reporting.Foo.bar() '
>>> stringify_frame_source("reporting.py", 12, "bar", "Foo", total_max_size=60)
'12 - reporting.Foo.bar() '"""
source_content = ""
if add_line_no is True:
# Left-align line_no to size: max_line_no_size
source_content += "{0:<{1}}".format(src_line_no, max_line_no_size)
source_content += " - "
script_name = os.path.splitext(os.path.basename(src_file))[0]
if src_class is not None:
source_content += "{}.{}.{}()".format(script_name, src_class, src_func)
else:
source_content += "{}.{}()".format(script_name, src_func)
source_content = "{0:<{1}}".format(source_content, total_max_size)
return source_content
def add_time_to_content(content, add_time=False):
"""Construct a string containing the original `content`, in addition to the current time
Parameters
----------
content: Str
The original string, to which the current time will be concatenated
add_time: Boolean, default=False
If True, the current time will be concatenated onto the end of `content`
Returns
-------
content: Str
Str containing original `content`, along with current time, and additional formatting"""
add_content = ""
add_time = now_time() if add_time is True else add_time
add_content += "Time: {}".format(add_time) if add_time else ""
#################### Combine Original and New Content ####################
if add_content != "":
content += " " if ((content != "") and (not content.endswith(" "))) else ""
content += add_content
return content
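# Example behavior (illustrative): a preformatted time string is appended as
# "Time: <value>" (separated from the existing content by whitespace), while
# `add_time=True` substitutes the current time via `now_time()`:
#
#   add_time_to_content("Saved checkpoint", add_time="12:34:56")
#   # -> "Saved checkpoint" followed by "Time: 12:34:56"
#   add_time_to_content("Saved checkpoint")
#   # -> "Saved checkpoint" (unchanged)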
def format_fold_run(rep=None, fold=None, run=None, mode="concise"):
"""Construct a string to display the repetition, fold, and run currently being executed
Parameters
----------
rep: Int, or None, default=None
The repetition number currently being executed
fold: Int, or None, default=None
The fold number currently being executed
run: Int, or None, default=None
The run number currently being executed
mode: {"concise", "verbose"}, default="concise"
If "concise", the result will contain abbreviations for rep/fold/run
Returns
-------
content: Str
A clean display of the current repetition/fold/run
Examples
--------
>>> format_fold_run(rep=0, fold=3, run=2, mode="concise")
'R0-f3-r2'
>>> format_fold_run(rep=0, fold=3, run=2, mode="verbose")
'Rep-Fold-Run: 0-3-2'
>>> format_fold_run(rep=0, fold=3, run="*", mode="concise")
'R0-f3-r*'
>>> format_fold_run(rep=0, fold=3, run=2, mode="foo")
Traceback (most recent call last):
File "reporting.py", line ?, in format_fold_run
ValueError: Received invalid mode value: 'foo'"""
content = ""
if mode == "verbose":
content += format("Rep" if rep is not None else "")
content += format("-" if rep is not None and fold is not None else "")
content += format("Fold" if fold is not None else "")
content += format("-" if fold is not None and run is not None else "")
content += format("Run" if run is not None else "")
content += format(": " if any(_ is not None for _ in [rep, fold, run]) else "")
content += format(rep if rep is not None else "")
content += format("-" if rep is not None and fold is not None else "")
content += format(fold if fold is not None else "")
content += format("-" if fold is not None and run is not None else "")
content += format(run if run is not None else "")
elif mode == "concise":
content += format("R" if rep is not None else "")
content += format(rep if rep is not None else "")
content += format("-" if rep is not None and fold is not None else "")
content += format("f" if fold is not None else "")
content += format(fold if fold is not None else "")
content += format("-" if fold is not None and run is not None else "")
content += format("r" if run is not None else "")
content += format(run if run is not None else "")
else:
raise ValueError("Received invalid mode value: '{}'".format(mode))
return content
def format_evaluation(results, separator=" | ", float_format="{:.5f}"):
"""Construct a string to neatly display the results of a model evaluation
Parameters
----------
results: Dict
The results of a model evaluation, in which keys represent the dataset type evaluated, and
values are dicts containing metrics as keys, and metric values as values
separator: Str, default=' | '
The string used to join all the metric values into a single string
float_format: Str, default='{:.5f}'
A python string float formatter, applied to floating metric values
Returns
-------
content: Str
The model's evaluation results"""
content = []
for data_type, values in results.items():
if values is None:
continue
data_type = "OOF" if data_type == "oof" else data_type
data_type = "Holdout" if data_type == "holdout" else data_type
data_type = "In-Fold" if data_type == "in_fold" else data_type
metric_entry = "{}(".format(data_type)
metric_entry_vals = []
for metric_id, metric_value in values.items():
try:
formatted_value = float_format.format(metric_value)
except ValueError:
formatted_value = "{}".format(metric_value)
metric_entry_vals.append("{}={}".format(metric_id, formatted_value))
metric_entry += ", ".join(metric_entry_vals) + ")"
content.append(metric_entry)
content = separator.join(content)
return content
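# Example (illustrative input values): `None` entries are skipped, dataset keys are
# prettified, and float metrics are run through `float_format`.
#
#   >>> format_evaluation({"oof": {"roc_auc": 0.871234}, "holdout": None})
#   'OOF(roc_auc=0.87123)'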
# ADVANCED_FIT_LOGGING_DISPLAY_LAYOUT = [
# {
# "column_name": "General",
# "sub_columns_names": [
# ["fold", "Fold"],
# ["run", "Run"],
# ["seed", "Seed"],
# ["step", "Step"],
# ["start_time", "Start Time"],
# ["end_time", "End Time"],
# ["time_elapsed", "Time Elapsed"]
# ],
# "sub_column_min_sizes": [10, 10, 10, 20, 12, 12, 12]
# },
# # Will need to alter default "Score" sub-columns according to what metrics are actually being used
# {
# "column_name": "OOF Scores",
# "sub_columns_names": [
# ["oof_f1", "F1"],
# ["oof_roc_auc", "ROC_AUC"]
# ]
# },
# # Check that Holdout dataset is in use before adding "Holdout Scores" column
# {
# "column_name": "Holdout Scores",
# "sub_columns_names": [
# ["holdout_f1", "F1"],
# ["holdout_roc_auc", "ROC_AUC"]
# ]
# },
# {
# "column_name": "Losses",
# "sub_columns_names": [
# ["train_loss", "Train"],
# ["validation_loss", "Validation"]
# ]
# },
# ]
#
#
# class AdvancedDisplayLayout(object):
# def __init__(self):
# pass
#
#
# class AdvancedFitLogging(object):
# def __init__(self, display_layout=None, ):
# self.display_layout = display_layout or ADVANCED_FIT_LOGGING_DISPLAY_LAYOUT
#
# def _validate_parameters(self):
# pass
#
# def validate_display_layout(self):
# pass
|
##################################################
# Import Own Assets
##################################################
from hyperparameter_hunter import exceptions
from hyperparameter_hunter.settings import G
from hyperparameter_hunter.utils.general_utils import now_time, expand_mins_secs
##################################################
# Import Miscellaneous Assets
##################################################
from contextlib import suppress
from datetime import datetime
import inspect
import logging
import os.path
import sys
class ReportingHandler(object):
def __init__(
self,
heartbeat_path=None,
float_format="{:.5f}",
console_params=None,
heartbeat_params=None,
add_frame=False,
):
"""Class in control of logging methods, log formatting, and initializing Experiment logging
Parameters
----------
heartbeat_path: Str path, or None, default=None
If string and valid heartbeat path, logging messages will also be saved in this file
float_format: String, default='{:.5f}'
If not default, must be a valid formatting string for floating point values. If invalid,
default will be used
console_params: Dict, or None, default=None
Parameters passed to :meth:`_configure_console_handler`
heartbeat_params: Dict, or None, default=None
Parameters passed to :meth:`_configure_heartbeat_handler`
add_frame: Boolean, default=False
If True, whenever :meth:`log` is called, the source of the call will be prepended to
the content being logged"""
self.reporting_type = "logging" # TODO: Add `reporting_type` kwarg (logging, advanced)
self.heartbeat_path = heartbeat_path
self.float_format = float_format
self.console_params = console_params or {}
self.heartbeat_params = heartbeat_params or {}
self.add_frame = add_frame
self._validate_parameters()
self._configure_reporting_type()
def _validate_parameters(self):
"""Ensure all logging parameters are properly formatted"""
#################### reporting_type ####################
valid_types = ["logging", "standard", "advanced"]
if not isinstance(self.reporting_type, str):
raise TypeError(f"reporting_type must be a str. Received {self.reporting_type}")
if self.reporting_type not in valid_types:
raise ValueError(f"reporting_type must be in {valid_types}, not {self.reporting_type}")
#################### heartbeat_path ####################
if self.heartbeat_path is not None:
if not isinstance(self.heartbeat_path, str):
raise TypeError(f"heartbeat_path must be a str. Received {self.heartbeat_path}")
head, tail = os.path.split(self.heartbeat_path)
if not tail.endswith(".log"):
raise ValueError(f"heartbeat_path must end in '.log'. Given {self.heartbeat_path}")
if not os.path.exists(head):
raise FileNotFoundError(
f"heartbeat_path must start with an existing dir. Given {self.heartbeat_path}"
)
#################### float_format ####################
if not isinstance(self.float_format, str):
raise TypeError(f"float_format must be a format str. Received {self.float_format}")
if (not self.float_format.startswith("{")) or (not self.float_format.endswith("}")):
raise ValueError(f"float_format must be inside '{{' and '}}'. Got {self.float_format}")
#################### console_params ####################
if not isinstance(self.console_params, dict):
raise TypeError(f"console_params must be dict or None. Given {self.console_params}")
#################### heartbeat_params ####################
if not isinstance(self.heartbeat_params, dict):
raise TypeError(f"heartbeat_params must be dict or None. Given {self.heartbeat_params}")
def _configure_reporting_type(self):
"""Set placeholder logging methods to :attr:`reporting_type` specs and initialize logging"""
if self.reporting_type == "standard":
raise ValueError("Standard logging is not yet implemented. Please choose 'logging'")
# setattr(self, 'log', self._standard_log)
# setattr(self, 'debug', self._standard_debug)
# setattr(self, 'warn', self._standard_warn)
elif self.reporting_type == "logging":
setattr(self, "log", self._logging_log)
setattr(self, "debug", self._logging_debug)
setattr(self, "warn", self._logging_warn)
self._initialize_logging_logging()
elif self.reporting_type == "advanced":
raise ValueError("Advanced logging unimplemented. Please use 'logging'")
def _initialize_logging_logging(self):
"""Initialize and configure logging to be handled by the `logging` library"""
#################### Clear Logging Configuration ####################
root = logging.getLogger()
list(map(root.removeHandler, root.handlers[:]))
list(map(root.removeFilter, root.filters[:]))
#################### Configure Logging ####################
exceptions.hook_exception_handler()
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.DEBUG)
handlers = [self._configure_console_handler(**self.console_params)]
# Suppress FileExistsError - Raised when self.heartbeat_path is None, meaning heartbeat blacklisted
with suppress(FileExistsError):
handlers.append(self._configure_heartbeat_handler(**self.heartbeat_params))
logging.basicConfig(handlers=handlers, level=logging.DEBUG)
self.debug("Logging Logging has been initialized!")
# noinspection PyUnusedLocal
@staticmethod
def _configure_console_handler(level="INFO", fmt=None, datefmt="%H:%M:%S", style="%", **kwargs):
"""Configure the console handler in charge of printing log messages
Parameters
----------
level: String, or Int, default='DEBUG'
Minimum message level for the console. Passed to :meth:`logging.StreamHandler.setlevel`
fmt: String, or None, default=None
Message formatting string for the console. Passed to :meth:`logging.Formatter.__init__`
datefmt: String, or None, default="%H:%M:%S"
Date formatting string for the console. Passed to :meth:`logging.Formatter.__init__`.
For the `logging` library default, use `datefmt=None` ("%Y-%m-%d %H:%M:%S" + <ms>)
style: String, default='%'
Type of string formatting used. Passed to :meth:`logging.Formatter.__init__`
**kwargs: Dict
Extra keyword arguments
Returns
-------
console_handler: `logging.StreamHandler` instance
The instantiated handler for the console"""
console_handler = logging.StreamHandler(stream=sys.stdout)
console_handler.setLevel(level)
fmt = fmt or "<%(asctime)s> %(message)s"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt, style=style)
console_handler.setFormatter(formatter)
return console_handler
# noinspection PyUnusedLocal
def _configure_heartbeat_handler(
self, level="DEBUG", fmt=None, datefmt=None, style="%", **kwargs
):
"""Configure the file handler in charge of adding log messages to the heartbeat file
Parameters
----------
level: String, or Int, default='DEBUG'
Minimum message level for the heartbeat file. Passed to
:meth:`logging.FileHandler.setlevel`
fmt: String, or None, default=None
Message formatting string for the heartbeat file. Passed to
:meth:`logging.Formatter.__init__`
datefmt: String, or None, default=None
Date formatting string for the heartbeat file. Passed to
:meth:`logging.Formatter.__init__`
style: String, default='%'
Type of string formatting used. Passed to :meth:`logging.Formatter.__init__`
**kwargs: Dict
Extra keyword arguments
Returns
-------
file_handler: `logging.FileHandler` instance
The instantiated handler for the heartbeat file"""
if self.heartbeat_path is None:
raise FileExistsError
file_handler = logging.FileHandler(self.heartbeat_path, mode="w")
file_handler.setLevel(level)
fmt = fmt or "<%(asctime)s> %(levelname)-8s - %(message)s"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt, style=style)
file_handler.setFormatter(formatter)
return file_handler
##################################################
# Placeholder Methods:
##################################################
def log(self, content, **kwargs):
"""Placeholder method before proper initialization"""
def debug(self, content, **kwargs):
"""Placeholder method before proper initialization"""
def warn(self, content, **kwargs):
"""Placeholder method before proper initialization"""
##################################################
# Logging-Logging Methods:
##################################################
# noinspection PyUnusedLocal
def _logging_log(
self, content, verbose_threshold=None, previous_frame=None, add_time=False, **kwargs
):
"""Log an info message via the `logging` library
Parameters
----------
content: String
The message to log
verbose_threshold: Int, or None, default=None
If None, `content` logged normally. If int and `G.Env.verbose` >= `verbose_threshold`,
`content` is logged normally. Else if int and `G.Env.verbose` < `verbose_threshold`,
then `content` is logged on the `logging.debug` level, instead of `logging.info`
previous_frame: Frame, or None, default=None
The frame preceding the log call. If not provided, it will be inferred
add_time: Boolean, default=False
If True, the current time will be added to `content` before logging
**kwargs: Dict
Extra keyword arguments"""
if self.add_frame is True:
previous_frame = previous_frame or inspect.currentframe().f_back
try:
frame_source = format_frame_source(previous_frame)
finally:
del previous_frame
content = f"{frame_source} - {content}"
content = add_time_to_content(content, add_time=add_time)
if (verbose_threshold is None) or (G.Env.verbose >= verbose_threshold):
logging.info(content)
else:
logging.debug(content)
# noinspection PyUnusedLocal
def _logging_debug(self, content, previous_frame=None, add_time=False, **kwargs):
"""Log a debug message via the `logging` library
Parameters
----------
content: String
The message to log
previous_frame: Frame, or None, default=None
The frame preceding the debug call. If not provided, it will be inferred
add_time: Boolean, default=False
If True, the current time will be added to `content` before logging
**kwargs: Dict
Extra keyword arguments"""
if self.add_frame is True:
previous_frame = previous_frame or inspect.currentframe().f_back
try:
frame_source = format_frame_source(previous_frame)
finally:
del previous_frame
content = f"{frame_source} - {content}"
content = add_time_to_content(content, add_time=add_time)
logging.debug(content)
# noinspection PyUnusedLocal
def _logging_warn(self, content, **kwargs):
"""Log a warning message via the `logging` library
Parameters
----------
content: String
The message to log
**kwargs: Dict
Extra keyword arguments"""
if self.add_frame is True:
previous_frame = inspect.currentframe().f_back
try:
frame_source = format_frame_source(previous_frame)
finally:
del previous_frame
content = f"{frame_source} - {content}"
logging.warning(content)
class _Color:
"""Object defining color codes for use with logging"""
BLUE = "\033[34m"
CYAN = "\033[36m"
GREEN = "\033[32m"
MAGENTA = "\033[35m"
RED = "\033[31m"
STOP = "\033[0m"
class OptimizationReporter:
def __init__(self, parameter_names, verbose=1, show_experiment_id=8, do_maximize=True):
"""A MixIn class for reporting the results of hyperparameter optimization rounds
Parameters
----------
parameter_names: List
The names of the hyperparameters being evaluated and optimized
verbose: Int in [0, 1, 2], default=1
If 0, all but critical logging is silenced. If 1, normal logging is performed. If 2,
detailed logging is performed
show_experiment_id: Int, or Boolean, default=8
If True, the experiment_id will be printed in each result row. If False, it will not.
If int, the first `show_experiment_id`-many characters of each experiment_id will be
printed in each row
do_maximize: Boolean, default=True
If False, smaller metric values will be considered preferred and will be highlighted to
stand out. Else larger metric values will be treated as preferred"""
self.original_parameter_names = parameter_names
self.verbose = verbose
self.show_experiment_id = (
36 if (show_experiment_id is True or show_experiment_id > 36) else show_experiment_id
)
self.do_maximize = do_maximize
self.end = " | "
self.y_max = None
self.x_max = None
self.iteration = 0
self.start_time = datetime.now()
self.last_round = datetime.now()
skip = ("model_init_params", "model_extra_params", "feature_engineer", "feature_selector")
self.parameter_names = [_[1:] if _[0] in skip else _ for _ in self.original_parameter_names]
self.parameter_names = [_[1:] if _[0] == "params" else _ for _ in self.parameter_names]
self.parameter_names = [
_[0] if len(_) == 1 else str(_).replace("'", "").replace('"', "")
for _ in self.parameter_names
]
self.sizes = [max(len(_), 7) for _ in self.parameter_names]
self.sorted_indexes = sorted(
range(len(self.parameter_names)), key=self.parameter_names.__getitem__
)
def print_saved_results_header(self):
"""Print a header signifying that saved Experiment results are being read"""
header = f"{_Color.RED}Saved Result Files{_Color.STOP}"
self.print_header(header, (_Color.RED + "_" * self._line_len() + _Color.STOP))
def print_random_points_header(self):
"""Print a header signifying that random point evaluation rounds are starting"""
header = f"{_Color.RED}Random Point Evaluation{_Color.STOP}"
self.print_header(header, (_Color.RED + "_" * self._line_len() + _Color.STOP))
def print_optimization_header(self):
"""Print a header signifying that Optimization rounds are starting"""
header = f"{_Color.RED}Hyperparameter Optimization{_Color.STOP}"
self.print_header(header, (_Color.RED + "_" * self._line_len() + _Color.STOP))
def _line_len(self):
"""Calculate number of characters a header's underlining should span
Returns
-------
line_len: Int
The number of characters the line should span"""
line_len = 29
line_len += sum([_ + 5 for _ in self.sizes])
line_len += self.show_experiment_id + 3 if self.show_experiment_id else 0
return line_len
def print_header(self, header, line):
"""Utility to perform actual printing of headers given formatted inputs
Parameters
----------
header: String
Specifies the stage of optimization being entered, and the type of results to follow
line: String
The underlining to follow `header`"""
print(header)
print(line)
self._print_column_name("Step", 5)
if self.show_experiment_id:
self._print_column_name("ID", self.show_experiment_id)
self._print_column_name("Time", 6)
self._print_column_name("Value", 10)
for index in self.sorted_indexes:
self._print_column_name(self.parameter_names[index], self.sizes[index] + 2)
print("")
def _print_column_name(self, value, size):
"""Print a column name within a specified `size` constraint
Parameters
----------
value: String
The name of the column to print
size: Int
The number of characters that `value` should span"""
try:
print("{0:>{1}}".format(value, size), end=self.end)
except TypeError: # Probably given tuple including param origin (init_params, extra_params, etc.)
if len(value) == 1:
print("{0:>{1}}".format(value[0], size), end=self.end)
else:
print("{0:>{1}}".format(str(value), size), end=self.end)
def print_result(self, hyperparameters, evaluation, experiment_id=None):
"""Print a row containing the results of an Experiment just executed
Parameters
----------
hyperparameters: List
List of hyperparameter values in the same order as :attr:`parameter_names`
evaluation: Float
An evaluation of the performance of `hyperparameters`
experiment_id: Str, or None, default=None
If not None, should be a string that is the UUID of the Experiment"""
if not self.verbose:
return
print("{:>5d}".format(self.iteration), end=self.end)
#################### Experiment ID ####################
if self.show_experiment_id:
if experiment_id is not None:
print("{}".format(experiment_id[: self.show_experiment_id]), end=self.end)
else:
print(" " * self.show_experiment_id, end=self.end)
#################### Time Elapsed ####################
minutes, seconds = divmod((datetime.now() - self.last_round).total_seconds(), 60)
print(expand_mins_secs(minutes, seconds), end=self.end)
#################### Evaluation Result ####################
if (
(self.y_max is None) # First evaluation
or (self.do_maximize and self.y_max < evaluation) # Found new max (best)
or (not self.do_maximize and self.y_max > evaluation) # Found new min (best)
):
self.y_max, self.x_max = evaluation, hyperparameters
self._print_target_value(evaluation, pre=_Color.MAGENTA, post=_Color.STOP)
self._print_input_values(hyperparameters, pre=_Color.GREEN, post=_Color.STOP)
else:
self._print_target_value(evaluation)
self._print_input_values(hyperparameters)
print("")
self.last_round = datetime.now()
self.iteration += 1
def _print_target_value(self, value, pre="", post=""):
"""Print the utility of an Experiment
Parameters
----------
value: String
The utility value to print
pre: String, default=''
Content to prepend to the formatted `value` string before printing
post: String, default=''
Content to append to the formatted `value` string before printing"""
content = pre + "{: >10.5f}".format(value) + post
print(content, end=self.end)
def _print_input_values(self, values, pre="", post=""):
"""Print the value of a hyperparameter used by an Experiment
Parameters
----------
value: String
The hyperparameter value to print
pre: String, default=''
Content to prepend to the formatted `value` string before printing
post: String, default=''
Content to append to the formatted `value` string before printing"""
for index in self.sorted_indexes:
if isinstance(values[index], float):
content = "{0: >{1}.{2}f}".format(
values[index], self.sizes[index] + 2, min(self.sizes[index] - 3, 6 - 2)
)
else:
content = "{0: >{1}}".format(values[index], self.sizes[index] + 2)
print(pre + content + post, end=self.end)
def reset_timer(self):
"""Set :attr:`start_time`, and :attr:`last_round` to the current time"""
self.start_time = datetime.now()
self.last_round = datetime.now()
def print_summary(self):
"""Print a summary of the results of hyperparameter optimization upon completion"""
# TODO: Finish this
if not self.verbose:
return
def format_frame_source(previous_frame, **kwargs):
"""Construct a string describing the location at which a call was made
Parameters
----------
previous_frame: Frame
A frame depicting the location at which a call was made
**kwargs: Dict
Any additional kwargs to supply to :func:`reporting.stringify_frame_source`
Returns
-------
The stringified frame source information of `previous_frame`"""
source = inspect.getframeinfo(previous_frame)
src_script, src_line_no, src_func, src_class = source[0], source[1], source[2], None
with suppress(AttributeError, KeyError):
src_class = type(previous_frame.f_locals["self"]).__name__
return stringify_frame_source(src_script, src_line_no, src_func, src_class, **kwargs)
def stringify_frame_source(
src_file,
src_line_no,
src_func,
src_class,
add_line_no=True,
max_line_no_size=4,
total_max_size=80,
):
"""Construct a string that neatly displays the location in the code at which a call was made
Parameters
----------
src_file: Str
A filepath
src_line_no: Int
The line number in `src_file` at which the call was made
src_func: Str
The name of the function in `src_file` in which the call was made
src_class: Str, or None
If not None, the class in `src_file` in which the call was made
add_line_no: Boolean, default=False
If True, the line number will be included in the `source_content` result
max_line_no_size: Int, default=4
Total number (including padding) of characters to be occupied by `src_line_no`. For
example, if `src_line_no`=32, and `max_line_no_size`=4, `src_line_no` will be padded to
become '32 ' in order to occupy four characters
total_max_size: Int, default=80
Total number (including padding) of characters to be occupied by the `source_content` result
Returns
-------
source_content: Str
A formatted string containing the location in the code at which a call was made
Examples
--------
>>> stringify_frame_source("reporting.py", 570, "stringify_frame_source", None)
'570 - reporting.stringify_frame_source() '
>>> stringify_frame_source("reporting.py", 12, "bar", "Foo")
'12 - reporting.Foo.bar() '
>>> stringify_frame_source("reporting.py", 12, "bar", "Foo", add_line_no=False)
'reporting.Foo.bar() '
>>> stringify_frame_source("reporting.py", 12, "bar", "Foo", total_max_size=60)
'12 - reporting.Foo.bar() '"""
source_content = ""
if add_line_no is True:
# Left-align line_no to size: max_line_no_size
source_content += "{0:<{1}}".format(src_line_no, max_line_no_size)
source_content += " - "
script_name = os.path.splitext(os.path.basename(src_file))[0]
if src_class is not None:
source_content += "{}.{}.{}()".format(script_name, src_class, src_func)
else:
source_content += "{}.{}()".format(script_name, src_func)
source_content = "{0:<{1}}".format(source_content, total_max_size)
return source_content
def add_time_to_content(content, add_time=False):
"""Construct a string containing the original `content`, in addition to the current time
Parameters
----------
content: Str
The original string, to which the current time will be concatenated
add_time: Boolean, default=False
If True, the current time will be concatenated onto the end of `content`
Returns
-------
content: Str
Str containing original `content`, along with current time, and additional formatting"""
add_content = ""
add_time = now_time() if add_time is True else add_time
add_content += "Time: {}".format(add_time) if add_time else ""
#################### Combine Original and New Content ####################
if add_content != "":
content += " " if ((content != "") and (not content.endswith(" "))) else ""
content += add_content
return content
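# Illustrative behavior of `add_time_to_content` (a sketch; the exact timestamp text comes
# from :func:`now_time`, which is defined elsewhere in this module):
#   add_time_to_content("Saved checkpoint")                 -> "Saved checkpoint"
#   add_time_to_content("Saved checkpoint", add_time=True)  -> "Saved checkpoint" + padding + "Time: <now_time()>"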
def format_fold_run(rep=None, fold=None, run=None, mode="concise"):
"""Construct a string to display the repetition, fold, and run currently being executed
Parameters
----------
rep: Int, or None, default=None
The repetition number currently being executed
fold: Int, or None, default=None
The fold number currently being executed
run: Int, or None, default=None
The run number currently being executed
mode: {"concise", "verbose"}, default="concise"
If "concise", the result will contain abbreviations for rep/fold/run
Returns
-------
content: Str
A clean display of the current repetition/fold/run
Examples
--------
>>> format_fold_run(rep=0, fold=3, run=2, mode="concise")
'R0-f3-r2'
>>> format_fold_run(rep=0, fold=3, run=2, mode="verbose")
'Rep-Fold-Run: 0-3-2'
>>> format_fold_run(rep=0, fold=3, run="*", mode="concise")
'R0-f3-r*'
>>> format_fold_run(rep=0, fold=3, run=2, mode="foo")
Traceback (most recent call last):
File "reporting.py", line ?, in format_fold_run
ValueError: Received invalid mode value: 'foo'"""
content = ""
if mode == "verbose":
content += format("Rep" if rep is not None else "")
content += format("-" if rep is not None and fold is not None else "")
content += format("Fold" if fold is not None else "")
content += format("-" if fold is not None and run is not None else "")
content += format("Run" if run is not None else "")
content += format(": " if any(_ is not None for _ in [rep, fold, run]) else "")
content += format(rep if rep is not None else "")
content += format("-" if rep is not None and fold is not None else "")
content += format(fold if fold is not None else "")
content += format("-" if fold is not None and run is not None else "")
content += format(run if run is not None else "")
elif mode == "concise":
content += format("R" if rep is not None else "")
content += format(rep if rep is not None else "")
content += format("-" if rep is not None and fold is not None else "")
content += format("f" if fold is not None else "")
content += format(fold if fold is not None else "")
content += format("-" if fold is not None and run is not None else "")
content += format("r" if run is not None else "")
content += format(run if run is not None else "")
else:
raise ValueError("Received invalid mode value: '{}'".format(mode))
return content
def format_evaluation(results, separator=" | ", float_format="{:.5f}"):
"""Construct a string to neatly display the results of a model evaluation
Parameters
----------
results: Dict
The results of a model evaluation, in which keys represent the dataset type evaluated, and
values are dicts containing metrics as keys, and metric values as values
separator: Str, default=' | '
The string used to join all the metric values into a single string
float_format: Str, default='{:.5f}'
A python string float formatter, applied to floating metric values
Returns
-------
content: Str
The model's evaluation results"""
content = []
for data_type, values in results.items():
if values is None:
continue
data_type = "OOF" if data_type == "oof" else data_type
data_type = "Holdout" if data_type == "holdout" else data_type
data_type = "In-Fold" if data_type == "in_fold" else data_type
metric_entry = "{}(".format(data_type)
metric_entry_vals = []
for metric_id, metric_value in values.items():
try:
formatted_value = float_format.format(metric_value)
except ValueError:
formatted_value = "{}".format(metric_value)
metric_entry_vals.append("{}={}".format(metric_id, formatted_value))
metric_entry += ", ".join(metric_entry_vals) + ")"
content.append(metric_entry)
content = separator.join(content)
return content
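# Example usage (hypothetical evaluation results):
#   format_evaluation({"oof": {"roc_auc": 0.8912}, "holdout": None})
#   -> 'OOF(roc_auc=0.89120)'
# Entries whose value dict is None (here, "holdout") are skipped, and multiple dataset
# types are joined with `separator`.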
# ADVANCED_FIT_LOGGING_DISPLAY_LAYOUT = [
# {
# "column_name": "General",
# "sub_columns_names": [
# ["fold", "Fold"],
# ["run", "Run"],
# ["seed", "Seed"],
# ["step", "Step"],
# ["start_time", "Start Time"],
# ["end_time", "End Time"],
# ["time_elapsed", "Time Elapsed"]
# ],
# "sub_column_min_sizes": [10, 10, 10, 20, 12, 12, 12]
# },
# # Will need to alter default "Score" sub-columns according to what metrics are actually being used
# {
# "column_name": "OOF Scores",
# "sub_columns_names": [
# ["oof_f1", "F1"],
# ["oof_roc_auc", "ROC_AUC"]
# ]
# },
# # Check that Holdout dataset is in use before adding "Holdout Scores" column
# {
# "column_name": "Holdout Scores",
# "sub_columns_names": [
# ["holdout_f1", "F1"],
# ["holdout_roc_auc", "ROC_AUC"]
# ]
# },
# {
# "column_name": "Losses",
# "sub_columns_names": [
# ["train_loss", "Train"],
# ["validation_loss", "Validation"]
# ]
# },
# ]
#
#
# class AdvancedDisplayLayout(object):
# def __init__(self):
# pass
#
#
# class AdvancedFitLogging(object):
# def __init__(self, display_layout=None, ):
# self.display_layout = display_layout or ADVANCED_FIT_LOGGING_DISPLAY_LAYOUT
#
# def _validate_parameters(self):
# pass
#
# def validate_display_layout(self):
# pass
|
import logging
import sys
import itertools
import time
import click
import click_log
import tqdm
import pysam
import multiprocessing as mp
from inspect import getframeinfo, currentframe, getdoc
from ..utils import bam_utils
from ..utils.model import LibraryModel
from ..annotate.command import get_segments
from ..meta import VERSION
logging.basicConfig(stream=sys.stderr)
logger = logging.getLogger("scsplit")
click_log.basic_config(logger)
__DEFAULT_DUMMY_CBC = "CTGCCTAACCTGATCC"
__DEFAULT_OUT_BASE_NAME = logger.name
__DEFAULT_UMI_LENGTH = 10
__OUT_READ_FILE_SUFFIX = "_mates"
__OUT_WHITELIST_FILE_SUFFIX = "_whitelist.txt"
@click.command(name=logger.name)
@click_log.simple_verbosity_option(logger)
@click.option(
"-t",
"--threads",
type=int,
default=mp.cpu_count() - 1,
show_default=True,
help="number of threads to use (0 for all)",
)
@click.option(
"-o",
"--output-base-name",
default=__DEFAULT_OUT_BASE_NAME,
type=str,
help=f"base name for output files [default: {__DEFAULT_OUT_BASE_NAME}]",
)
@click.option(
"-c",
"--cell-barcode",
default=__DEFAULT_DUMMY_CBC,
type=str,
help=f"dummy cell barcode to use for the dataset [default: {__DEFAULT_DUMMY_CBC}, "
f"length: {len(__DEFAULT_DUMMY_CBC)}]",
)
@click.option(
"-u",
"--umi-length",
default=__DEFAULT_UMI_LENGTH,
type=int,
show_default=True,
help=f"length of the UMI from this library",
)
@click.option(
"-b",
"--write-bam",
is_flag=True,
default=False,
show_default=True,
help=f"Write out an annotated bam file in addition to the mates files.",
)
@click.option(
'--force',
is_flag=True,
default=False,
show_default=True,
help="Force scsplit to run on the input bam without checking for compatibility."
)
@click.option(
"-m",
"--model",
default="mas15",
show_default=True,
help="The model to use for annotation. If the given value is a pre-configured model name, then that "
"model will be used. Otherwise, the given value will be treated as a file name and Longbow will attempt to "
"read in the file and create a LibraryModel from it. Longbow will assume the contents are the configuration "
"of a LibraryModel as per LibraryModel.to_json()."
)
@click.argument("input-bam", default="-" if not sys.stdin.isatty() else None, type=click.File("rb"))
def main(threads, output_base_name, cell_barcode, umi_length, force, model, write_bam, input_bam):
"""Create files for use in `alevin` for single-cell analysis.
This tool coerces a set of reads from a single source into a format that `alevin` can ingest.
Segment names are assumed to be those in the default model (utils/model.py).
INPUT_BAM should contain reads that have been processed by `longbow segment`.
The output from this tool consists of several files:
    OUTPUT_BASE_NAME_mates1.fastq:
A file containing partial sequences for all reads in the given input file. These partial reads consist of the
dummy cell barcode + detected UMI for each read in the given input file.
    OUTPUT_BASE_NAME_mates2.fastq:
A file containing partial sequences for all reads in the given input file. These partial reads consist of the
transcript sequences for all reads in the given input file. Transcript sequences include data after the UMI
and before the Poly-A tail. All bases outside of this range are excluded from the output.
OUTPUT_BASE_NAME_whitelist.txt:
A whitelist file for alevin containing the given dummy cell barcode.
"""
t_start = time.time()
logger.info("Invoked via: longbow %s", " ".join(sys.argv[1:]))
threads = mp.cpu_count() if threads <= 0 or threads > mp.cpu_count() else threads
logger.info(f"Running with {threads} worker subprocess(es)")
# Get our model:
if LibraryModel.has_prebuilt_model(model):
logger.info(f"Using %s", LibraryModel.pre_configured_models[model]["description"])
model = LibraryModel.build_pre_configured_model(model)
else:
logger.info(f"Loading model from json file: %s", model)
model = LibraryModel.from_json_file(model)
# Configure process manager:
# NOTE: We're using processes to overcome the Global Interpreter Lock.
manager = mp.Manager()
process_input_data_queue = manager.Queue(threads)
results = manager.Queue()
# Start worker sub-processes:
worker_process_pool = []
for _ in range(threads):
p = mp.Process(
target=_sub_process_work_fn, args=(process_input_data_queue, results, umi_length, model, write_bam)
)
p.start()
worker_process_pool.append(p)
pysam.set_verbosity(0) # silence message about the .bai file not being found
with pysam.AlignmentFile(
input_bam, "rb", check_sq=False, require_index=False
) as bam_file, tqdm.tqdm(
desc="Progress",
unit=" read",
colour="green",
file=sys.stderr,
leave=False,
disable=not sys.stdin.isatty(),
) as pbar:
if force:
logger.info("Force mode - skipping bam header check for compatibility")
else:
# Make sure we're given an input bam file we can work with:
if not _validate_input_bam(bam_file.header):
# Bad news - we have to quit.
# let's try to do it nicely:
for r in (None,) * threads:
process_input_data_queue.put(r)
# Wait for our input jobs to finish:
for p in worker_process_pool:
p.join()
sys.exit(1)
# Get our header from the input bam file:
out_header = bam_utils.create_bam_header_with_program_group(logger.name, bam_file.header, models=[model])
# Start output worker:
res = manager.dict({"num_reads_processed": 0})
output_worker = mp.Process(
target=_sub_process_write_fn,
args=(
results,
output_base_name,
cell_barcode,
pbar,
res,
write_bam,
out_header
),
)
output_worker.start()
# Add in a sentinel value at the end of the queue - one for each subprocess - so we guarantee
# that all subprocesses will exit:
iter_data = itertools.chain(bam_file, (None,) * threads)
for r in iter_data:
if r is not None:
process_input_data_queue.put(r.to_string())
else:
process_input_data_queue.put(r)
# Wait for our input jobs to finish:
for p in worker_process_pool:
p.join()
# Now that our input processes are done, we can add our exit sentinel onto the output queue and
# wait for that process to end:
results.put(None)
output_worker.join()
# Write out our CBC whitelist file:
with open(f"{output_base_name}{__OUT_WHITELIST_FILE_SUFFIX}", "w") as f:
f.write(f"{cell_barcode}\n")
    logger.info(f"Processed {res['num_reads_processed']} reads.")
logger.info(f"CBC length: {len(cell_barcode)}.")
logger.info(f"UMI length: {umi_length}.")
logger.info(f"Done. Elapsed time: %2.2fs.", time.time() - t_start)
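# Example invocation (hypothetical file names; assumes this command is registered as
# `longbow scsplit`):
#   longbow scsplit -t 4 -o my_sample -u 10 my_sample.segmented.bam
# which would write my_sample_mates1.fastq, my_sample_mates2.fastq and
# my_sample_whitelist.txt (plus my_sample.cbc_umi_annotated.bam when -b is given).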
def _validate_input_bam(input_bam_header):
    """Check that the given input_bam_header contains a `longbow segment` program group."""
in_bam_header_dict = input_bam_header.to_dict()
if "PG" not in in_bam_header_dict:
logger.warning("Could not find PG entry in header. Cannot confirm that this file is compatible.")
else:
found_segment_cmd = False
for info in [item for item in in_bam_header_dict["PG"]]:
if "PN" not in info:
continue
if info["PN"] == "longbow" and info["ID"].split("-")[1] == "segment":
found_segment_cmd = True
break
if not found_segment_cmd:
logger.error(
"Input bam file header does not indicate that it was created by longbow segment. "
"This tool requires `longbow segment` reads as input data.")
return False
return True
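# For reference, the check above expects a PG entry written by `longbow segment` of
# roughly this shape (illustrative values only):
#   {"ID": "longbow-segment-1", "PN": "longbow", "VN": "<version>", "CL": "longbow segment ..."}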
def _get_start_segment_from_list(seg_list, model, read_name):
    """Get the start segment from the list of SegmentInfo objects, based on the given model.
    If no start segment is found, returns None."""
# The start segment should be the first matching segment:
for s in seg_list:
if s.name in model.start_element_names:
return s
logger.warning("Could not process read: %s - No start segment found (start names: %s).",
read_name, model.start_element_names)
return None
def _get_end_segment_from_list(seg_list, model, read_name):
    """Get the end segment from the list of SegmentInfo objects, based on the given model.
    If no end segment is found, returns None."""
# The end segment should be the last matching segment, so we
# iterate from the end to the start of the list:
for s in reversed(seg_list):
if s.name in model.end_element_names:
return s
    logger.warning("Could not process read: %s - No end segment found (end names: %s).",
                   read_name, model.end_element_names)
return None
def _sub_process_work_fn(in_queue, out_queue, umi_length, array_model, do_bam_out):
"""Function to run in each subprocess.
Extracts and returns all segments from an input read."""
while True:
# Wait until we get some data.
# Note: Because we have a sentinel value None inserted at the end of the input data for each
# subprocess, we don't have to add a timeout - we're guaranteed each process will always have
# at least one element.
raw_data = in_queue.get()
# Check for exit sentinel:
if raw_data is None:
return
# Unpack our data here:
read = pysam.AlignedSegment.fromstring(
raw_data, pysam.AlignmentHeader.from_dict(dict())
)
_, segments = get_segments(read)
# Get start element position
# (for MAS-seq it's the 10x adapter)
start_segment = _get_start_segment_from_list(segments, array_model, read.query_name)
if start_segment is None:
continue
# Get the end element position:
# (for MAS-seq it's the Poly-a)
end_segment = _get_end_segment_from_list(segments, array_model, read.query_name)
if end_segment is None:
continue
# Now we grab the bases just after the 10x adapter as the UMI
# and the bases between the UMI and the poly A for the transcript
# Note: Positions are inclusive so we must add 1 to the end position to get that base as well:
umi_start = start_segment.end+1
umi_end = umi_start + umi_length
umi_bases = read.query_sequence[umi_start:umi_end]
umi_quals = "".join([chr(i + 33) for i in read.query_alignment_qualities[umi_start:umi_end]])
transcript_bases = read.query_sequence[umi_end:end_segment.start]
transcript_quals = "".join(
[chr(i + 33) for i in read.query_alignment_qualities[umi_end:end_segment.start]]
)
# Place our data on the output queue:
if do_bam_out:
out_queue.put(
tuple([read.query_name, umi_bases, umi_quals, transcript_bases, transcript_quals, read.to_string()])
)
else:
out_queue.put(
tuple([read.query_name, umi_bases, umi_quals, transcript_bases, transcript_quals])
)
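# Illustrative coordinates (assumed values, for clarity): if the detected 10x adapter ends
# at read position 119 (inclusive) and umi_length is 10, then umi_start = 120 and
# umi_end = 130, so the UMI is read.query_sequence[120:130] and the transcript is
# read.query_sequence[130:end_segment.start], i.e. everything up to the start of the
# poly-A segment.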
def _sub_process_write_fn(
out_queue,
out_base_name,
cell_barcode,
pbar,
res,
do_bam_out,
out_bam_header
):
"""Thread / process fn to write out all our data."""
try:
if do_bam_out:
out_bam_file = pysam.AlignmentFile(f"{out_base_name}.cbc_umi_annotated.bam", "wb", header=out_bam_header)
with open(f"{out_base_name}{__OUT_READ_FILE_SUFFIX}1.fastq", "w") as mates1_file, \
open(f"{out_base_name}{__OUT_READ_FILE_SUFFIX}2.fastq", "w") as mates2_file:
while True:
# Wait for some output data:
raw_data = out_queue.get()
# Check for exit sentinel:
if raw_data is None:
break
# Unpack data:
if do_bam_out:
read_name, umi_bases, umi_quals, transcript_bases, transcript_quals, read_string = raw_data
else:
read_name, umi_bases, umi_quals, transcript_bases, transcript_quals = raw_data
# Create mates1 and mates2 records:
mates_1_record = pysam.FastxRecord(
name=read_name,
sequence=cell_barcode + umi_bases,
quality=(chr(33 + 60) * len(cell_barcode)) + umi_quals
)
mates_2_record = pysam.FastxRecord(
name=read_name,
sequence=transcript_bases,
quality=transcript_quals
)
# Write out mates1 and mates2 records:
mates1_file.write(str(mates_1_record))
mates1_file.write("\n")
mates2_file.write(str(mates_2_record))
mates2_file.write("\n")
if do_bam_out:
read = pysam.AlignedSegment.fromstring(
read_string, pysam.AlignmentHeader.from_dict(dict())
)
read.set_tag("CR", cell_barcode)
read.set_tag("UR", umi_bases)
out_bam_file.write(read)
# Increment our counters:
res["num_reads_processed"] += 1
pbar.update(1)
# Obligatory log message:
logger.debug("Processed read: %s", read_name)
finally:
if do_bam_out:
out_bam_file.close()
|
import logging
import sys
import itertools
import time
import click
import click_log
import tqdm
import pysam
import multiprocessing as mp
from inspect import getframeinfo, currentframe, getdoc
from ..utils import bam_utils
from ..utils.model import LibraryModel
from ..annotate.command import get_segments
from ..meta import VERSION
logging.basicConfig(stream=sys.stderr)
logger = logging.getLogger("scsplit")
click_log.basic_config(logger)
__DEFAULT_DUMMY_CBC = "CTGCCTAACCTGATCC"
__DEFAULT_OUT_BASE_NAME = logger.name
__DEFAULT_UMI_LENGTH = 10
__OUT_READ_FILE_SUFFIX = "_mates"
__OUT_WHITELIST_FILE_SUFFIX = "_whitelist.txt"
@click.command(name=logger.name)
@click_log.simple_verbosity_option(logger)
@click.option(
"-t",
"--threads",
type=int,
default=mp.cpu_count() - 1,
show_default=True,
help="number of threads to use (0 for all)",
)
@click.option(
"-o",
"--output-base-name",
default=__DEFAULT_OUT_BASE_NAME,
type=str,
help=f"base name for output files [default: {__DEFAULT_OUT_BASE_NAME}]",
)
@click.option(
"-c",
"--cell-barcode",
default=__DEFAULT_DUMMY_CBC,
type=str,
help=f"dummy cell barcode to use for the dataset [default: {__DEFAULT_DUMMY_CBC}, "
f"length: {len(__DEFAULT_DUMMY_CBC)}]",
)
@click.option(
"-u",
"--umi-length",
default=__DEFAULT_UMI_LENGTH,
type=int,
show_default=True,
help=f"length of the UMI from this library",
)
@click.option(
"-b",
"--write-bam",
is_flag=True,
default=False,
show_default=True,
help=f"Write out an annotated bam file in addition to the mates files.",
)
@click.option(
'--force',
is_flag=True,
default=False,
show_default=True,
help="Force scsplit to run on the input bam without checking for compatibility."
)
@click.option(
"-m",
"--model",
default="mas15",
show_default=True,
help="The model to use for annotation. If the given value is a pre-configured model name, then that "
"model will be used. Otherwise, the given value will be treated as a file name and Longbow will attempt to "
"read in the file and create a LibraryModel from it. Longbow will assume the contents are the configuration "
"of a LibraryModel as per LibraryModel.to_json()."
)
@click.argument("input-bam", default="-" if not sys.stdin.isatty() else None, type=click.File("rb"))
def main(threads, output_base_name, cell_barcode, umi_length, force, model, write_bam, input_bam):
"""Create files for use in `alevin` for single-cell analysis.
This tool coerces a set of reads from a single source into a format that `alevin` can ingest.
Segment names are assumed to be those in the default model (utils/model.py).
INPUT_BAM should contain reads that have been processed by `longbow segment`.
The output from this tool consists of several files:
OUTPUT_BASE_NAME_mates_1.fastq:
A file containing partial sequences for all reads in the given input file. These partial reads consist of the
dummy cell barcode + detected UMI for each read in the given input file.
OUTPUT_BASE_NAME_mates_2.fastq:
A file containing partial sequences for all reads in the given input file. These partial reads consist of the
transcript sequences for all reads in the given input file. Transcript sequences include data after the UMI
and before the Poly-A tail. All bases outside of this range are excluded from the output.
OUTPUT_BASE_NAME_whitelist.txt:
A whitelist file for alevin containing the given dummy cell barcode.
"""
t_start = time.time()
logger.info("Invoked via: longbow %s", " ".join(sys.argv[1:]))
threads = mp.cpu_count() if threads <= 0 or threads > mp.cpu_count() else threads
logger.info(f"Running with {threads} worker subprocess(es)")
# Get our model:
if LibraryModel.has_prebuilt_model(model):
logger.info(f"Using %s", LibraryModel.pre_configured_models[model]["description"])
model = LibraryModel.build_pre_configured_model(model)
else:
logger.info(f"Loading model from json file: %s", model)
model = LibraryModel.from_json_file(model)
# Configure process manager:
# NOTE: We're using processes to overcome the Global Interpreter Lock.
manager = mp.Manager()
process_input_data_queue = manager.Queue(threads)
results = manager.Queue()
# Start worker sub-processes:
worker_process_pool = []
for _ in range(threads):
p = mp.Process(
target=_sub_process_work_fn, args=(process_input_data_queue, results, umi_length, model, write_bam)
)
p.start()
worker_process_pool.append(p)
pysam.set_verbosity(0) # silence message about the .bai file not being found
with pysam.AlignmentFile(
input_bam, "rb", check_sq=False, require_index=False
) as bam_file, tqdm.tqdm(
desc="Progress",
unit=" read",
colour="green",
file=sys.stderr,
leave=False,
disable=not sys.stdin.isatty(),
) as pbar:
if force:
logger.info("Force mode - skipping bam header check for compatibility")
else:
# Make sure we're given an input bam file we can work with:
if not _validate_input_bam(bam_file.header):
# Bad news - we have to quit.
# let's try to do it nicely:
for r in (None,) * threads:
process_input_data_queue.put(r)
# Wait for our input jobs to finish:
for p in worker_process_pool:
p.join()
sys.exit(1)
# Get our header from the input bam file:
out_header = bam_utils.create_bam_header_with_program_group(logger.name, bam_file.header, models=[model])
# Start output worker:
res = manager.dict({"num_reads_processed": 0})
output_worker = mp.Process(
target=_sub_process_write_fn,
args=(
results,
output_base_name,
cell_barcode,
pbar,
res,
write_bam,
out_header
),
)
output_worker.start()
# Add in a sentinel value at the end of the queue - one for each subprocess - so we guarantee
# that all subprocesses will exit:
iter_data = itertools.chain(bam_file, (None,) * threads)
for r in iter_data:
if r is not None:
process_input_data_queue.put(r.to_string())
else:
process_input_data_queue.put(r)
# Wait for our input jobs to finish:
for p in worker_process_pool:
p.join()
# Now that our input processes are done, we can add our exit sentinel onto the output queue and
# wait for that process to end:
results.put(None)
output_worker.join()
# Write out our CBC whitelist file:
with open(f"{output_base_name}{__OUT_WHITELIST_FILE_SUFFIX}", "w") as f:
f.write(f"{cell_barcode}\n")
logger.info(f"Processed {res['num_reads_processed']} reads.")
logger.info(f"CBC length: {len(cell_barcode)}.")
logger.info(f"UMI length: {umi_length}.")
logger.info(f"Done. Elapsed time: %2.2fs.", time.time() - t_start)
def _validate_input_bam(input_bam_header):
"""Check that the given input_bam_header contains an `longbow segment` program group."""
in_bam_header_dict = input_bam_header.to_dict()
if "PG" not in in_bam_header_dict:
logger.warning("Could not find PG entry in header. Cannot confirm that this file is compatible.")
else:
found_segment_cmd = False
for info in [item for item in in_bam_header_dict["PG"]]:
if "PN" not in info:
continue
if info["PN"] == "longbow" and info["ID"].split("-")[1] == "segment":
found_segment_cmd = True
break
if not found_segment_cmd:
logger.error(
"Input bam file header does not indicate that it was created by longbow segment. "
"This tool requires `longbow segment` reads as input data.")
return False
return True
def _get_start_segment_from_list(seg_list, model, read_name):
"""Get the start segment segment from the list of SegmentInfo objects based on the given model.
If no start segment is found, returns None."""
# The start segment should be the first matching segment:
for s in seg_list:
if s.name in model.start_element_names:
return s
logger.warning("Could not process read: %s - No start segment found (start names: %s).",
read_name, model.start_element_names)
return None
def _get_end_segment_from_list(seg_list, model, read_name):
"""Get the end segment segment from the list of SegmentInfo objects based on the given model.
If no start segment is found, returns None."""
# The end segment should be the last matching segment, so we
# iterate from the end to the start of the list:
for s in reversed(seg_list):
if s.name in model.end_element_names:
return s
logger.warning("Could not process read: %s - No end segment found (end names: %s).",
read_name, model.start_element_names)
return None
def _sub_process_work_fn(in_queue, out_queue, umi_length, array_model, do_bam_out):
"""Function to run in each subprocess.
Extracts and returns all segments from an input read."""
while True:
# Wait until we get some data.
# Note: Because we have a sentinel value None inserted at the end of the input data for each
# subprocess, we don't have to add a timeout - we're guaranteed each process will always have
# at least one element.
raw_data = in_queue.get()
# Check for exit sentinel:
if raw_data is None:
return
# Unpack our data here:
read = pysam.AlignedSegment.fromstring(
raw_data, pysam.AlignmentHeader.from_dict(dict())
)
_, segments = get_segments(read)
# Get start element position
# (for MAS-seq it's the 10x adapter)
start_segment = _get_start_segment_from_list(segments, array_model, read.query_name)
if start_segment is None:
continue
# Get the end element position:
# (for MAS-seq it's the Poly-a)
end_segment = _get_end_segment_from_list(segments, array_model, read.query_name)
if end_segment is None:
continue
# Now we grab the bases just after the 10x adapter as the UMI
# and the bases between the UMI and the poly A for the transcript
# Note: Positions are inclusive so we must add 1 to the end position to get that base as well:
umi_start = start_segment.end+1
umi_end = umi_start + umi_length
umi_bases = read.query_sequence[umi_start:umi_end]
umi_quals = "".join([chr(i + 33) for i in read.query_alignment_qualities[umi_start:umi_end]])
transcript_bases = read.query_sequence[umi_end:end_segment.start]
transcript_quals = "".join(
[chr(i + 33) for i in read.query_alignment_qualities[umi_end:end_segment.start]]
)
# Place our data on the output queue:
if do_bam_out:
out_queue.put(
tuple([read.query_name, umi_bases, umi_quals, transcript_bases, transcript_quals, read.to_string()])
)
else:
out_queue.put(
tuple([read.query_name, umi_bases, umi_quals, transcript_bases, transcript_quals])
)
def _sub_process_write_fn(
out_queue,
out_base_name,
cell_barcode,
pbar,
res,
do_bam_out,
out_bam_header
):
"""Thread / process fn to write out all our data."""
try:
if do_bam_out:
out_bam_file = pysam.AlignmentFile(f"{out_base_name}.cbc_umi_annotated.bam", "wb", header=out_bam_header)
with open(f"{out_base_name}{__OUT_READ_FILE_SUFFIX}1.fastq", "w") as mates1_file, \
open(f"{out_base_name}{__OUT_READ_FILE_SUFFIX}2.fastq", "w") as mates2_file:
while True:
# Wait for some output data:
raw_data = out_queue.get()
# Check for exit sentinel:
if raw_data is None:
break
# Unpack data:
if do_bam_out:
read_name, umi_bases, umi_quals, transcript_bases, transcript_quals, read_string = raw_data
else:
read_name, umi_bases, umi_quals, transcript_bases, transcript_quals = raw_data
# Create mates1 and mates2 records:
mates_1_record = pysam.FastxRecord(
name=read_name,
sequence=cell_barcode + umi_bases,
quality=(chr(33 + 60) * len(cell_barcode)) + umi_quals
)
mates_2_record = pysam.FastxRecord(
name=read_name,
sequence=transcript_bases,
quality=transcript_quals
)
# Write out mates1 and mates2 records:
mates1_file.write(str(mates_1_record))
mates1_file.write("\n")
mates2_file.write(str(mates_2_record))
mates2_file.write("\n")
if do_bam_out:
read = pysam.AlignedSegment.fromstring(
read_string, pysam.AlignmentHeader.from_dict(dict())
)
read.set_tag("CR", cell_barcode)
read.set_tag("UR", umi_bases)
out_bam_file.write(read)
# Increment our counters:
res["num_reads_processed"] += 1
pbar.update(1)
# Obligatory log message:
logger.debug("Processed read: %s", read_name)
finally:
if do_bam_out:
out_bam_file.close()
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
# ReCode by @mrismanaziz
# FROM Man-Userbot <https://github.com/mrismanaziz/Man-Userbot>
# t.me/SharingUserbot & t.me/Lunatic0de
import random
import time
from datetime import datetime
from speedtest import Speedtest
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP, StartTime, bot
from userbot.events import man_cmd, register
from userbot.utils import humanbytes
absen = [
"**Hadir bang** 😁",
"**Hadir kak** 😉",
"**Hadir dong** 😁",
"**Hadir ganteng** 🥵",
"**Hadir bro** 😎",
"**Hadir kak maap telat** 🥺",
]
async def get_readable_time(seconds: int) -> str:
count = 0
up_time = ""
time_list = []
time_suffix_list = ["s", "m", "Jam", "Hari"]
while count < 4:
count += 1
remainder, result = divmod(seconds, 60) if count < 3 else divmod(seconds, 24)
if seconds == 0 and remainder == 0:
break
time_list.append(int(result))
seconds = int(remainder)
for x in range(len(time_list)):
time_list[x] = str(time_list[x]) + time_suffix_list[x]
if len(time_list) == 4:
up_time += time_list.pop() + ", "
time_list.reverse()
up_time += ":".join(time_list)
return up_time
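# Illustrative example (must be awaited, since the function is async):
#   await get_readable_time(3661) -> "1Jam:1m:1s"  (1 hour, 1 minute, 1 second)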
@bot.on(man_cmd(outgoing=True, pattern=r"ping$"))
async def _(ping):
"""For .ping command, ping the userbot from any chat."""
uptime = await get_readable_time((time.time() - StartTime))
start = datetime.now()
await ping.edit("**✣**")
await ping.edit("**✣✣**")
await ping.edit("**✣✣✣**")
await ping.edit("**✣✣✣✣**")
end = datetime.now()
duration = (end - start).microseconds / 1000
user = await bot.get_me()
await ping.edit(
f"**PONG!!🏓**\n"
f"✣ **Pinger** - `%sms`\n"
f"✣ **Uptime -** `{uptime}` \n"
f"**✦҈͜͡Owner :** [{user.first_name}](tg://user?id={user.id})" % (duration)
)
@bot.on(man_cmd(outgoing=True, pattern=r"xping$"))
async def _(ping):
"""For .ping command, ping the userbot from any chat."""
uptime = await get_readable_time((time.time() - StartTime))
start = datetime.now()
await ping.edit("`Pinging....`")
end = datetime.now()
duration = (end - start).microseconds / 1000
await ping.edit(
f"**PONG!! 🍭**\n**Pinger** : %sms\n**Bot Uptime** : {uptime}🕛" % (duration)
)
@bot.on(man_cmd(outgoing=True, pattern=r"lping$"))
async def _(ping):
"""For .ping command, ping the userbot from any chat."""
uptime = await get_readable_time((time.time() - StartTime))
start = datetime.now()
await ping.edit("**★ PING ★**")
await ping.edit("**★★ PING ★★**")
await ping.edit("**★★★ PING ★★★**")
await ping.edit("**★★★★ PING ★★★★**")
await ping.edit("**✦҈͜͡➳ PONG!**")
end = datetime.now()
duration = (end - start).microseconds / 1000
user = await bot.get_me()
await ping.edit(
f"❃ **Ping !!** "
f"`%sms` \n"
f"❃ **Uptime -** "
f"`{uptime}` \n"
f"**✦҈͜͡➳ Master :** [{user.first_name}](tg://user?id={user.id})" % (duration)
)
@bot.on(man_cmd(outgoing=True, pattern=r"fping$"))
async def _(f):
"""For .ping command, ping the userbot from any chat."""
    uptime = await get_readable_time((time.time() - StartTime))
start = datetime.now()
await f.edit(". /¯ )")
await f.edit(". /¯ )\n /¯ /")
await f.edit(
". /¯ )\n /¯ /\n / /"
)
await f.edit(
". /¯ )\n /¯ /\n / /\n /´¯/' '/´¯¯`•¸"
)
await f.edit(
". /¯ )\n /¯ /\n / /\n /´¯/' '/´¯¯`•¸\n /'/ / / /¨¯\\ "
)
await f.edit(
". /¯ )\n /¯ /\n / /\n /´¯/' '/´¯¯`•¸\n /'/ / / /¨¯\\ \n ('( ( ( ( ¯~/' ')"
)
await f.edit(
". /¯ )\n /¯ /\n / /\n /´¯/' '/´¯¯`•¸\n /'/ / / /¨¯\\ \n ('( ( ( ( ¯~/' ')\n \\ /"
)
await f.edit(
". /¯ )\n /¯ /\n / /\n /´¯/' '/´¯¯`•¸\n /'/ / / /¨¯\\ \n ('( ( ( ( ¯~/' ')\n \\ /\n \\ _.•´"
)
await f.edit(
". /¯ )\n /¯ /\n / /\n /´¯/' '/´¯¯`•¸\n /'/ / / /¨¯\\ \n ('( ( ( ( ¯~/' ')\n \\ /\n \\ _.•´\n \\ ("
)
await f.edit(
". /¯ )\n /¯ /\n / /\n /´¯/' '/´¯¯`•¸\n /'/ / / /¨¯\\ \n ('( ( ( ( ¯~/' ')\n \\ /\n \\ _.•´\n \\ (\n \\ "
)
end = datetime.now()
duration = (end - start).microseconds / 1000
user = await bot.get_me()
await f.edit(
f"**PONG!!🏓**\n"
f"✣ **Pinger** - `%sms`\n"
f"✣ **Uptime -** `{uptime}` \n"
f"**✦҈͜͡Owner :** [{user.first_name}](tg://user?id={user.id})" % (duration)
)
@bot.on(man_cmd(outgoing=True, pattern=r"keping$"))
async def _(pong):
await get_readable_time((time.time() - StartTime))
start = datetime.now()
await pong.edit("**『⍟𝐊𝐎𝐍𝐓𝐎𝐋』**")
await pong.edit("**◆◈𝐊𝐀𝐌𝐏𝐀𝐍𝐆◈◆**")
await pong.edit("**𝐏𝐄𝐂𝐀𝐇𝐊𝐀𝐍 𝐁𝐈𝐉𝐈 𝐊𝐀𝐔 𝐀𝐒𝐔**")
await pong.edit("**☬𝐒𝐈𝐀𝐏 𝐊𝐀𝐌𝐏𝐀𝐍𝐆 𝐌𝐄𝐍𝐔𝐌𝐁𝐔𝐊 𝐀𝐒𝐔☬**")
end = datetime.now()
duration = (end - start).microseconds / 1000
user = await bot.get_me()
await pong.edit(
f"**✲ 𝙺𝙾𝙽𝚃𝙾𝙻 𝙼𝙴𝙻𝙴𝙳𝚄𝙶** "
f"\n ⫸ ᴷᵒⁿᵗᵒˡ `%sms` \n"
f"**✲ 𝙱𝙸𝙹𝙸 𝙿𝙴𝙻𝙴𝚁** "
f"\n ⫸ ᴷᵃᵐᵖᵃⁿᵍ『[{user.first_name}](tg://user?id={user.id})』 \n" % (duration)
)
# .keping & kping Coded by Koala
@bot.on(man_cmd(outgoing=True, pattern=r"kping$"))
async def _(pong):
uptime = await get_readable_time((time.time() - StartTime))
start = datetime.now()
await pong.edit("8✊===D")
await pong.edit("8=✊==D")
await pong.edit("8==✊=D")
await pong.edit("8===✊D")
await pong.edit("8==✊=D")
await pong.edit("8=✊==D")
await pong.edit("8✊===D")
await pong.edit("8=✊==D")
await pong.edit("8==✊=D")
await pong.edit("8===✊D")
await pong.edit("8==✊=D")
await pong.edit("8=✊==D")
await pong.edit("8✊===D")
await pong.edit("8=✊==D")
await pong.edit("8==✊=D")
await pong.edit("8===✊D")
await pong.edit("8===✊D💦")
await pong.edit("8====D💦💦")
await pong.edit("**CROOTTTT PINGGGG!**")
end = datetime.now()
duration = (end - start).microseconds / 1000
await pong.edit(
f"**NGENTOT!! 🐨**\n**KAMPANG** : %sms\n**Bot Uptime** : {uptime}🕛" % (duration)
)
@bot.on(man_cmd(outgoing=True, pattern=r"speedtest$"))
async def _(speed):
"""For .speedtest command, use SpeedTest to check server speeds."""
await speed.edit("`Running speed test...`")
test = Speedtest()
test.get_best_server()
test.download()
test.upload()
test.results.share()
result = test.results.dict()
    msg = (
        f"**Started at {result['timestamp']}**\n\n"
        "**Client**\n"
        f"**ISP :** `{result['client']['isp']}`\n"
        f"**Country :** `{result['client']['country']}`\n\n"
        "**Server**\n"
        f"**Name :** `{result['server']['name']}`\n"
        f"**Country :** `{result['server']['country']}`\n"
        f"**Sponsor :** `{result['server']['sponsor']}`\n\n"
        f"**Ping :** `{result['ping']}`\n"
        f"**Upload :** `{humanbytes(result['upload'])}/s`\n"
        f"**Download :** `{humanbytes(result['download'])}/s`"
)
await speed.delete()
await speed.client.send_file(
speed.chat_id,
result["share"],
caption=msg,
force_document=False,
)
@bot.on(man_cmd(outgoing=True, pattern=r"pong$"))
async def _(pong):
"""For .ping command, ping the userbot from any chat."""
start = datetime.now()
await pong.edit("`Sepong.....🏓`")
end = datetime.now()
duration = (end - start).microseconds / 9000
await pong.edit("🏓 **Ping!**\n`%sms`" % (duration))
# If you fork this, do not remove this "absen" (attendance) handler 😡
@register(incoming=True, from_users=844432220, pattern=r"^.absen$")
async def risman(ganteng):
await ganteng.reply(random.choice(absen))
# Do not delete this; just copy it and add your own handlers 😡
# If you delete it, you will be gbanned and your Telegram account will be flagged 🥴😡
CMD_HELP.update(
{
"ping": f"**Plugin : **`ping`\
\n\n • **Syntax :** `{cmd}ping` ; `{cmd}lping` ; `{cmd}xping` ; `{cmd}kping` ; `{cmd}fping`\
\n • **Function : **Untuk menunjukkan ping userbot.\
\n\n • **Syntax :** `{cmd}pong`\
\n • **Function : **Sama seperti perintah ping\
"
}
)
CMD_HELP.update(
{
"speedtest": f"**Plugin : **`speedtest`\
\n\n • **Syntax :** `{cmd}speedtest`\
\n • **Function : **Untuk Mengetes kecepatan server userbot.\
"
}
)
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
# ReCode by @mrismanaziz
# FROM Man-Userbot <https://github.com/mrismanaziz/Man-Userbot>
# t.me/SharingUserbot & t.me/Lunatic0de
import random
import time
from datetime import datetime
from speedtest import Speedtest
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP, StartTime, bot
from userbot.events import man_cmd, register
from userbot.utils import humanbytes
absen = [
"**Hadir bang** 😁",
"**Hadir kak** 😉",
"**Hadir dong** 😁",
"**Hadir ganteng** 🥵",
"**Hadir bro** 😎",
"**Hadir kak maap telat** 🥺",
]
async def get_readable_time(seconds: int) -> str:
count = 0
up_time = ""
time_list = []
time_suffix_list = ["s", "m", "Jam", "Hari"]
while count < 4:
count += 1
remainder, result = divmod(seconds, 60) if count < 3 else divmod(seconds, 24)
if seconds == 0 and remainder == 0:
break
time_list.append(int(result))
seconds = int(remainder)
for x in range(len(time_list)):
time_list[x] = str(time_list[x]) + time_suffix_list[x]
if len(time_list) == 4:
up_time += time_list.pop() + ", "
time_list.reverse()
up_time += ":".join(time_list)
return up_time
@bot.on(man_cmd(outgoing=True, pattern=r"ping$"))
async def _(ping):
"""For .ping command, ping the userbot from any chat."""
uptime = await get_readable_time((time.time() - StartTime))
start = datetime.now()
await ping.edit("**✣**")
await ping.edit("**✣✣**")
await ping.edit("**✣✣✣**")
await ping.edit("**✣✣✣✣**")
end = datetime.now()
duration = (end - start).microseconds / 1000
user = await bot.get_me()
await ping.edit(
f"**PONG!!🏓**\n"
f"✣ **Pinger** - `%sms`\n"
f"✣ **Uptime -** `{uptime}` \n"
f"**✦҈͜͡Owner :** [{user.first_name}](tg://user?id={user.id})" % (duration)
)
@bot.on(man_cmd(outgoing=True, pattern=r"xping$"))
async def _(ping):
"""For .ping command, ping the userbot from any chat."""
uptime = await get_readable_time((time.time() - StartTime))
start = datetime.now()
await ping.edit("`Pinging....`")
end = datetime.now()
duration = (end - start).microseconds / 1000
await ping.edit(
f"**PONG!! 🍭**\n**Pinger** : %sms\n**Bot Uptime** : {uptime}🕛" % (duration)
)
@bot.on(man_cmd(outgoing=True, pattern=r"lping$"))
async def _(ping):
"""For .ping command, ping the userbot from any chat."""
uptime = await get_readable_time((time.time() - StartTime))
start = datetime.now()
await ping.edit("**★ PING ★**")
await ping.edit("**★★ PING ★★**")
await ping.edit("**★★★ PING ★★★**")
await ping.edit("**★★★★ PING ★★★★**")
await ping.edit("**✦҈͜͡➳ PONG!**")
end = datetime.now()
duration = (end - start).microseconds / 1000
user = await bot.get_me()
await ping.edit(
f"❃ **Ping !!** "
f"`%sms` \n"
f"❃ **Uptime -** "
f"`{uptime}` \n"
f"**✦҈͜͡➳ Master :** [{user.first_name}](tg://user?id={user.id})" % (duration)
)
@bot.on(man_cmd(outgoing=True, pattern=r"fping$"))
async def _(f):
"""For .ping command, ping the userbot from any chat."""
await get_readable_time((time.time() - StartTime))
start = datetime.now()
await f.edit(". /¯ )")
await f.edit(". /¯ )\n /¯ /")
await f.edit(
". /¯ )\n /¯ /\n / /"
)
await f.edit(
". /¯ )\n /¯ /\n / /\n /´¯/' '/´¯¯`•¸"
)
await f.edit(
". /¯ )\n /¯ /\n / /\n /´¯/' '/´¯¯`•¸\n /'/ / / /¨¯\\ "
)
await f.edit(
". /¯ )\n /¯ /\n / /\n /´¯/' '/´¯¯`•¸\n /'/ / / /¨¯\\ \n ('( ( ( ( ¯~/' ')"
)
await f.edit(
". /¯ )\n /¯ /\n / /\n /´¯/' '/´¯¯`•¸\n /'/ / / /¨¯\\ \n ('( ( ( ( ¯~/' ')\n \\ /"
)
await f.edit(
". /¯ )\n /¯ /\n / /\n /´¯/' '/´¯¯`•¸\n /'/ / / /¨¯\\ \n ('( ( ( ( ¯~/' ')\n \\ /\n \\ _.•´"
)
await f.edit(
". /¯ )\n /¯ /\n / /\n /´¯/' '/´¯¯`•¸\n /'/ / / /¨¯\\ \n ('( ( ( ( ¯~/' ')\n \\ /\n \\ _.•´\n \\ ("
)
await f.edit(
". /¯ )\n /¯ /\n / /\n /´¯/' '/´¯¯`•¸\n /'/ / / /¨¯\\ \n ('( ( ( ( ¯~/' ')\n \\ /\n \\ _.•´\n \\ (\n \\ "
)
end = datetime.now()
duration = (end - start).microseconds / 1000
user = await bot.get_me()
await f.edit(
f"**PONG!!🏓**\n"
f"✣ **Pinger** - `%sms`\n"
f"✣ **Uptime -** `{uptime}` \n"
f"**✦҈͜͡Owner :** [{user.first_name}](tg://user?id={user.id})" % (duration)
)
@bot.on(man_cmd(outgoing=True, pattern=r"keping$"))
async def _(pong):
await get_readable_time((time.time() - StartTime))
start = datetime.now()
await pong.edit("**『⍟𝐊𝐎𝐍𝐓𝐎𝐋』**")
await pong.edit("**◆◈𝐊𝐀𝐌𝐏𝐀𝐍𝐆◈◆**")
await pong.edit("**𝐏𝐄𝐂𝐀𝐇𝐊𝐀𝐍 𝐁𝐈𝐉𝐈 𝐊𝐀𝐔 𝐀𝐒𝐔**")
await pong.edit("**☬𝐒𝐈𝐀𝐏 𝐊𝐀𝐌𝐏𝐀𝐍𝐆 𝐌𝐄𝐍𝐔𝐌𝐁𝐔𝐊 𝐀𝐒𝐔☬**")
end = datetime.now()
duration = (end - start).microseconds / 1000
user = await bot.get_me()
await pong.edit(
f"**✲ 𝙺𝙾𝙽𝚃𝙾𝙻 𝙼𝙴𝙻𝙴𝙳𝚄𝙶** "
f"\n ⫸ ᴷᵒⁿᵗᵒˡ `%sms` \n"
f"**✲ 𝙱𝙸𝙹𝙸 𝙿𝙴𝙻𝙴𝚁** "
f"\n ⫸ ᴷᵃᵐᵖᵃⁿᵍ『[{user.first_name}](tg://user?id={user.id})』 \n" % (duration)
)
# .keping & kping Coded by Koala
@bot.on(man_cmd(outgoing=True, pattern=r"kping$"))
async def _(pong):
uptime = await get_readable_time((time.time() - StartTime))
start = datetime.now()
await pong.edit("8✊===D")
await pong.edit("8=✊==D")
await pong.edit("8==✊=D")
await pong.edit("8===✊D")
await pong.edit("8==✊=D")
await pong.edit("8=✊==D")
await pong.edit("8✊===D")
await pong.edit("8=✊==D")
await pong.edit("8==✊=D")
await pong.edit("8===✊D")
await pong.edit("8==✊=D")
await pong.edit("8=✊==D")
await pong.edit("8✊===D")
await pong.edit("8=✊==D")
await pong.edit("8==✊=D")
await pong.edit("8===✊D")
await pong.edit("8===✊D💦")
await pong.edit("8====D💦💦")
await pong.edit("**CROOTTTT PINGGGG!**")
end = datetime.now()
duration = (end - start).microseconds / 1000
await pong.edit(
f"**NGENTOT!! 🐨**\n**KAMPANG** : %sms\n**Bot Uptime** : {uptime}🕛" % (duration)
)
@bot.on(man_cmd(outgoing=True, pattern=r"speedtest$"))
async def _(speed):
"""For .speedtest command, use SpeedTest to check server speeds."""
await speed.edit("`Running speed test...`")
test = Speedtest()
test.get_best_server()
test.download()
test.upload()
test.results.share()
result = test.results.dict()
msg = (
f"**Started at {result['timestamp']}**\n\n"
"**Client**\n"
f"**ISP :** `{result['client']['isp']}`\n"
f"**Country :** `{result['client']['country']}`\n\n"
"**Server**\n"
f"**Name :** `{result['server']['name']}`\n"
f"**Country :** `{result['server']['country']}`\n"
f"**Sponsor :** `{result['server']['sponsor']}`\n\n"
f"**Ping :** `{result['ping']}`\n"
f"**Upload :** `{humanbytes(result['upload'])}/s`\n"
f"**Download :** `{humanbytes(result['download'])}/s`"
)
await speed.delete()
await speed.client.send_file(
speed.chat_id,
result["share"],
caption=msg,
force_document=False,
)
@bot.on(man_cmd(outgoing=True, pattern=r"pong$"))
async def _(pong):
"""For .ping command, ping the userbot from any chat."""
start = datetime.now()
await pong.edit("`Sepong.....🏓`")
end = datetime.now()
duration = (end - start).microseconds / 9000
await pong.edit("🏓 **Ping!**\n`%sms`" % (duration))
# KALO NGEFORK absen ini GA USAH DI HAPUS YA GOBLOK 😡
@register(incoming=True, from_users=844432220, pattern=r"^.absen$")
async def risman(ganteng):
await ganteng.reply(random.choice(absen))
# JANGAN DI HAPUS GOBLOK 😡 LU COPY AJA TINGGAL TAMBAHIN
# DI HAPUS GUA GBAN YA 🥴 GUA TANDAIN LU AKUN TELENYA 😡
CMD_HELP.update(
{
"ping": f"**Plugin : **`ping`\
\n\n • **Syntax :** `{cmd}ping` ; `{cmd}lping` ; `{cmd}xping` ; `{cmd}kping` ; `{cmd}fping`\
\n • **Function : **Untuk menunjukkan ping userbot.\
\n\n • **Syntax :** `{cmd}pong`\
\n • **Function : **Sama seperti perintah ping\
"
}
)
CMD_HELP.update(
{
"speedtest": f"**Plugin : **`speedtest`\
\n\n • **Syntax :** `{cmd}speedtest`\
\n • **Function : **Untuk Mengetes kecepatan server userbot.\
"
}
)
|
import jsonlines
import re
import transformers
import torch
from tqdm import trange, tqdm
import argparse
import os, sys
def get_case_insensitive_key_value(input_dict, key):
return next((value for dict_key, value in input_dict.items() if dict_key.lower() == key.lower()), None)
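# e.g., with an illustrative label2id mapping:
#   get_case_insensitive_key_value({"ENTAILMENT": 0, "neutral": 1}, "entailment") -> 0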
def filter_triples(model, tokenizer, texts):
    # Guard against an empty batch of candidate texts; max() over an empty
    # sequence would otherwise raise a ValueError before anything is scored.
    if not texts:
        return []
    if max(len(text) for text in texts) > 256:
        range_length = 12
    else:
        range_length = 64
result = []
for batch in range(0,len(texts),range_length):
encoded_input = tokenizer(
[ex[0] for ex in texts[batch: batch + range_length]], [ex[1] for ex in texts[batch: batch + range_length]],
return_tensors="pt",
add_special_tokens=True,
max_length=256,
padding='longest',
return_token_type_ids=False,
truncation_strategy='only_first')
for tensor in encoded_input:
encoded_input[tensor] = encoded_input[tensor].cuda()
with torch.no_grad(): # remove this if you need gradients.
outputs = model(**encoded_input, return_dict=True, output_attentions=False, output_hidden_states = False)
result.append(outputs['logits'].softmax(dim=1))
del outputs
logits = torch.cat(result)
# if language == 'ko':
# return logits.argmax(1) == get_case_insensitive_key_value(model.config.label2id, 'entailment')# [:,get_case_insensitive_key_value(model.config.label2id, 'entailment')]>0.75
return logits[:,get_case_insensitive_key_value(model.config.label2id, 'entailment')]#>0.75
def prepare_triplet(subject_entity, object_entity, article_text, predicate):
text_triplet = ''
    text_triplet += re.compile(r"(?<!\d)\.(?!\d)").split(article_text[:min(subject_entity['boundaries'][0], object_entity['boundaries'][0])])[-1]
    text_triplet += article_text[min(subject_entity['boundaries'][0], object_entity['boundaries'][0]):max(subject_entity['boundaries'][1], object_entity['boundaries'][1])]
    text_triplet += re.compile(r"(?<!\d)\.(?!\d)").split(article_text[max(subject_entity['boundaries'][1], object_entity['boundaries'][1]):])[0]
if language == 'ko' or language == 'kosource':
return (text_triplet.strip('\n'), ' '.join([str(subject_entity['surfaceform']), str(object_entity['surfaceform']), str(predicate['surfaceform'])]))
# return (text_triplet.strip('\n'), ' '.join([str(object_entity['surfaceform']), str(predicate['surfaceform']), str(subject_entity['surfaceform'])]))
return (text_triplet.strip('\n'), ' '.join([str(subject_entity['surfaceform']), str(predicate['surfaceform']), str(object_entity['surfaceform'])]))
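# Illustrative behaviour (hypothetical article text): for
#   "Paris is big. Paris is the capital of France. It is old."
# with subject boundaries [14, 19] ("Paris") and object boundaries [38, 44] ("France"),
# the premise is roughly "Paris is the capital of France" (the span between the two
# mentions, extended to the surrounding sentence boundaries), and the hypothesis is the
# subject/object/predicate surface forms joined by spaces (order depends on `language`).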
def main(folder_input = 'out/ko'):
global language
language = folder_input.split('/')[1]
if language == 'ko' or language == 'kosource':
model_name_or_path = '/home/huguetcabot/sentence_transformers/test-glue/XNLI'
# model_name_or_path = '/home/huguetcabot/sentence_transformers/test-glue/run-1/checkpoint-3910'
else:
model_name_or_path = 'joeddav/xlm-roberta-large-xnli'
tokenizer = transformers.AutoTokenizer.from_pretrained(
model_name_or_path)
model_config = transformers.AutoConfig.from_pretrained(
model_name_or_path,
# num_labels=2,
output_hidden_states=True,
output_attentions=True,
)
model = transformers.AutoModelForSequenceClassification.from_pretrained(model_name_or_path, config = model_config)
model.cuda()
model.eval()
model.half()
    with jsonlines.open(f'out_clean/{"/".join(folder_input.split("/")[1:])}.jsonl', mode='w') as writer:
for k,j,y in os.walk(folder_input):
for file_name in y:
with jsonlines.open(k + '/' + file_name) as reader:
for i, article in tqdm(enumerate(reader)):
previous = []
triples_list = []
texts = []
for triple in article['triples']:
if triple['subject']['boundaries'] != None and triple['object']['boundaries'] != None and (triple['subject']['boundaries'], triple['object']['boundaries']) not in previous:
previous.append((triple['subject']['boundaries'], triple['object']['boundaries']))
triples_list.append(triple)
texts.append(prepare_triplet(triple['subject'], triple['object'], article['text'], triple["predicate"]))
elif (triple['subject']['boundaries'], triple['object']['boundaries']) not in previous:
distance = 1000000
for entity in article['entities']:
if entity['uri'] == triple['subject']['uri']:
if abs(min(triple['object']['boundaries'])-min(entity['boundaries'])) < distance:
subject_entity = entity
distance = abs(min(triple['object']['boundaries'])-min(entity['boundaries']))
triple['subject'] = subject_entity
previous.append((triple['subject']['boundaries'], triple['object']['boundaries']))
triples_list.append(triple)
texts.append(prepare_triplet(subject_entity, triple['object'], article['text'], triple["predicate"]))
indexes = filter_triples(model, tokenizer, texts)
if len(indexes) == 0:
continue
for pred, trip in zip(indexes, triples_list):
trip['confidence'] = pred.item()
# article['triples'] = [x for i,x in zip(indexes, triples_list) if (i == True) or x["predicate"]["uri"] in ["P569", "P570"]]
article['triples'] = triples_list
writer.write(article)
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),
formatter_class=argparse.RawDescriptionHelpFormatter,
description=__doc__)
parser.add_argument("--folder_input",
help="input file")
args = parser.parse_args()
main(args.folder_input)
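# Example invocation (hypothetical script name and directory layout):
#   python filter_triples_nli.py --folder_input out/ko
# which walks every file under out/ko and writes out_clean/ko.jsonl, attaching an NLI
# entailment score to each triple as `confidence`.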
|
import jsonlines
import re
import transformers
import torch
from tqdm import trange, tqdm
import argparse
import os, sys
def get_case_insensitive_key_value(input_dict, key):
return next((value for dict_key, value in input_dict.items() if dict_key.lower() == key.lower()), None)
def filter_triples(model, tokenizer, texts):
if max([len(text) for text in texts])>256:
range_length = 12
else:
range_length = 64
result = []
for batch in range(0,len(texts),range_length):
encoded_input = tokenizer(
[ex[0] for ex in texts[batch: batch + range_length]], [ex[1] for ex in texts[batch: batch + range_length]],
return_tensors="pt",
add_special_tokens=True,
max_length=256,
padding='longest',
return_token_type_ids=False,
truncation_strategy='only_first')
for tensor in encoded_input:
encoded_input[tensor] = encoded_input[tensor].cuda()
with torch.no_grad(): # remove this if you need gradients.
outputs = model(**encoded_input, return_dict=True, output_attentions=False, output_hidden_states = False)
result.append(outputs['logits'].softmax(dim=1))
del outputs
logits = torch.cat(result)
# if language == 'ko':
# return logits.argmax(1) == get_case_insensitive_key_value(model.config.label2id, 'entailment')# [:,get_case_insensitive_key_value(model.config.label2id, 'entailment')]>0.75
return logits[:,get_case_insensitive_key_value(model.config.label2id, 'entailment')]#>0.75
def prepare_triplet(subject_entity, object_entity, article_text, predicate):
text_triplet = ''
text_triplet += re.compile("(?<!\d)\.(?!\d)").split(article_text[:min(subject_entity['boundaries'][0], object_entity['boundaries'][0])])[-1]
text_triplet += article_text[min(subject_entity['boundaries'][0], object_entity['boundaries'][0]):max(subject_entity['boundaries'][1], object_entity['boundaries'][1])]
text_triplet += re.compile("(?<!\d)\.(?!\d)").split(article_text[max(subject_entity['boundaries'][1], object_entity['boundaries'][1]):])[0]
if language == 'ko' or language == 'kosource':
return (text_triplet.strip('\n'), ' '.join([str(subject_entity['surfaceform']), str(object_entity['surfaceform']), str(predicate['surfaceform'])]))
# return (text_triplet.strip('\n'), ' '.join([str(object_entity['surfaceform']), str(predicate['surfaceform']), str(subject_entity['surfaceform'])]))
return (text_triplet.strip('\n'), ' '.join([str(subject_entity['surfaceform']), str(predicate['surfaceform']), str(object_entity['surfaceform'])]))
def main(folder_input = 'out/ko'):
global language
language = folder_input.split('/')[1]
if language == 'ko' or language == 'kosource':
model_name_or_path = '/home/huguetcabot/sentence_transformers/test-glue/XNLI'
# model_name_or_path = '/home/huguetcabot/sentence_transformers/test-glue/run-1/checkpoint-3910'
else:
model_name_or_path = 'joeddav/xlm-roberta-large-xnli'
tokenizer = transformers.AutoTokenizer.from_pretrained(
model_name_or_path)
model_config = transformers.AutoConfig.from_pretrained(
model_name_or_path,
# num_labels=2,
output_hidden_states=True,
output_attentions=True,
)
model = transformers.AutoModelForSequenceClassification.from_pretrained(model_name_or_path, config = model_config)
model.cuda()
model.eval()
model.half()
with jsonlines.open(f'out_clean/{"/".join(folder_input.split("/")[1:])}.jsonl', mode='w') as writer:
for k,j,y in os.walk(folder_input):
for file_name in y:
with jsonlines.open(k + '/' + file_name) as reader:
for i, article in tqdm(enumerate(reader)):
previous = []
triples_list = []
texts = []
for triple in article['triples']:
if triple['subject']['boundaries'] != None and triple['object']['boundaries'] != None and (triple['subject']['boundaries'], triple['object']['boundaries']) not in previous:
previous.append((triple['subject']['boundaries'], triple['object']['boundaries']))
triples_list.append(triple)
texts.append(prepare_triplet(triple['subject'], triple['object'], article['text'], triple["predicate"]))
elif (triple['subject']['boundaries'], triple['object']['boundaries']) not in previous:
distance = 1000000
for entity in article['entities']:
if entity['uri'] == triple['subject']['uri']:
if abs(min(triple['object']['boundaries'])-min(entity['boundaries'])) < distance:
subject_entity = entity
distance = abs(min(triple['object']['boundaries'])-min(entity['boundaries']))
triple['subject'] = subject_entity
previous.append((triple['subject']['boundaries'], triple['object']['boundaries']))
triples_list.append(triple)
texts.append(prepare_triplet(subject_entity, triple['object'], article['text'], triple["predicate"]))
indexes = filter_triples(model, tokenizer, texts)
if len(indexes) == 0:
continue
for pred, trip in zip(indexes, triples_list):
trip['confidence'] = pred.item()
# article['triples'] = [x for i,x in zip(indexes, triples_list) if (i == True) or x["predicate"]["uri"] in ["P569", "P570"]]
article['triples'] = triples_list
writer.write(article)
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),
formatter_class=argparse.RawDescriptionHelpFormatter,
description=__doc__)
parser.add_argument("--folder_input",
help="input file")
args = parser.parse_args()
main(args.folder_input)
|
#!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import concurrent.futures
import difflib
import os
import sys
from collections import defaultdict
from datetime import datetime
from distutils.util import strtobool
from itertools import groupby
from typing import Callable
from typing import DefaultDict
from typing import Dict
from typing import Iterable
from typing import List
from typing import Mapping
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import Type
import humanize
from bravado.exception import HTTPError
from service_configuration_lib import read_deploy
from paasta_tools import kubernetes_tools
from paasta_tools.adhoc_tools import AdhocJobConfig
from paasta_tools.api.client import get_paasta_api_client
from paasta_tools.chronos_tools import ChronosJobConfig
from paasta_tools.cli.utils import execute_paasta_serviceinit_on_remote_master
from paasta_tools.cli.utils import figure_out_service_name
from paasta_tools.cli.utils import get_instance_configs_for_service
from paasta_tools.cli.utils import lazy_choices_completer
from paasta_tools.cli.utils import list_deploy_groups
from paasta_tools.flinkcluster_tools import FlinkClusterConfig
from paasta_tools.flinkcluster_tools import get_dashboard_url
from paasta_tools.kubernetes_tools import KubernetesDeploymentConfig
from paasta_tools.kubernetes_tools import KubernetesDeployStatus
from paasta_tools.marathon_serviceinit import bouncing_status_human
from paasta_tools.marathon_serviceinit import desired_state_human
from paasta_tools.marathon_serviceinit import marathon_app_deploy_status_human
from paasta_tools.marathon_serviceinit import status_marathon_job_human
from paasta_tools.marathon_tools import MarathonDeployStatus
from paasta_tools.monitoring_tools import get_team
from paasta_tools.monitoring_tools import list_teams
from paasta_tools.tron_tools import TronActionConfig
from paasta_tools.utils import compose_job_id
from paasta_tools.utils import datetime_from_utc_to_local
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import get_soa_cluster_deploy_files
from paasta_tools.utils import InstanceConfig
from paasta_tools.utils import list_all_instances_for_service
from paasta_tools.utils import list_clusters
from paasta_tools.utils import list_services
from paasta_tools.utils import load_deployments_json
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import paasta_print
from paasta_tools.utils import PaastaColors
from paasta_tools.utils import SystemPaastaConfig
HTTP_ONLY_INSTANCE_CONFIG: Sequence[Type[InstanceConfig]] = [
FlinkClusterConfig,
KubernetesDeploymentConfig,
]
SSH_ONLY_INSTANCE_CONFIG = [
ChronosJobConfig,
AdhocJobConfig,
]
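# How these buckets are used in report_status_for_cluster below: HTTP_ONLY instance types
# are queried through the PaaSTA HTTP API, while SSH_ONLY types fall back to
# paasta_serviceinit on a remote master over SSH.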
def add_subparser(
subparsers,
) -> None:
status_parser = subparsers.add_parser(
'status',
help="Display the status of a PaaSTA service.",
description=(
"'paasta status' works by SSH'ing to remote PaaSTA masters and "
"inspecting the local APIs, and reports on the overal health "
"of a service."
),
epilog=(
"Note: This command requires SSH and sudo privileges on the remote PaaSTA "
"masters."
),
)
status_parser.add_argument(
'-v', '--verbose',
action='count',
dest="verbose",
default=0,
help="Print out more output regarding the state of the service. "
"A second -v will also print the stdout/stderr tail.",
)
status_parser.add_argument(
'-d', '--soa-dir',
dest="soa_dir",
metavar="SOA_DIR",
default=DEFAULT_SOA_DIR,
help="define a different soa config directory",
)
add_instance_filter_arguments(status_parser)
status_parser.set_defaults(command=paasta_status)
def add_instance_filter_arguments(
status_parser,
verb: str = 'inspect',
) -> None:
status_parser.add_argument(
'-s', '--service',
help=f'The name of the service you wish to {verb}',
).completer = lazy_choices_completer(list_services)
status_parser.add_argument(
'-c', '--clusters',
help=f"A comma-separated list of clusters to {verb}. By default, will {verb} all clusters.\n"
f"For example: --clusters norcal-prod,nova-prod",
).completer = lazy_choices_completer(list_clusters)
status_parser.add_argument(
'-i', '--instances',
help=f"A comma-separated list of instances to {verb}. By default, will {verb} all instances.\n"
f"For example: --instances canary,main",
) # No completer because we need to know service first and we can't until some other stuff has happened
status_parser.add_argument(
'-l', '--deploy-group',
help=(
f'Name of the deploy group which you want to {verb}. '
f'If specified together with --instances and/or --clusters, will {verb} common instances only.'
),
).completer = lazy_choices_completer(list_deploy_groups)
status_parser.add_argument(
'-o', '--owner',
help=f'Only {verb} instances with this owner specified in soa-configs.',
).completer = lazy_choices_completer(list_teams)
status_parser.add_argument(
'-r', '--registration',
help=f'Only {verb} instances with this registration.',
)
def missing_deployments_message(
service: str,
) -> str:
message = (
f"{service} has no deployments in deployments.json yet.\n "
"Has Jenkins run?"
)
return message
def get_deploy_info(
deploy_file_path: str,
) -> Mapping:
deploy_info = read_deploy(deploy_file_path)
if not deploy_info:
paasta_print('Error encountered with %s' % deploy_file_path)
exit(1)
return deploy_info
def get_planned_deployments(
service: str,
soa_dir: str,
) -> Iterable[str]:
for cluster, cluster_deploy_file in get_soa_cluster_deploy_files(
service=service,
soa_dir=soa_dir,
):
for instance in get_deploy_info(cluster_deploy_file):
yield f'{cluster}.{instance}'
def list_deployed_clusters(
pipeline: Sequence[str],
actual_deployments: Sequence[str],
) -> Sequence[str]:
"""Returns a list of clusters that a service is deployed to given
an input deploy pipeline and the actual deployments"""
deployed_clusters: List[str] = []
for namespace in pipeline:
cluster, instance = namespace.split('.')
if namespace in actual_deployments:
if cluster not in deployed_clusters:
deployed_clusters.append(cluster)
return deployed_clusters
def get_actual_deployments(
service: str,
soa_dir: str,
) -> Mapping[str, str]:
deployments_json = load_deployments_json(service, soa_dir)
if not deployments_json:
paasta_print("Warning: it looks like %s has not been deployed anywhere yet!" % service, file=sys.stderr)
# Create a dictionary of actual $service Jenkins deployments
actual_deployments = {}
    for key, branch_dict in deployments_json.config_dict.items():
        config_service, namespace = key.split(':')
        if config_service == service:
value = branch_dict['docker_image']
sha = value[value.rfind('-') + 1:]
actual_deployments[namespace.replace('paasta-', '', 1)] = sha
return actual_deployments
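# The returned mapping keys are '<cluster>.<instance>' namespaces and the values are
# deployed git shas, e.g. (hypothetical values) {'norcal-prod.main': 'abc123f'}.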
def paasta_status_on_api_endpoint(
cluster: str,
service: str,
instance: str,
output: List[str],
system_paasta_config: SystemPaastaConfig,
verbose: int,
) -> int:
client = get_paasta_api_client(cluster, system_paasta_config)
if not client:
paasta_print('Cannot get a paasta-api client')
exit(1)
try:
status = client.service.status_instance(service=service, instance=instance).result()
except HTTPError as exc:
paasta_print(exc.response.text)
return exc.status_code
output.append(' instance: %s' % PaastaColors.blue(instance))
if status.git_sha != '':
output.append(' Git sha: %s (desired)' % status.git_sha)
if status.marathon is not None:
return print_marathon_status(service, instance, output, status.marathon)
elif status.kubernetes is not None:
return print_kubernetes_status(service, instance, output, status.kubernetes)
elif status.tron is not None:
return print_tron_status(service, instance, output, status.tron, verbose)
elif status.flinkcluster is not None:
return print_flinkcluster_status(cluster, service, instance, output, status.flinkcluster.get('status'), verbose)
else:
paasta_print("Not implemented: Looks like %s is not a Marathon or Kubernetes instance" % instance)
return 0
def print_marathon_status(
service: str,
instance: str,
output: List[str],
marathon_status,
) -> int:
if marathon_status.error_message:
output.append(marathon_status.error_message)
return 1
bouncing_status = bouncing_status_human(
marathon_status.app_count,
marathon_status.bounce_method,
)
desired_state = desired_state_human(
marathon_status.desired_state,
marathon_status.expected_instance_count,
)
output.append(f" State: {bouncing_status} - Desired state: {desired_state}")
status = MarathonDeployStatus.fromstring(marathon_status.deploy_status)
if status != MarathonDeployStatus.NotRunning:
if status == MarathonDeployStatus.Delayed:
deploy_status = marathon_app_deploy_status_human(status, marathon_status.backoff_seconds)
else:
deploy_status = marathon_app_deploy_status_human(status)
else:
deploy_status = 'NotRunning'
output.append(
" {}".format(
status_marathon_job_human(
service=service,
instance=instance,
deploy_status=deploy_status,
desired_app_id=marathon_status.app_id,
app_count=marathon_status.app_count,
running_instances=marathon_status.running_instance_count,
normal_instance_count=marathon_status.expected_instance_count,
),
),
)
return 0
def kubernetes_app_deploy_status_human(status, backoff_seconds=None):
status_string = kubernetes_tools.KubernetesDeployStatus.tostring(status)
if status == kubernetes_tools.KubernetesDeployStatus.Waiting:
deploy_status = "%s (new tasks waiting for capacity to become available)" % PaastaColors.red(status_string)
elif status == kubernetes_tools.KubernetesDeployStatus.Deploying:
deploy_status = PaastaColors.yellow(status_string)
elif status == kubernetes_tools.KubernetesDeployStatus.Running:
deploy_status = PaastaColors.bold(status_string)
else:
deploy_status = status_string
return deploy_status
def status_kubernetes_job_human(
service: str,
instance: str,
deploy_status: str,
desired_app_id: str,
app_count: int,
running_instances: int,
normal_instance_count: int,
) -> str:
name = PaastaColors.cyan(compose_job_id(service, instance))
if app_count >= 0:
if running_instances >= normal_instance_count:
status = PaastaColors.green("Healthy")
instance_count = PaastaColors.green("(%d/%d)" % (running_instances, normal_instance_count))
elif running_instances == 0:
status = PaastaColors.yellow("Critical")
instance_count = PaastaColors.red("(%d/%d)" % (running_instances, normal_instance_count))
else:
status = PaastaColors.yellow("Warning")
instance_count = PaastaColors.yellow("(%d/%d)" % (running_instances, normal_instance_count))
return "Kubernetes: {} - up with {} instances. Status: {}".format(
status, instance_count, deploy_status,
)
else:
status = PaastaColors.yellow("Warning")
return "Kubernetes: {} - {} (app {}) is not configured in Kubernetes yet (waiting for bounce)".format(
status, name, desired_app_id,
)
def print_flinkcluster_status(
cluster: str,
service: str,
instance: str,
output: List[str],
status,
verbose: int,
) -> int:
if status is None:
output.append(PaastaColors.red(" Flink cluster is not available yet"))
return 1
if status.state != "running":
output.append(" State: {state}".format(
state=PaastaColors.yellow(status.state),
))
output.append(f" No other information available in non-running state")
return 0
dashboard_url = get_dashboard_url(
cluster=cluster,
service=service,
instance=instance,
)
if verbose:
output.append(f" Flink version: {status.config["flink-version"]} {status.config["flink-revision"]}")
else:
output.append(f" Flink version: {status.config["flink-version"]}")
output.append(f" URL: {dashboard_url}/")
output.append(f" State: {status.state}")
output.append(
" Jobs:"
f" {status.overview["jobs-running"]} running,"
f" {status.overview["jobs-finished"]} finished,"
f" {status.overview["jobs-failed"]} failed,"
f" {status.overview["jobs-cancelled"]} cancelled",
)
output.append(
" "
f" {status.overview["taskmanagers"]} taskmanagers,"
f" {status.overview["slots-available"]}/{status.overview["slots-total"]} slots available",
)
output.append(f" Jobs:")
if verbose:
output.append(f" Job Name State Job ID Started")
else:
output.append(f" Job Name State Started")
# Use only the most recent jobs
unique_jobs = (
sorted(jobs, key=lambda j: -j['start-time'])[0]
for _, jobs in groupby(
sorted(status.jobs, key=lambda j: j['name']),
lambda j: j['name'],
)
)
for job in unique_jobs:
job_id = job['jid']
if verbose:
fmt = """ {job_name: <32.32} {state: <11} {job_id} {start_time}
{dashboard_url}"""
else:
fmt = " {job_name: <32.32} {state: <11} {start_time}"
start_time = datetime_from_utc_to_local(datetime.utcfromtimestamp(int(job['start-time']) // 1000))
output.append(fmt.format(
job_id=job_id,
job_name=job['name'].split('.', 2)[2],
state=job['state'],
start_time=f'{str(start_time)} ({humanize.naturaltime(start_time)})',
dashboard_url=PaastaColors.grey(
f'{dashboard_url}/#/jobs/{job_id}',
),
))
if job_id in status.exceptions:
exceptions = status.exceptions[job_id]
root_exception = exceptions['root-exception']
if root_exception is not None:
output.append(f" Exception: {root_exception}")
ts = exceptions['timestamp']
if ts is not None:
exc_ts = datetime_from_utc_to_local(datetime.utcfromtimestamp(int(ts) // 1000))
output.append(f" {str(exc_ts)} ({humanize.naturaltime(exc_ts)})")
return 0
def print_kubernetes_status(
service: str,
instance: str,
output: List[str],
kubernetes_status,
) -> int:
if kubernetes_status.error_message:
output.append(kubernetes_status.error_message)
return 1
bouncing_status = bouncing_status_human(
kubernetes_status.app_count,
kubernetes_status.bounce_method,
)
desired_state = desired_state_human(
kubernetes_status.desired_state,
kubernetes_status.expected_instance_count,
)
output.append(f" State: {bouncing_status} - Desired state: {desired_state}")
status = KubernetesDeployStatus.fromstring(kubernetes_status.deploy_status)
deploy_status = kubernetes_app_deploy_status_human(status)
output.append(
" {}".format(
status_kubernetes_job_human(
service=service,
instance=instance,
deploy_status=deploy_status,
desired_app_id=kubernetes_status.app_id,
app_count=kubernetes_status.app_count,
running_instances=kubernetes_status.running_instance_count,
normal_instance_count=kubernetes_status.expected_instance_count,
),
),
)
return 0
def print_tron_status(
service: str,
instance: str,
output: List[str],
tron_status,
verbose: int = 0,
) -> int:
output.append(f" Tron job: {tron_status.job_name}")
if verbose:
output.append(f" Status: {tron_status.job_status}")
output.append(f" Schedule: {tron_status.job_schedule}")
output.append(" Dashboard: {}".format(PaastaColors.blue(tron_status.job_url)))
output.append(f" Action: {tron_status.action_name}")
output.append(f" Status: {tron_status.action_state}")
if verbose:
output.append(f" Start time: {tron_status.action_start_time}")
output.append(f" Command: {tron_status.action_command}")
if verbose > 1:
output.append(f" Raw Command: {tron_status.action_raw_command}")
output.append(f" Stdout: \n{tron_status.action_stdout}")
output.append(f" Stderr: \n{tron_status.action_stderr}")
return 0
def report_status_for_cluster(
service: str,
cluster: str,
deploy_pipeline: Sequence[str],
actual_deployments: Mapping[str, str],
instance_whitelist: Mapping[str, Type[InstanceConfig]],
system_paasta_config: SystemPaastaConfig,
verbose: int = 0,
use_api_endpoint: bool = False,
) -> Tuple[int, Sequence[str]]:
"""With a given service and cluster, prints the status of the instances
in that cluster"""
output = ['', 'service: %s' % service, 'cluster: %s' % cluster]
seen_instances = []
deployed_instances = []
instances = instance_whitelist.keys()
http_only_instances = [
instance for instance, instance_config_class in instance_whitelist.items() if instance_config_class
in HTTP_ONLY_INSTANCE_CONFIG
]
ssh_only_instances = [
instance for instance, instance_config_class in instance_whitelist.items() if instance_config_class
in SSH_ONLY_INSTANCE_CONFIG
]
tron_jobs = [
instance for instance, instance_config_class in instance_whitelist.items() if instance_config_class
== TronActionConfig
]
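    # The whitelist is split into three buckets, each reported through a different path:
    # HTTP API instances, SSH/serviceinit instances, and Tron actions.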
for namespace in deploy_pipeline:
cluster_in_pipeline, instance = namespace.split('.')
seen_instances.append(instance)
if cluster_in_pipeline != cluster:
continue
if instances and instance not in instances:
continue
# Case: service deployed to cluster.instance
if namespace in actual_deployments:
deployed_instances.append(instance)
# Case: flinkcluster instances don't use `deployments.json`
elif instance_whitelist.get(instance) == FlinkClusterConfig:
deployed_instances.append(instance)
# Case: service NOT deployed to cluster.instance
else:
output.append(' instance: %s' % PaastaColors.red(instance))
output.append(' Git sha: None (not deployed yet)')
api_return_code = 0
ssh_return_code = 0
if len(deployed_instances) > 0:
http_only_deployed_instances = [
deployed_instance
for deployed_instance in deployed_instances
if (
deployed_instance in http_only_instances
or deployed_instance not in ssh_only_instances and use_api_endpoint
)
]
if len(http_only_deployed_instances):
return_codes = [
paasta_status_on_api_endpoint(
cluster=cluster,
service=service,
instance=deployed_instance,
output=output,
system_paasta_config=system_paasta_config,
verbose=verbose,
)
for deployed_instance in http_only_deployed_instances
]
if any(return_codes):
api_return_code = 1
ssh_only_deployed_instances = [
deployed_instance
for deployed_instance in deployed_instances
if (
deployed_instance in ssh_only_instances
or deployed_instance not in http_only_instances and not use_api_endpoint
)
]
if len(ssh_only_deployed_instances):
ssh_return_code, status = execute_paasta_serviceinit_on_remote_master(
'status', cluster, service, ','.join(
deployed_instance
for deployed_instance in ssh_only_deployed_instances
),
system_paasta_config, stream=False, verbose=verbose,
ignore_ssh_output=True,
)
# Status results are streamed. This print is for possible error messages.
if status is not None:
for line in status.rstrip().split('\n'):
output.append(' %s' % line)
if len(tron_jobs) > 0:
return_codes = [
paasta_status_on_api_endpoint(
cluster=cluster,
service=service,
instance=tron_job,
output=output,
system_paasta_config=system_paasta_config,
verbose=verbose,
)
for tron_job in tron_jobs
]
seen_instances.extend(tron_jobs)
output.append(report_invalid_whitelist_values(instances, seen_instances, 'instance'))
if ssh_return_code:
return_code = ssh_return_code
elif api_return_code:
return_code = api_return_code
else:
return_code = 0
return return_code, output
def report_invalid_whitelist_values(
whitelist: Iterable[str],
items: Sequence[str],
item_type: str,
) -> str:
"""Warns the user if there are entries in ``whitelist`` which don't
correspond to any item in ``items``. Helps highlight typos.
"""
return_string = ""
bogus_entries = []
if whitelist is None:
return ''
for entry in whitelist:
if entry not in items:
bogus_entries.append(entry)
if len(bogus_entries) > 0:
return_string = (
"\n"
"Warning: This service does not have any %s matching these names:\n%s"
) % (item_type, ",".join(bogus_entries))
return return_string
def verify_instances(
args_instances: str,
service: str,
clusters: Sequence[str],
) -> Sequence[str]:
"""Verify that a list of instances specified by user is correct for this service.
:param args_instances: a list of instances.
:param service: the service name
    :param clusters: a list of clusters
:returns: a list of instances specified in args_instances without any exclusions.
"""
unverified_instances = args_instances.split(",")
service_instances: Set[str] = list_all_instances_for_service(service, clusters=clusters)
misspelled_instances: Sequence[str] = [i for i in unverified_instances if i not in service_instances]
if misspelled_instances:
suggestions: List[str] = []
for instance in misspelled_instances:
suggestions.extend(difflib.get_close_matches(instance, service_instances, n=5, cutoff=0.5)) # type: ignore
suggestions = list(set(suggestions))
if clusters:
message = (
"%s doesn't have any instances matching %s on %s."
% (
service,
', '.join(sorted(misspelled_instances)),
', '.join(sorted(clusters)),
)
)
else:
message = ("%s doesn't have any instances matching %s."
% (service, ', '.join(sorted(misspelled_instances))))
paasta_print(PaastaColors.red(message))
if suggestions:
paasta_print("Did you mean any of these?")
for instance in sorted(suggestions):
paasta_print(" %s" % instance)
return unverified_instances
def normalize_registrations(
service: str,
registrations: Sequence[str],
) -> Sequence[str]:
ret = []
for reg in registrations:
if '.' not in reg:
ret.append(f"{service}.{reg}")
else:
ret.append(reg)
return ret
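# For example, normalize_registrations('myservice', ['main', 'other.main'])
# returns ['myservice.main', 'other.main'].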
def get_filters(
args,
) -> Sequence[Callable[[InstanceConfig], bool]]:
"""Figures out which filters to apply from an args object, and returns them
:param args: args object
:returns: list of functions that take an instance config and returns if the instance conf matches the filter
"""
filters = []
if args.service:
filters.append(lambda conf: conf.get_service() in args.service.split(','))
if args.clusters:
filters.append(lambda conf: conf.get_cluster() in args.clusters.split(','))
if args.instances:
filters.append(lambda conf: conf.get_instance() in args.instances.split(','))
if args.deploy_group:
filters.append(lambda conf: conf.get_deploy_group() in args.deploy_group.split(','))
if args.registration:
normalized_regs = normalize_registrations(
service=args.service,
registrations=args.registration.split(','),
)
filters.append(
lambda conf: any(
reg in normalized_regs
for reg in (conf.get_registrations() if hasattr(conf, 'get_registrations') else [])
),
)
if args.owner:
owners = args.owner.split(',')
filters.append(
# If the instance owner is None, check the service owner, else check the instance owner
lambda conf: get_team(
overrides={},
service=conf.get_service(),
soa_dir=args.soa_dir,
) in owners if conf.get_team() is None else conf.get_team() in owners,
)
return filters
def apply_args_filters(
args,
) -> Mapping[str, Mapping[str, Mapping[str, Type[InstanceConfig]]]]:
"""
Take an args object and returns the dict of cluster:service:instances
Currently, will filter by clusters, instances, services, and deploy_groups
If no instances are found, will print a message and try to find matching instances
for each service
:param args: args object containing attributes to filter by
:returns: Dict of dicts, in format {cluster_name: {service_name: {instance1, instance2}}}
"""
clusters_services_instances: DefaultDict[
str,
DefaultDict[
str, Dict[str, Type[InstanceConfig]]
]
] = defaultdict(lambda: defaultdict(dict))
if args.service is None and args.owner is None:
args.service = figure_out_service_name(args, soa_dir=args.soa_dir)
filters = get_filters(args)
all_services = list_services(soa_dir=args.soa_dir)
if args.service and args.service not in all_services:
paasta_print(PaastaColors.red(f'The service "{args.service}" does not exist.'))
suggestions = difflib.get_close_matches(args.service, all_services, n=5, cutoff=0.5)
if suggestions:
paasta_print(PaastaColors.red(f'Did you mean any of these?'))
for suggestion in suggestions:
paasta_print(PaastaColors.red(f' {suggestion}'))
return clusters_services_instances
i_count = 0
for service in all_services:
if args.service and service != args.service:
continue
for instance_conf in get_instance_configs_for_service(service, soa_dir=args.soa_dir):
if all([f(instance_conf) for f in filters]):
cluster_service = clusters_services_instances[instance_conf.get_cluster()][service]
cluster_service[instance_conf.get_instance()] = instance_conf.__class__
i_count += 1
if i_count == 0 and args.service and args.instances:
if args.clusters:
clusters = args.clusters.split(',')
else:
clusters = list_clusters()
for service in args.service.split(','):
verify_instances(args.instances, service, clusters)
return clusters_services_instances
def paasta_status(
args,
) -> int:
"""Print the status of a Yelp service running on PaaSTA.
:param args: argparse.Namespace obj created from sys.args by cli"""
soa_dir = args.soa_dir
system_paasta_config = load_system_paasta_config()
if 'USE_API_ENDPOINT' in os.environ:
use_api_endpoint = strtobool(os.environ['USE_API_ENDPOINT'])
else:
use_api_endpoint = False
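    # When USE_API_ENDPOINT is set, instances that are not SSH-only are queried via the
    # PaaSTA HTTP API instead of paasta_serviceinit over SSH (see report_status_for_cluster).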
return_codes = [0]
tasks = []
clusters_services_instances = apply_args_filters(args)
for cluster, service_instances in clusters_services_instances.items():
for service, instances in service_instances.items():
all_flink = all(i == FlinkClusterConfig for i in instances.values())
actual_deployments: Mapping[str, str]
if all_flink:
actual_deployments = {}
else:
actual_deployments = get_actual_deployments(service, soa_dir)
if all_flink or actual_deployments:
deploy_pipeline = list(get_planned_deployments(service, soa_dir))
tasks.append((
report_status_for_cluster, dict(
service=service,
cluster=cluster,
deploy_pipeline=deploy_pipeline,
actual_deployments=actual_deployments,
instance_whitelist=instances,
system_paasta_config=system_paasta_config,
verbose=args.verbose,
use_api_endpoint=use_api_endpoint,
),
))
else:
paasta_print(missing_deployments_message(service))
return_codes.append(1)
with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
tasks = [executor.submit(t[0], **t[1]) for t in tasks] # type: ignore
for future in concurrent.futures.as_completed(tasks): # type: ignore
return_code, output = future.result()
paasta_print('\n'.join(output))
return_codes.append(return_code)
return max(return_codes)
|
#! /usr/bin/env python3
import argparse
import csv
import datetime
import json
import os
import sys
roast_fields = [
'dateTime',
'uid',
'roastNumber',
'roastName',
'beanId',
'rating',
'serialNumber',
'firmware',
'hardware',
{'fields': ['ambient', 'ambientTemp'], 'mapped_field': 'ambient', 'type': float},
{'fields': ['humidity', 'roomHumidity'], 'mapped_field': 'humidity', 'type': float},
{'fields': ['weightGreen'], 'mapped_field': 'weightGreen', 'type': float},
{'fields': ['weightRoasted'], 'mapped_field': 'weightRoasted', 'type': float},
'preheatTemperature',
'beanChargeTemperature',
'beanDropTemperature',
'drumChargeTemperature',
'drumDropTemperature',
'totalRoastTime',
'sampleRate',
'roastStartIndex',
'indexYellowingStart',
'indexFirstCrackStart',
'indexFirstCrackEnd',
'indexSecondCrackStart',
'indexSecondCrackEnd',
'roastEndIndex',
]
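# Dict entries describe columns whose source field name varies between RoasTime versions:
# the first key present in the roast JSON is cast with 'type' and written under
# 'mapped_field' (see set_roast_column below).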
def set_roast_column(roast_json, roast_columns, roast_field):
if 'fields' in roast_field:
for field in roast_field['fields']:
if field in roast_json:
roast_columns[roast_field['mapped_field']] = roast_field['type'](roast_json[field])
return
roast_columns[roast_field['mapped_field']] = ''
return
roast_columns[roast_field] = roast_json.get(roast_field, None)
def load_roast(roast_pathname):
sys.stderr.write(f'loading {roast_pathname}\n')
with open(roast_pathname, 'r', encoding='utf-8') as roast_file:
roast_json = json.load(roast_file)
roast = {}
for roast_field in roast_fields:
set_roast_column(roast_json, roast, roast_field)
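    # 'dateTime' is stored as epoch milliseconds, hence the division by 1000 below.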
roast['date'] = datetime.datetime.fromtimestamp(roast['dateTime'] / 1000).strftime('%Y-%m-%d')
roast['time'] = datetime.datetime.fromtimestamp(roast['dateTime'] / 1000).strftime('%H:%M:%S')
return roast
def load_roasts(roast_dirname):
roasts = []
for roast_filename in os.listdir(roast_dirname):
roast = load_roast(os.path.join(roast_dirname, roast_filename))
roasts.append(roast)
return roasts
def get_fields():
return [f if 'mapped_field' not in f else f['mapped_field'] for f in roast_fields]
def main():
default_fields = ['date', 'time', 'beanId', 'weightGreen']
valid_fields = ', '.join(get_fields())
epilog = f'Valid field names are: {valid_fields}'
parser = argparse.ArgumentParser(description='Convert RoasTime roast data to CSV.', epilog=epilog)
    parser.add_argument('-f', '--fields', help=f'comma-separated list of fields (default is {",".join(default_fields)})')
parser.add_argument('output_file', metavar='PATH', help='CSV file name')
roast_path = os.path.join(os.path.expanduser("~"), "Library/Application Support/roast-time/roasts")
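    # This is the default RoasTime data directory on macOS; adjust roast_path on other platforms.
    # Hypothetical invocation (script name is illustrative):
    #   python roasts_to_csv.py -f date,time,beanId,weightGreen roasts.csv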
args = parser.parse_args()
roasts = load_roasts(roast_path)
fields = default_fields if args.fields is None else args.fields.split(",")
with open(args.output_file, 'w', newline='') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(fields)
for roast in roasts:
writer.writerow([roast[field] for field in fields])
if __name__ == "__main__":
rv = main()
sys.exit(rv)
|
"""
Здесь собраны все команды настроек
"""
import json
from vkbottle.user import Blueprint, Message
from utils.edit_msg import edit_msg
from utils.emojis import ENABLED, DISABLED, ERROR
from filters import ForEveryoneRule
bp = Blueprint("Settings command")
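# The handlers below persist their settings in two JSON files: commands_for_everyone.json
# (a per-command availability map) and config.json (bomb timer, delete delay, prefix,
# and the DM preference for !инфо).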
@bp.on.message(ForEveryoneRule("settings"), text="<prefix>для всех <command>")
async def for_everyone_handler(message: Message, command):
"""
Команда для изменения доступности других команд для других людей
"""
with open("commands_for_everyone.json", "r", encoding="utf-8") as file:
content = json.load(file)
if command == "default":
with open("commands_for_everyone.json", "w", encoding="utf-8") as file:
content = {
"advancements": True,
"blank": True,
"bomb": True,
"code": False,
"demotivator": True,
"info": True,
"interactive_commands": True,
"ping": True,
"random_case": True,
"settings": False,
"show_config": False,
}
file.write(json.dumps(content, indent=4))
await edit_msg(
bp.api,
message,
f"{ENABLED} | Настройки для всех вернуты к значению по умолчанию",
)
elif command == "none":
with open("commands_for_everyone.json", "w", encoding="utf-8") as file:
for allowed_command in content:
content[allowed_command] = False
file.write(json.dumps(content, indent=4))
await edit_msg(
bp.api, message, f"{DISABLED} | Все команды для всех выключены"
)
elif command not in content:
await edit_msg(bp.api, message, f"{ERROR} | Такой команды нет ")
else:
if content[command]:
content[command] = False
with open(
"commands_for_everyone.json", "w", encoding="utf-8"
) as file:
content[command] = False
file.write(json.dumps(content, indent=4))
await edit_msg(
bp.api, message, f"{DISABLED} | Команда {command} отключена "
)
else:
content[command] = True
with open(
"commands_for_everyone.json", "w", encoding="utf-8"
) as file:
content[command] = True
file.write(json.dumps(content, indent=4))
await edit_msg(
bp.api,
message,
f"Команда {command} включена " + ENABLED,
)
@bp.on.message(ForEveryoneRule("settings"), text="<prefix>для всех")
async def show_for_everyone_handler(message: Message):
"""
Command for checking which commands are available to everyone else
"""
with open("commands_for_everyone.json", "r", encoding="utf-8") as file:
content = json.load(file)
text = "Команды для всех:\n"
for command in content:
if content[command]:
text += f"{command} | {ENABLED}\n"
else:
text += f"{command} | {DISABLED}\n"
await edit_msg(bp.api, message, text)
@bp.on.message(ForEveryoneRule("settings"), text="<prefix>время бомбы <time>")
async def set_bomb_time_handler(message: Message, time):
"""
Command for configuring the bomb timer (!бомба)
"""
try:
time = int(time)
except ValueError:
await edit_msg(
bp.api,
message,
"Время бомбы - не число! " + ERROR,
)
return
if time < 1:
await edit_msg(
bp.api,
message,
"Время бомбы не может быть меньше 1! " + ERROR,
)
else:
with open("config.json", "r", encoding="utf-8") as file:
content = json.load(file)
with open("config.json", "w", encoding="utf-8") as file:
content["bomb_time"] = int(message.text.split()[2])
file.write(json.dumps(content, indent=4))
await edit_msg(
bp.api,
message,
f"{ENABLED} | Время бомбы изменено на "
f"{content["bomb_time"]} секунд ",
)
@bp.on.message(
ForEveryoneRule("settings"), text="<prefix>время удаления <time>"
)
async def set_delete_time_handler(message: Message, time):
"""
Command for configuring the delay before executed command messages are deleted
"""
try:
time = int(time)
except ValueError:
await edit_msg(
bp.api,
message,
"Время удаления - не число! " + ERROR,
)
return
if time < 0:
await edit_msg(
bp.api,
message,
"Время удаления не может быть меньше 0! " + ERROR,
)
else:
with open("config.json", "r", encoding="utf-8") as file:
content = json.load(file)
with open("config.json", "w", encoding="utf-8") as file:
content["delete_after"] = int(message.text.split()[2])
file.write(json.dumps(content, indent=4))
await edit_msg(
bp.api,
message,
f"{ENABLED} | Время удаления изменено на "
f"{content["delete_after"]} секунд",
)
@bp.on.message(
ForEveryoneRule("settings"), text="<prefix>префикс <prefix_new>"
)
async def set_prefix_handler(message: Message, prefix_new):
"""
Command for changing the bot's prefix
"""
with open("config.json", "r", encoding="utf-8") as file:
content = json.load(file)
with open("config.json", "w", encoding="utf-8") as file:
content["prefix"] = prefix_new
file.write(json.dumps(content, indent=4))
await edit_msg(
bp.api,
message,
f'{ENABLED} | Ваш префикс изменился на "{content["prefix"]}"!',
)
@bp.on.message(ForEveryoneRule("settings"), text="<prefix>инфо лс")
async def info_in_dm_handler(message: Message):
"""
Command that toggles whether user info (!инфо) is sent to DMs or to the chat
"""
with open("config.json", "r", encoding="utf-8") as file:
content = json.load(file)
if content["send_info_in_dm"]:
content["send_info_in_dm"] = False
with open("config.json", "w", encoding="utf-8") as file:
file.write(json.dumps(content, indent=4))
await edit_msg(
bp.api,
message,
"👥 | Теперь информация будет присылаться в чат",
)
else:
content["send_info_in_dm"] = True
with open("config.json", "w", encoding="utf-8") as file:
file.write(json.dumps(content, indent=4))
await edit_msg(
bp.api,
message,
"👤 | Теперь информация будет присылаться в лс",
)
@bp.on.message(ForEveryoneRule("settings"), text="<prefix>ред")
async def edit_or_del_handler(message: Message):
"""
Command for choosing whether bot responses are edited or sent as new messages
"""
with open("config.json", "r", encoding="utf-8") as file:
content = json.load(file)
if content["edit_or_send"] == "edit":
content["edit_or_send"] = "send"
with open("config.json", "w", encoding="utf-8") as file:
file.write(json.dumps(content, indent=4))
await edit_msg(
bp.api,
message,
f"{DISABLED} | Теперь сообщения будут отправляться, а не "
"редактироваться",
)
else:
content["edit_or_send"] = "edit"
with open("config.json", "w", encoding="utf-8") as file:
file.write(json.dumps(content, indent=4))
await edit_msg(
bp.api,
message,
f"{ENABLED} | Теперь сообщения будут редактироваться, а не "
"отправляться",
)
@bp.on.message(ForEveryoneRule("settings"), text="<prefix>debug")
async def debug_mode_handler(message: Message):
"""
Command for toggling debug mode on and off
"""
with open("config.json", "r", encoding="utf-8") as file:
content = json.load(file)
if content["debug"]:
content["debug"] = False
with open("config.json", "w", encoding="utf-8") as file:
file.write(json.dumps(content, indent=4))
await edit_msg(bp.api, message, f"{DISABLED} | Debug-режим выключен")
else:
content["debug"] = True
with open("config.json", "w", encoding="utf-8") as file:
file.write(json.dumps(content, indent=4))
await edit_msg(bp.api, message, f"{ENABLED} | Debug-режим включен")
|
"""
All of the bot's settings commands are collected here
"""
import json
from vkbottle.user import Blueprint, Message
from utils.edit_msg import edit_msg
from utils.emojis import ENABLED, DISABLED, ERROR
from filters import ForEveryoneRule
bp = Blueprint("Settings command")
@bp.on.message(ForEveryoneRule("settings"), text="<prefix>для всех <command>")
async def for_everyone_handler(message: Message, command):
"""
Command for changing whether other commands are available to everyone else
"""
with open("commands_for_everyone.json", "r", encoding="utf-8") as file:
content = json.load(file)
if command == "default":
with open("commands_for_everyone.json", "w", encoding="utf-8") as file:
content = {
"advancements": True,
"blank": True,
"bomb": True,
"code": False,
"demotivator": True,
"info": True,
"interactive_commands": True,
"ping": True,
"random_case": True,
"settings": False,
"show_config": False,
}
file.write(json.dumps(content, indent=4))
await edit_msg(
bp.api,
message,
f"{ENABLED} | Настройки для всех вернуты к значению по умолчанию",
)
elif command == "none":
with open("commands_for_everyone.json", "w", encoding="utf-8") as file:
for allowed_command in content:
content[allowed_command] = False
file.write(json.dumps(content, indent=4))
await edit_msg(
bp.api, message, f"{DISABLED} | Все команды для всех выключены"
)
elif command not in content:
await edit_msg(bp.api, message, f"{ERROR} | Такой команды нет ")
else:
if content[command]:
content[command] = False
with open(
"commands_for_everyone.json", "w", encoding="utf-8"
) as file:
content[command] = False
file.write(json.dumps(content, indent=4))
await edit_msg(
bp.api, message, f"{DISABLED} | Команда {command} отключена "
)
else:
content[command] = True
with open(
"commands_for_everyone.json", "w", encoding="utf-8"
) as file:
content[command] = True
file.write(json.dumps(content, indent=4))
await edit_msg(
bp.api,
message,
f"Команда {command} включена " + ENABLED,
)
@bp.on.message(ForEveryoneRule("settings"), text="<prefix>для всех")
async def show_for_everyone_handler(message: Message):
"""
Command for checking which commands are available to everyone else
"""
with open("commands_for_everyone.json", "r", encoding="utf-8") as file:
content = json.load(file)
text = "Команды для всех:\n"
for command in content:
if content[command]:
text += f"{command} | {ENABLED}\n"
else:
text += f"{command} | {DISABLED}\n"
await edit_msg(bp.api, message, text)
@bp.on.message(ForEveryoneRule("settings"), text="<prefix>время бомбы <time>")
async def set_bomb_time_handler(message: Message, time):
"""
Command for configuring the bomb timer (!бомба)
"""
try:
time = int(time)
except ValueError:
await edit_msg(
bp.api,
message,
"Время бомбы - не число! " + ERROR,
)
return
if time < 1:
await edit_msg(
bp.api,
message,
"Время бомбы не может быть меньше 1! " + ERROR,
)
else:
with open("config.json", "r", encoding="utf-8") as file:
content = json.load(file)
with open("config.json", "w", encoding="utf-8") as file:
content["bomb_time"] = int(message.text.split()[2])
file.write(json.dumps(content, indent=4))
await edit_msg(
bp.api,
message,
f"{ENABLED} | Время бомбы изменено на "
f"{content['bomb_time']} секунд ",
)
@bp.on.message(
ForEveryoneRule("settings"), text="<prefix>время удаления <time>"
)
async def set_delete_time_handler(message: Message, time):
"""
Command for configuring the delay before executed command messages are deleted
"""
try:
time = int(time)
except ValueError:
await edit_msg(
bp.api,
message,
"Время удаления - не число! " + ERROR,
)
return
if time < 0:
await edit_msg(
bp.api,
message,
"Время удаления не может быть меньше 0! " + ERROR,
)
else:
with open("config.json", "r", encoding="utf-8") as file:
content = json.load(file)
with open("config.json", "w", encoding="utf-8") as file:
content["delete_after"] = int(message.text.split()[2])
file.write(json.dumps(content, indent=4))
await edit_msg(
bp.api,
message,
f"{ENABLED} | Время удаления изменено на "
f"{content['delete_after']} секунд",
)
@bp.on.message(
ForEveryoneRule("settings"), text="<prefix>префикс <prefix_new>"
)
async def set_prefix_handler(message: Message, prefix_new):
"""
Command for changing the bot's prefix
"""
with open("config.json", "r", encoding="utf-8") as file:
content = json.load(file)
with open("config.json", "w", encoding="utf-8") as file:
content["prefix"] = prefix_new
file.write(json.dumps(content, indent=4))
await edit_msg(
bp.api,
message,
f'{ENABLED} | Ваш префикс изменился на "{content["prefix"]}"!',
)
@bp.on.message(ForEveryoneRule("settings"), text="<prefix>инфо лс")
async def info_in_dm_handler(message: Message):
"""
Command that toggles whether user info (!инфо) is sent to DMs or to the chat
"""
with open("config.json", "r", encoding="utf-8") as file:
content = json.load(file)
if content["send_info_in_dm"]:
content["send_info_in_dm"] = False
with open("config.json", "w", encoding="utf-8") as file:
file.write(json.dumps(content, indent=4))
await edit_msg(
bp.api,
message,
"👥 | Теперь информация будет присылаться в чат",
)
else:
content["send_info_in_dm"] = True
with open("config.json", "w", encoding="utf-8") as file:
file.write(json.dumps(content, indent=4))
await edit_msg(
bp.api,
message,
"👤 | Теперь информация будет присылаться в лс",
)
@bp.on.message(ForEveryoneRule("settings"), text="<prefix>ред")
async def edit_or_del_handler(message: Message):
"""
Command for choosing whether bot responses are edited or sent as new messages
"""
with open("config.json", "r", encoding="utf-8") as file:
content = json.load(file)
if content["edit_or_send"] == "edit":
content["edit_or_send"] = "send"
with open("config.json", "w", encoding="utf-8") as file:
file.write(json.dumps(content, indent=4))
await edit_msg(
bp.api,
message,
f"{DISABLED} | Теперь сообщения будут отправляться, а не "
"редактироваться",
)
else:
content["edit_or_send"] = "edit"
with open("config.json", "w", encoding="utf-8") as file:
file.write(json.dumps(content, indent=4))
await edit_msg(
bp.api,
message,
f"{ENABLED} | Теперь сообщения будут редактироваться, а не "
"отправляться",
)
@bp.on.message(ForEveryoneRule("settings"), text="<prefix>debug")
async def debug_mode_handler(message: Message):
"""
Command for toggling debug mode on and off
"""
with open("config.json", "r", encoding="utf-8") as file:
content = json.load(file)
if content["debug"]:
content["debug"] = False
with open("config.json", "w", encoding="utf-8") as file:
file.write(json.dumps(content, indent=4))
await edit_msg(bp.api, message, f"{DISABLED} | Debug-режим выключен")
else:
content["debug"] = True
with open("config.json", "w", encoding="utf-8") as file:
file.write(json.dumps(content, indent=4))
await edit_msg(bp.api, message, f"{ENABLED} | Debug-режим включен")
|
import os
import json
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from collections import OrderedDict
from sg2im.utils import timeit, bool_flag, LossManager
from sg2im.utils import int_tuple, float_tuple, str_tuple
from sg2im.data.vg import SequenceTransformerVgSceneGraphDataset
import pytorch_lightning as pl
from transformers import (
BertTokenizerFast,
BertTokenizer,
EncoderDecoderModel,
EncoderDecoderConfig,
AutoModel,
BertForSequenceClassification,
)
from pytorch_lightning.plugins import DDPPlugin
VG_DIR = os.path.expanduser('datasets/vg')
COCO_DIR = os.path.expanduser('datasets/coco')
parser = argparse.ArgumentParser()
parser.add_argument('--test', action='store_true', default=False)
parser.add_argument('--dataset', default='coco', choices=['vg', 'coco'])
parser.add_argument('--scene_graphs_json', default='scene_graphs/figure_6_sheep.json')
parser.add_argument('--load_checkpoint', default="")
# Optimization hyperparameters
parser.add_argument('--batch_size', default=32, type=int)
parser.add_argument('--num_iterations', default=1000000, type=int)
parser.add_argument('--learning_rate', default=1e-5, type=float)
parser.add_argument('--gpus', default=1, type=int)
# Switch the generator to eval mode after this many iterations
parser.add_argument('--eval_mode_after', default=100000, type=int)
# Dataset options common to both VG and COCO
parser.add_argument('--image_size', default='64,64', type=int_tuple)
parser.add_argument('--num_train_samples', default=None, type=int)
parser.add_argument('--num_val_samples', default=1024, type=int)
parser.add_argument('--shuffle_val', default=True, type=bool_flag)
parser.add_argument('--loader_num_workers', default=4, type=int)
parser.add_argument('--include_relationships', default=True, type=bool_flag)
# VG-specific options
parser.add_argument('--vg_image_dir', default=os.path.join(VG_DIR, 'images'))
parser.add_argument('--train_h5', default=os.path.join(VG_DIR, 'train.h5'))
parser.add_argument('--val_h5', default=os.path.join(VG_DIR, 'val.h5'))
parser.add_argument('--vocab_json', default=os.path.join(VG_DIR, 'vocab.json'))
parser.add_argument('--max_objects_per_image', default=10, type=int)
parser.add_argument('--vg_use_orphaned_objects', default=True, type=bool_flag)
# COCO-specific options
parser.add_argument('--coco_train_image_dir',
default=os.path.join(COCO_DIR, 'images/train2017'))
parser.add_argument('--coco_val_image_dir',
default=os.path.join(COCO_DIR, 'images/val2017'))
parser.add_argument('--coco_train_instances_json',
default=os.path.join(COCO_DIR, 'annotations/instances_train2017.json'))
parser.add_argument('--coco_train_stuff_json',
default=os.path.join(COCO_DIR, 'annotations/stuff_train2017.json'))
parser.add_argument('--coco_val_instances_json',
default=os.path.join(COCO_DIR, 'annotations/instances_val2017.json'))
parser.add_argument('--coco_val_stuff_json',
default=os.path.join(COCO_DIR, 'annotations/stuff_val2017.json'))
parser.add_argument('--instance_whitelist', default=None, type=str_tuple)
parser.add_argument('--stuff_whitelist', default=None, type=str_tuple)
parser.add_argument('--coco_include_other', default=False, type=bool_flag)
parser.add_argument('--min_object_size', default=0.02, type=float)
parser.add_argument('--min_objects_per_image', default=3, type=int)
parser.add_argument('--coco_stuff_only', default=True, type=bool_flag)
parser.add_argument('--max_lengths_for_image', default=1024, type=int)
# Generator options
parser.add_argument('--mask_size', default=16, type=int) # Set this to 0 to use no masks
parser.add_argument('--embedding_dim', default=128, type=int)
parser.add_argument('--gconv_dim', default=128, type=int)
parser.add_argument('--gconv_hidden_dim', default=512, type=int)
parser.add_argument('--gconv_num_layers', default=5, type=int)
parser.add_argument('--mlp_normalization', default='none', type=str)
parser.add_argument('--refinement_network_dims', default='1024,512,256,128,64', type=int_tuple)
parser.add_argument('--normalization', default='batch')
parser.add_argument('--activation', default='leakyrelu-0.2')
parser.add_argument('--layout_noise_dim', default=32, type=int)
parser.add_argument('--use_boxes_pred_after', default=-1, type=int)
# Generator losses
parser.add_argument('--mask_loss_weight', default=0, type=float)
parser.add_argument('--l1_pixel_loss_weight', default=1.0, type=float)
parser.add_argument('--bbox_pred_loss_weight', default=10, type=float)
parser.add_argument('--predicate_pred_loss_weight', default=0, type=float) # DEPRECATED
# Generic discriminator options
parser.add_argument('--discriminator_loss_weight', default=0.01, type=float)
parser.add_argument('--gan_loss_type', default='gan')
parser.add_argument('--d_clip', default=None, type=float)
parser.add_argument('--d_normalization', default='batch')
parser.add_argument('--d_padding', default='valid')
parser.add_argument('--d_activation', default='leakyrelu-0.2')
# Object discriminator
parser.add_argument('--d_obj_arch',
default='C4-64-2,C4-128-2,C4-256-2')
parser.add_argument('--crop_size', default=32, type=int)
parser.add_argument('--d_obj_weight', default=1.0, type=float) # multiplied by d_loss_weight
parser.add_argument('--ac_loss_weight', default=0.1, type=float)
# Image discriminator
parser.add_argument('--d_img_arch',
default='C4-64-2,C4-128-2,C4-256-2')
parser.add_argument('--d_img_weight', default=1.0, type=float) # multiplied by d_loss_weight
# Output options
parser.add_argument('--print_every', default=10, type=int)
parser.add_argument('--timing', default=False, type=bool_flag)
parser.add_argument('--checkpoint_every', default=10000, type=int)
parser.add_argument('--output_dir', default=os.getcwd())
parser.add_argument('--checkpoint_name', default='checkpoint')
parser.add_argument('--checkpoint_start_from', default=None)
parser.add_argument('--restore_from_checkpoint', default=False, type=bool_flag)
class VGDataModule(pl.LightningDataModule):
def __init__(self, args, tokenizer, num_workers=8):
super().__init__()
self.args = args
self.tokenizer = tokenizer
self.num_workers = num_workers
self.batch_size = args.batch_size
def setup(self, stage=None):
args = self.args
with open(args.vocab_json, 'r') as f:
vocab = json.load(f)
dset_kwargs = {
'vocab': vocab,
'h5_path': args.train_h5,
'image_dir': args.vg_image_dir,
'image_size': args.image_size,
'max_samples': args.num_train_samples,
'max_objects': args.max_objects_per_image,
'use_orphaned_objects': args.vg_use_orphaned_objects,
'include_relationships': args.include_relationships,
'max_lengths_for_image': args.max_lengths_for_image
}
train_dset = SequenceTransformerVgSceneGraphDataset(
**dset_kwargs, tokenizer=self.tokenizer
)
# iter_per_epoch = len(train_dset) // args.batch_size
# print('There are %d iterations per epoch' % iter_per_epoch)
dset_kwargs['h5_path'] = args.val_h5
del dset_kwargs['max_samples']
val_dset = SequenceTransformerVgSceneGraphDataset(
**dset_kwargs, tokenizer=self.tokenizer
)
self.train_dset = train_dset
self.val_dset = val_dset
def train_dataloader(self):
return DataLoader(
self.train_dset, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=True
)
def val_dataloader(self):
return DataLoader(self.val_dset, batch_size=self.batch_size, num_workers=self.num_workers)
def test_dataloader(self):
return DataLoader(self.val_dset, batch_size=self.batch_size, num_workers=self.num_workers)
class Discriminator(nn.Module):
def __init__(self, backbone):
super().__init__()
self.backbone = BertForSequenceClassification.from_pretrained(backbone)
def forward(self, *args, **kwargs):
outputs = self.backbone(*args, **kwargs)
return outputs["loss"]
def apply_word_embeddings(self, inputs):
"""
Because Gumbel-softmax outputs cannot be fed directly to a huggingface model,
we have to compute the `inputs_embeds` manually.
"""
word_embeddings = self.backbone.bert.embeddings.word_embeddings
return torch.matmul(inputs, word_embeddings.weight)
class Generator(nn.Module):
def __init__(self, backbone):
super().__init__()
self.backbone = EncoderDecoderModel.from_encoder_decoder_pretrained(
backbone, backbone, tie_encoder_decoder=True
)
def forward(self, *args, **kwargs):
return self.backbone(*args, **kwargs)
def forward_logits(self, *args, **kwargs):
return self.backbone(*args, **kwargs)["logits"]
def forward_loss(self, *args, **kwargs):
return self.backbone(*args, **kwargs)["loss"]
def apply_word_embeddings(self, inputs):
"""
Because Gumbel-softmax outputs cannot be fed directly to a huggingface model,
we have to compute the `inputs_embeds` manually.
"""
word_embeddings = self.backbone.encoder.embeddings.word_embeddings
return torch.matmul(inputs, word_embeddings.weight)
class GAN(pl.LightningModule):
def __init__(
self,
args,
tokenizer,
backbone=None,
):
super().__init__()
self.args = args
self.validation_z = torch.randn(8, 100)
self.tokenizer = tokenizer
self.discriminator = Discriminator(backbone)
self.generator = Generator(backbone)
self.graph_special_token = "[graph]"
self.image_special_token = "[image]"
self.tau = 1
self.image_token_id_list, self.text_token_id_list = self.retrieve_bad_image_text_tokens_ids()
def retrieve_bad_image_text_tokens_ids(self):
special_tokens_list = ["[CLS]", "[SEP]"]
image_tokens_list = [f"[itoken{i}]" for i in range(512)]
extra_image_tokens_list = [f"[itoken{i}]" for i in range(512, 32 * 32)]
vocab = self.tokenizer.get_vocab()
special_tokens_id_list = [vocab[token] for token in special_tokens_list]
image_token_id_list = [vocab[token] for token in image_tokens_list]
extra_image_tokens_id_list = [vocab[token] for token in extra_image_tokens_list]
text_token_id_list = [v for k, v in vocab.items()]
text_token_id_list = \
list(set(text_token_id_list) - set(image_token_id_list) - set(extra_image_tokens_id_list))
return image_token_id_list + extra_image_tokens_id_list, text_token_id_list + extra_image_tokens_id_list
def adversarial_loss(self, y_hat, y):
return F.binary_cross_entropy_with_logits(y_hat, y)
def training_step(self, batch, batch_idx, optimizer_idx):
# sample noise
# z = torch.randn(imgs.shape[0], self.hparams.latent_dim)
# z = z.type_as(imgs)
generator_batch = {
"input_ids": batch["sent_input/input_ids"],
"attention_mask": batch["sent_input/attention_mask"],
"decoder_input_ids": batch["code_output/input_ids"],
"decoder_attention_mask": batch["code_output/attention_mask"],
"labels": batch["code_output/input_ids"].clone()
}
# exclude the loss for padding tokens
generator_batch["labels"][generator_batch["labels"] == self.tokenizer.pad_token_id] = -100
# train generator
if optimizer_idx == 0:
logits = self.generator.forward_logits(**generator_batch)
predictions = F.gumbel_softmax(logits, tau=self.tau, hard=True, dim=-1)
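# hard=True yields straight-through one-hot samples: discrete values forward, soft Gumbel-softmax gradients backward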
# log sampled images
# sample_imgs = self.generated_imgs[:6]
# grid = torchvision.utils.make_grid(sample_imgs)
# self.logger.experiment.add_image('generated_images', grid, 0)
# ground truth result (ie: all fake)
# put on GPU because we created this tensor inside training_loop
predictions_embedding = self.generator.apply_word_embeddings(predictions)
fake_batch = {
"inputs_embeds": predictions_embedding,
"attention_mask": batch["code_output/attention_mask"],
"decoder_input_ids": batch["sent_output/input_ids"],
"decoder_attention_mask": batch["sent_output/attention_mask"],
"labels": batch["sent_output/input_ids"].clone()
}
fake_batch["labels"][fake_batch["labels"] == self.tokenizer.pad_token_id] = -100
ac_loss = self.generator.forward_loss(**fake_batch)
predictions_embedding = self.discriminator.apply_word_embeddings(predictions)
fake_dis_batch = {
"inputs_embeds": predictions_embedding,
"attention_mask": batch["code_output/attention_mask"],
"labels": torch.ones(predictions_embedding.shape[0]).type_as(predictions_embedding).long()
}
g_d_loss = self.discriminator(**fake_dis_batch)
g_loss = g_d_loss + ac_loss
# g_loss = ac_loss
self.log('g_ac_loss', ac_loss, prog_bar=True)
self.log('g_d_loss', g_d_loss, prog_bar=True)
# return {"loss": g_loss}
# train discriminator (inverse generator)
# if optimizer_idx == 1:
# Measure discriminator's ability to classify real from generated samples
logits = self.generator.forward_logits(**generator_batch)
predictions = F.gumbel_softmax(logits, tau=self.tau, hard=True, dim=-1)
# don't compute the gradients of the generator
predictions = predictions.detach()
predictions_embedding = self.generator.apply_word_embeddings(predictions)
fake_batch = {
"inputs_embeds": predictions_embedding,
"attention_mask": batch["code_output/attention_mask"],
"decoder_input_ids": batch["sent_output/input_ids"],
"decoder_attention_mask": batch["sent_output/attention_mask"],
"labels": batch["sent_output/input_ids"].clone()
}
fake_batch["labels"][fake_batch["labels"] == self.tokenizer.pad_token_id] = -100
fake_ac_loss = self.generator.forward_loss(**fake_batch)
# For real data
real_batch = {
"input_ids": batch["code_output/input_ids"],
"attention_mask": batch["code_output/attention_mask"],
"decoder_input_ids": batch["sent_output/input_ids"],
"decoder_attention_mask": batch["sent_output/attention_mask"],
"labels": batch["sent_output/input_ids"].clone()
}
real_batch["labels"][real_batch["labels"] == self.tokenizer.pad_token_id] = -100
real_ac_loss = self.generator.forward_loss(**real_batch)
ac_loss = (real_ac_loss + fake_ac_loss) / 2
self.log('ac_loss', ac_loss, prog_bar=True)
# return {"loss": ac_loss}
return g_loss + ac_loss
# train discriminator
if optimizer_idx == 1:
# Measure discriminator's ability to classify real from generated samples
logits = self.generator.forward_logits(**generator_batch)
# don't compute the gradients of the generator
predictions = F.gumbel_softmax(logits, tau=self.tau, hard=True, dim=-1)
predictions_embedding = self.discriminator.apply_word_embeddings(predictions)
fake_dis_batch = {
"inputs_embeds": predictions_embedding,
"attention_mask": batch["code_output/attention_mask"],
"labels": torch.zeros(predictions.shape[0]).type_as(predictions).long()
}
fake_loss = self.discriminator(**fake_dis_batch)
# fake = torch.zeros(fake_preds.shape)
# fake = fake.type_as(fake_preds)
# fake_loss = self.adversarial_loss(fake_preds, fake)
real_dis_batch = {
"input_ids": batch["code_output/input_ids"],
"attention_mask": batch["code_output/attention_mask"],
"labels": torch.ones(predictions.shape[0]).type_as(predictions).long()
}
real_loss = self.discriminator(**real_dis_batch)
# real = torch.ones(real_preds.shape)
# real = real.type_as(real_preds)
# real_loss = self.adversarial_loss(real_preds, real)
# discriminator loss is the average of these
d_loss = (real_loss + fake_loss) / 2
self.log('d_loss', d_loss, prog_bar=True)
return d_loss
def configure_optimizers(self):
lr = self.args.learning_rate
opt_g = torch.optim.Adam(self.generator.parameters(), lr=lr, betas=(0.5, 0.999))
opt_d = torch.optim.Adam(
self.discriminator.parameters(),
lr=lr,
betas=(0.5, 0.999)
)
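# The list order defines optimizer_idx in training_step: 0 trains the generator, 1 the discriminator.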
return [opt_g, opt_d], []
# def on_epoch_end(self):
# z = self.validation_z.type_as(self.generator.model[0].weight)
# # log sampled images
# sample_imgs = self(z)
# grid = torchvision.utils.make_grid(sample_imgs)
# self.logger.experiment.add_image('generated_images', grid, self.current_epoch)
def test_step(self, batch, batch_idx):
pass
def inference(self, scene_graphs_json):
scene_graphs = self.read_scene_graphs(scene_graphs_json)
image_tokens_generation = self.generator.backbone.generate(
scene_graphs["input_ids"],
max_length=66,
# num_beams=5,
# no_repeat_ngram_size=2,
# early_stopping=True,
do_sample=True,
top_p=0.92,
top_k=0,
decoder_start_token_id=self.generator.backbone.config.decoder.pad_token_id,
bad_words_ids=[[ids] for ids in self.text_token_id_list],
)
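# Every id outside [itoken0]..[itoken511] is banned above, so sampling is restricted to the image-token vocabulary.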
print(image_tokens_generation)
output = []
for data in image_tokens_generation:
output.append(self.tokenizer.decode(data, skip_special_tokens=True))
print(output[-1])
reconstructed_graph = self.generator.backbone.generate(
image_tokens_generation,
max_length=64,
# num_beams=5,
# no_repeat_ngram_size=2,
# early_stopping=True,
do_sample=True,
top_p=0.92,
top_k=0,
decoder_start_token_id=self.generator.backbone.config.decoder.pad_token_id,
bad_words_ids=[[ids]for ids in self.image_token_id_list],
)
for data in reconstructed_graph:
print(self.tokenizer.decode(data, skip_special_tokens=True))
if not os.path.exists(self.args.output_dir):
os.makedirs(self.args.output_dir)
itokens_output_file = os.path.join(self.args.output_dir, "itokens_output.json")
with open(itokens_output_file, "w") as f:
json.dump(output, f, indent=2)
def read_scene_graphs(self, scene_graphs_json):
with open(scene_graphs_json, 'r') as f:
scene_graphs = json.load(f)
if isinstance(scene_graphs, dict):
# We just got a single scene graph, so promote it to a list
scene_graphs = [scene_graphs]
objs, triples, obj_to_img = [], [], []
obj_offset = 0
sents_list = []
for i, sg in enumerate(scene_graphs):
# Insert dummy __image__ object and __in_image__ relationships
sents = []
for s, p, o in sg['relationships']:
sent = f"{sg["objects"][s]} {p} {sg["objects"][o]}."
sents.append(sent)
sent = " ".join(sents)
sent = f"{self.graph_special_token} {sent} {self.image_special_token}"
sents_list.append(sent)
print(sent)
sent_tensor = self.tokenizer(
sents_list,
return_tensors="pt",
padding="max_length",
max_length=64,
truncation=True,
add_special_tokens=False
)
device = next(self.parameters()).device
sent_tensor = {k: v.to(device) for k, v in sent_tensor.items()}
return sent_tensor
def main(args):
backbone = "bert-base-uncased-itokens"
tokenizer = BertTokenizerFast.from_pretrained(backbone)
# encoder_decoder_config = EncoderDecoderConfig.from_pretrained("bert-base-uncased-itokens")
# model = EncoderDecoderModel.from_pretrained(
# "bert-base-uncased-itokens", config=encoder_decoder_config
# )
# model = EncoderDecoderModel.from_encoder_decoder_pretrained(
# "bert-base-uncased-itokens", "bert-base-uncased-itokens", tie_encoder_decoder=True
# )
# generator = Generator(model)
# discriminator = Discriminator(
# AutoModel.from_pretrained("bert-base-uncased-itokens")
# )
if args.test:
model = GAN.load_from_checkpoint(
args.load_checkpoint,
args=args,
tokenizer=tokenizer,
backbone=backbone
)
model.cuda()
model.eval()
model.inference(args.scene_graphs_json)
return
# train
if args.gpus > 1:
dm = VGDataModule(args, tokenizer, 2)
else:
dm = VGDataModule(args, tokenizer)
if args.load_checkpoint != "":
model = GAN.load_from_checkpoint(
args.load_checkpoint,
args=args,
tokenizer=tokenizer,
backbone=backbone
)
else:
model = GAN(args, tokenizer, backbone)
training_args = {
"gpus": args.gpus,
"fast_dev_run": False,
"max_steps": args.num_iterations,
"precision": 32,
"gradient_clip_val": 1,
}
if args.gpus > 1:
additional_args = {
"accelerator": "ddp",
"plugins": [DDPPlugin(find_unused_parameters=True)]
# "plugins": [my_ddp]
}
training_args.update(additional_args)
trainer = pl.Trainer(**training_args)
trainer.fit(model, dm)
if __name__ == "__main__":
args = parser.parse_args()
main(args)
|
import os
import json
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from collections import OrderedDict
from sg2im.utils import timeit, bool_flag, LossManager
from sg2im.utils import int_tuple, float_tuple, str_tuple
from sg2im.data.vg import SequenceTransformerVgSceneGraphDataset
import pytorch_lightning as pl
from transformers import (
BertTokenizerFast,
BertTokenizer,
EncoderDecoderModel,
EncoderDecoderConfig,
AutoModel,
BertForSequenceClassification,
)
from pytorch_lightning.plugins import DDPPlugin
VG_DIR = os.path.expanduser('datasets/vg')
COCO_DIR = os.path.expanduser('datasets/coco')
parser = argparse.ArgumentParser()
parser.add_argument('--test', action='store_true', default=False)
parser.add_argument('--dataset', default='coco', choices=['vg', 'coco'])
parser.add_argument('--scene_graphs_json', default='scene_graphs/figure_6_sheep.json')
parser.add_argument('--load_checkpoint', default="")
# Optimization hyperparameters
parser.add_argument('--batch_size', default=32, type=int)
parser.add_argument('--num_iterations', default=1000000, type=int)
parser.add_argument('--learning_rate', default=1e-5, type=float)
parser.add_argument('--gpus', default=1, type=int)
# Switch the generator to eval mode after this many iterations
parser.add_argument('--eval_mode_after', default=100000, type=int)
# Dataset options common to both VG and COCO
parser.add_argument('--image_size', default='64,64', type=int_tuple)
parser.add_argument('--num_train_samples', default=None, type=int)
parser.add_argument('--num_val_samples', default=1024, type=int)
parser.add_argument('--shuffle_val', default=True, type=bool_flag)
parser.add_argument('--loader_num_workers', default=4, type=int)
parser.add_argument('--include_relationships', default=True, type=bool_flag)
# VG-specific options
parser.add_argument('--vg_image_dir', default=os.path.join(VG_DIR, 'images'))
parser.add_argument('--train_h5', default=os.path.join(VG_DIR, 'train.h5'))
parser.add_argument('--val_h5', default=os.path.join(VG_DIR, 'val.h5'))
parser.add_argument('--vocab_json', default=os.path.join(VG_DIR, 'vocab.json'))
parser.add_argument('--max_objects_per_image', default=10, type=int)
parser.add_argument('--vg_use_orphaned_objects', default=True, type=bool_flag)
# COCO-specific options
parser.add_argument('--coco_train_image_dir',
default=os.path.join(COCO_DIR, 'images/train2017'))
parser.add_argument('--coco_val_image_dir',
default=os.path.join(COCO_DIR, 'images/val2017'))
parser.add_argument('--coco_train_instances_json',
default=os.path.join(COCO_DIR, 'annotations/instances_train2017.json'))
parser.add_argument('--coco_train_stuff_json',
default=os.path.join(COCO_DIR, 'annotations/stuff_train2017.json'))
parser.add_argument('--coco_val_instances_json',
default=os.path.join(COCO_DIR, 'annotations/instances_val2017.json'))
parser.add_argument('--coco_val_stuff_json',
default=os.path.join(COCO_DIR, 'annotations/stuff_val2017.json'))
parser.add_argument('--instance_whitelist', default=None, type=str_tuple)
parser.add_argument('--stuff_whitelist', default=None, type=str_tuple)
parser.add_argument('--coco_include_other', default=False, type=bool_flag)
parser.add_argument('--min_object_size', default=0.02, type=float)
parser.add_argument('--min_objects_per_image', default=3, type=int)
parser.add_argument('--coco_stuff_only', default=True, type=bool_flag)
parser.add_argument('--max_lengths_for_image', default=1024, type=int)
# Generator options
parser.add_argument('--mask_size', default=16, type=int) # Set this to 0 to use no masks
parser.add_argument('--embedding_dim', default=128, type=int)
parser.add_argument('--gconv_dim', default=128, type=int)
parser.add_argument('--gconv_hidden_dim', default=512, type=int)
parser.add_argument('--gconv_num_layers', default=5, type=int)
parser.add_argument('--mlp_normalization', default='none', type=str)
parser.add_argument('--refinement_network_dims', default='1024,512,256,128,64', type=int_tuple)
parser.add_argument('--normalization', default='batch')
parser.add_argument('--activation', default='leakyrelu-0.2')
parser.add_argument('--layout_noise_dim', default=32, type=int)
parser.add_argument('--use_boxes_pred_after', default=-1, type=int)
# Generator losses
parser.add_argument('--mask_loss_weight', default=0, type=float)
parser.add_argument('--l1_pixel_loss_weight', default=1.0, type=float)
parser.add_argument('--bbox_pred_loss_weight', default=10, type=float)
parser.add_argument('--predicate_pred_loss_weight', default=0, type=float) # DEPRECATED
# Generic discriminator options
parser.add_argument('--discriminator_loss_weight', default=0.01, type=float)
parser.add_argument('--gan_loss_type', default='gan')
parser.add_argument('--d_clip', default=None, type=float)
parser.add_argument('--d_normalization', default='batch')
parser.add_argument('--d_padding', default='valid')
parser.add_argument('--d_activation', default='leakyrelu-0.2')
# Object discriminator
parser.add_argument('--d_obj_arch',
default='C4-64-2,C4-128-2,C4-256-2')
parser.add_argument('--crop_size', default=32, type=int)
parser.add_argument('--d_obj_weight', default=1.0, type=float) # multiplied by d_loss_weight
parser.add_argument('--ac_loss_weight', default=0.1, type=float)
# Image discriminator
parser.add_argument('--d_img_arch',
default='C4-64-2,C4-128-2,C4-256-2')
parser.add_argument('--d_img_weight', default=1.0, type=float) # multiplied by d_loss_weight
# Output options
parser.add_argument('--print_every', default=10, type=int)
parser.add_argument('--timing', default=False, type=bool_flag)
parser.add_argument('--checkpoint_every', default=10000, type=int)
parser.add_argument('--output_dir', default=os.getcwd())
parser.add_argument('--checkpoint_name', default='checkpoint')
parser.add_argument('--checkpoint_start_from', default=None)
parser.add_argument('--restore_from_checkpoint', default=False, type=bool_flag)
class VGDataModule(pl.LightningDataModule):
def __init__(self, args, tokenizer, num_workers=8):
super().__init__()
self.args = args
self.tokenizer = tokenizer
self.num_workers = num_workers
self.batch_size = args.batch_size
def setup(self, stage=None):
args = self.args
with open(args.vocab_json, 'r') as f:
vocab = json.load(f)
dset_kwargs = {
'vocab': vocab,
'h5_path': args.train_h5,
'image_dir': args.vg_image_dir,
'image_size': args.image_size,
'max_samples': args.num_train_samples,
'max_objects': args.max_objects_per_image,
'use_orphaned_objects': args.vg_use_orphaned_objects,
'include_relationships': args.include_relationships,
'max_lengths_for_image': args.max_lengths_for_image
}
train_dset = SequenceTransformerVgSceneGraphDataset(
**dset_kwargs, tokenizer=self.tokenizer
)
# iter_per_epoch = len(train_dset) // args.batch_size
# print('There are %d iterations per epoch' % iter_per_epoch)
dset_kwargs['h5_path'] = args.val_h5
del dset_kwargs['max_samples']
val_dset = SequenceTransformerVgSceneGraphDataset(
**dset_kwargs, tokenizer=self.tokenizer
)
self.train_dset = train_dset
self.val_dset = val_dset
def train_dataloader(self):
return DataLoader(
self.train_dset, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=True
)
def val_dataloader(self):
return DataLoader(self.val_dset, batch_size=self.batch_size, num_workers=self.num_workers)
def test_dataloader(self):
return DataLoader(self.val_dset, batch_size=self.batch_size, num_workers=self.num_workers)
class Discriminator(nn.Module):
def __init__(self, backbone):
super().__init__()
self.backbone = BertForSequenceClassification.from_pretrained(backbone)
def forward(self, *args, **kwargs):
outputs = self.backbone(*args, **kwargs)
return outputs["loss"]
def apply_word_embeddings(self, inputs):
"""
Because Gumbel-softmax outputs cannot be fed directly to a huggingface model,
we have to compute the `inputs_embeds` manually.
"""
word_embeddings = self.backbone.bert.embeddings.word_embeddings
return torch.matmul(inputs, word_embeddings.weight)
class Generator(nn.Module):
def __init__(self, backbone):
super().__init__()
self.backbone = EncoderDecoderModel.from_encoder_decoder_pretrained(
backbone, backbone, tie_encoder_decoder=True
)
def forward(self, *args, **kwargs):
return self.backbone(*args, **kwargs)
def forward_logits(self, *args, **kwargs):
return self.backbone(*args, **kwargs)["logits"]
def forward_loss(self, *args, **kwargs):
return self.backbone(*args, **kwargs)["loss"]
def apply_word_embeddings(self, inputs):
"""
Because Gumbel-softmax outputs cannot be fed directly to a huggingface model,
we have to compute the `inputs_embeds` manually.
"""
word_embeddings = self.backbone.encoder.embeddings.word_embeddings
return torch.matmul(inputs, word_embeddings.weight)
class GAN(pl.LightningModule):
def __init__(
self,
args,
tokenizer,
backbone=None,
):
super().__init__()
self.args = args
self.validation_z = torch.randn(8, 100)
self.tokenizer = tokenizer
self.discriminator = Discriminator(backbone)
self.generator = Generator(backbone)
self.graph_special_token = "[graph]"
self.image_special_token = "[image]"
self.tau = 1
self.image_token_id_list, self.text_token_id_list = self.retrieve_bad_image_text_tokens_ids()
def retrieve_bad_image_text_tokens_ids(self):
special_tokens_list = ["[CLS]", "[SEP]"]
image_tokens_list = [f"[itoken{i}]" for i in range(512)]
extra_image_tokens_list = [f"[itoken{i}]" for i in range(512, 32 * 32)]
vocab = self.tokenizer.get_vocab()
special_tokens_id_list = [vocab[token] for token in special_tokens_list]
image_token_id_list = [vocab[token] for token in image_tokens_list]
extra_image_tokens_id_list = [vocab[token] for token in extra_image_tokens_list]
text_token_id_list = [v for k, v in vocab.items()]
text_token_id_list = \
list(set(text_token_id_list) - set(image_token_id_list) - set(extra_image_tokens_id_list))
return image_token_id_list + extra_image_tokens_id_list, text_token_id_list + extra_image_tokens_id_list
def adversarial_loss(self, y_hat, y):
return F.binary_cross_entropy_with_logits(y_hat, y)
def training_step(self, batch, batch_idx, optimizer_idx):
# sample noise
# z = torch.randn(imgs.shape[0], self.hparams.latent_dim)
# z = z.type_as(imgs)
generator_batch = {
"input_ids": batch["sent_input/input_ids"],
"attention_mask": batch["sent_input/attention_mask"],
"decoder_input_ids": batch["code_output/input_ids"],
"decoder_attention_mask": batch["code_output/attention_mask"],
"labels": batch["code_output/input_ids"].clone()
}
# exclude the loss for padding tokens
generator_batch["labels"][generator_batch["labels"] == self.tokenizer.pad_token_id] = -100
# train generator
if optimizer_idx == 0:
logits = self.generator.forward_logits(**generator_batch)
predictions = F.gumbel_softmax(logits, tau=self.tau, hard=True, dim=-1)
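# hard=True yields straight-through one-hot samples: discrete values forward, soft Gumbel-softmax gradients backward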
# log sampled images
# sample_imgs = self.generated_imgs[:6]
# grid = torchvision.utils.make_grid(sample_imgs)
# self.logger.experiment.add_image('generated_images', grid, 0)
# ground truth result (ie: all fake)
# put on GPU because we created this tensor inside training_loop
predictions_embedding = self.generator.apply_word_embeddings(predictions)
fake_batch = {
"inputs_embeds": predictions_embedding,
"attention_mask": batch["code_output/attention_mask"],
"decoder_input_ids": batch["sent_output/input_ids"],
"decoder_attention_mask": batch["sent_output/attention_mask"],
"labels": batch["sent_output/input_ids"].clone()
}
fake_batch["labels"][fake_batch["labels"] == self.tokenizer.pad_token_id] = -100
ac_loss = self.generator.forward_loss(**fake_batch)
predictions_embedding = self.discriminator.apply_word_embeddings(predictions)
fake_dis_batch = {
"inputs_embeds": predictions_embedding,
"attention_mask": batch["code_output/attention_mask"],
"labels": torch.ones(predictions_embedding.shape[0]).type_as(predictions_embedding).long()
}
g_d_loss = self.discriminator(**fake_dis_batch)
g_loss = g_d_loss + ac_loss
# g_loss = ac_loss
self.log('g_ac_loss', ac_loss, prog_bar=True)
self.log('g_d_loss', g_d_loss, prog_bar=True)
# return {"loss": g_loss}
# train discriminator (inverse generator)
# if optimizer_idx == 1:
# Measure discriminator's ability to classify real from generated samples
logits = self.generator.forward_logits(**generator_batch)
predictions = F.gumbel_softmax(logits, tau=self.tau, hard=True, dim=-1)
# don't compute the gradients of the generator
predictions = predictions.detach()
predictions_embedding = self.generator.apply_word_embeddings(predictions)
fake_batch = {
"inputs_embeds": predictions_embedding,
"attention_mask": batch["code_output/attention_mask"],
"decoder_input_ids": batch["sent_output/input_ids"],
"decoder_attention_mask": batch["sent_output/attention_mask"],
"labels": batch["sent_output/input_ids"].clone()
}
fake_batch["labels"][fake_batch["labels"] == self.tokenizer.pad_token_id] = -100
fake_ac_loss = self.generator.forward_loss(**fake_batch)
# For real data
real_batch = {
"input_ids": batch["code_output/input_ids"],
"attention_mask": batch["code_output/attention_mask"],
"decoder_input_ids": batch["sent_output/input_ids"],
"decoder_attention_mask": batch["sent_output/attention_mask"],
"labels": batch["sent_output/input_ids"].clone()
}
real_batch["labels"][real_batch["labels"] == self.tokenizer.pad_token_id] = -100
real_ac_loss = self.generator.forward_loss(**real_batch)
ac_loss = (real_ac_loss + fake_ac_loss) / 2
self.log('ac_loss', ac_loss, prog_bar=True)
# return {"loss": ac_loss}
return g_loss + ac_loss
# train discriminator
if optimizer_idx == 1:
# Measure discriminator's ability to classify real from generated samples
logits = self.generator.forward_logits(**generator_batch)
# don't compute the gradients of the generator
predictions = F.gumbel_softmax(logits, tau=self.tau, hard=True, dim=-1)
predictions_embedding = self.discriminator.apply_word_embeddings(predictions)
fake_dis_batch = {
"inputs_embeds": predictions_embedding,
"attention_mask": batch["code_output/attention_mask"],
"labels": torch.zeros(predictions.shape[0]).type_as(predictions).long()
}
fake_loss = self.discriminator(**fake_dis_batch)
# fake = torch.zeros(fake_preds.shape)
# fake = fake.type_as(fake_preds)
# fake_loss = self.adversarial_loss(fake_preds, fake)
real_dis_batch = {
"input_ids": batch["code_output/input_ids"],
"attention_mask": batch["code_output/attention_mask"],
"labels": torch.ones(predictions.shape[0]).type_as(predictions).long()
}
real_loss = self.discriminator(**real_dis_batch)
# real = torch.ones(real_preds.shape)
# real = real.type_as(real_preds)
# real_loss = self.adversarial_loss(real_preds, real)
# discriminator loss is the average of these
d_loss = (real_loss + fake_loss) / 2
self.log('d_loss', d_loss, prog_bar=True)
return d_loss
def configure_optimizers(self):
lr = self.args.learning_rate
opt_g = torch.optim.Adam(self.generator.parameters(), lr=lr, betas=(0.5, 0.999))
opt_d = torch.optim.Adam(
self.discriminator.parameters(),
lr=lr,
betas=(0.5, 0.999)
)
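# The list order defines optimizer_idx in training_step: 0 trains the generator, 1 the discriminator.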
return [opt_g, opt_d], []
# def on_epoch_end(self):
# z = self.validation_z.type_as(self.generator.model[0].weight)
# # log sampled images
# sample_imgs = self(z)
# grid = torchvision.utils.make_grid(sample_imgs)
# self.logger.experiment.add_image('generated_images', grid, self.current_epoch)
def test_step(self, batch, batch_idx):
pass
def inference(self, scene_graphs_json):
scene_graphs = self.read_scene_graphs(scene_graphs_json)
image_tokens_generation = self.generator.backbone.generate(
scene_graphs["input_ids"],
max_length=66,
# num_beams=5,
# no_repeat_ngram_size=2,
# early_stopping=True,
do_sample=True,
top_p=0.92,
top_k=0,
decoder_start_token_id=self.generator.backbone.config.decoder.pad_token_id,
bad_words_ids=[[ids] for ids in self.text_token_id_list],
)
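# Every id outside [itoken0]..[itoken511] is banned above, so sampling is restricted to the image-token vocabulary.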
print(image_tokens_generation)
output = []
for data in image_tokens_generation:
output.append(self.tokenizer.decode(data, skip_special_tokens=True))
print(output[-1])
reconstructed_graph = self.generator.backbone.generate(
image_tokens_generation,
max_length=64,
# num_beams=5,
# no_repeat_ngram_size=2,
# early_stopping=True,
do_sample=True,
top_p=0.92,
top_k=0,
decoder_start_token_id=self.generator.backbone.config.decoder.pad_token_id,
bad_words_ids=[[ids]for ids in self.image_token_id_list],
)
for data in reconstructed_graph:
print(self.tokenizer.decode(data, skip_special_tokens=True))
if not os.path.exists(self.args.output_dir):
os.makedirs(self.args.output_dir)
itokens_output_file = os.path.join(self.args.output_dir, "itokens_output.json")
with open(itokens_output_file, "w") as f:
json.dump(output, f, indent=2)
def read_scene_graphs(self, scene_graphs_json):
with open(scene_graphs_json, 'r') as f:
scene_graphs = json.load(f)
if isinstance(scene_graphs, dict):
# We just got a single scene graph, so promote it to a list
scene_graphs = [scene_graphs]
objs, triples, obj_to_img = [], [], []
obj_offset = 0
sents_list = []
for i, sg in enumerate(scene_graphs):
# Insert dummy __image__ object and __in_image__ relationships
sents = []
for s, p, o in sg['relationships']:
sent = f"{sg['objects'][s]} {p} {sg['objects'][o]}."
sents.append(sent)
sent = " ".join(sents)
sent = f"{self.graph_special_token} {sent} {self.image_special_token}"
sents_list.append(sent)
print(sent)
sent_tensor = self.tokenizer(
sents_list,
return_tensors="pt",
padding="max_length",
max_length=64,
truncation=True,
add_special_tokens=False
)
device = next(self.parameters()).device
sent_tensor = {k: v.to(device) for k, v in sent_tensor.items()}
return sent_tensor
def main(args):
backbone = "bert-base-uncased-itokens"
tokenizer = BertTokenizerFast.from_pretrained(backbone)
# encoder_decoder_config = EncoderDecoderConfig.from_pretrained("bert-base-uncased-itokens")
# model = EncoderDecoderModel.from_pretrained(
# "bert-base-uncased-itokens", config=encoder_decoder_config
# )
# model = EncoderDecoderModel.from_encoder_decoder_pretrained(
# "bert-base-uncased-itokens", "bert-base-uncased-itokens", tie_encoder_decoder=True
# )
# generator = Generator(model)
# discriminator = Discriminator(
# AutoModel.from_pretrained("bert-base-uncased-itokens")
# )
if args.test:
model = GAN.load_from_checkpoint(
args.load_checkpoint,
args=args,
tokenizer=tokenizer,
backbone=backbone
)
model.cuda()
model.eval()
model.inference(args.scene_graphs_json)
return
# train
if args.gpus > 1:
dm = VGDataModule(args, tokenizer, 2)
else:
dm = VGDataModule(args, tokenizer)
if args.load_checkpoint != "":
model = GAN.load_from_checkpoint(
args.load_checkpoint,
args=args,
tokenizer=tokenizer,
backbone=backbone
)
else:
model = GAN(args, tokenizer, backbone)
training_args = {
"gpus": args.gpus,
"fast_dev_run": False,
"max_steps": args.num_iterations,
"precision": 32,
"gradient_clip_val": 1,
}
if args.gpus > 1:
additional_args = {
"accelerator": "ddp",
"plugins": [DDPPlugin(find_unused_parameters=True)]
# "plugins": [my_ddp]
}
training_args.update(additional_args)
trainer = pl.Trainer(**training_args)
trainer.fit(model, dm)
if __name__ == "__main__":
args = parser.parse_args()
main(args)
|
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import pytest
import numpy as np
import math
import pandas
from pandas.testing import (
assert_series_equal,
assert_frame_equal,
assert_index_equal,
assert_extension_array_equal,
)
import modin.pandas as pd
from modin.utils import to_pandas
from modin.config import TestDatasetSize, TrackFileLeaks
from io import BytesIO
import os
from string import ascii_letters
import csv
import psutil
import functools
random_state = np.random.RandomState(seed=42)
DATASET_SIZE_DICT = {
"Small": (2 ** 2, 2 ** 3),
"Normal": (2 ** 6, 2 ** 8),
"Big": (2 ** 7, 2 ** 12),
}
# Size of test dataframes
NCOLS, NROWS = DATASET_SIZE_DICT.get(TestDatasetSize.get(), DATASET_SIZE_DICT["Normal"])
# Range for values for test data
RAND_LOW = 0
RAND_HIGH = 100
# Directory for storing I/O operations test data
IO_OPS_DATA_DIR = os.path.join(os.path.dirname(__file__), "io_tests_data")
# Input data and functions for the tests
# The test data that we will test our code against
test_data = {
# "empty_data": {},
# "columns_only": {"col1": [], "col2": [], "col3": [], "col4": [], "col5": []},
"int_data": {
"col{}".format(int((i - NCOLS / 2) % NCOLS + 1)): random_state.randint(
RAND_LOW, RAND_HIGH, size=(NROWS)
)
for i in range(NCOLS)
},
"float_nan_data": {
"col{}".format(int((i - NCOLS / 2) % NCOLS + 1)): [
x
if (j % 4 == 0 and i > NCOLS // 2) or (j != i and i <= NCOLS // 2)
else np.NaN
for j, x in enumerate(
random_state.uniform(RAND_LOW, RAND_HIGH, size=(NROWS))
)
]
for i in range(NCOLS)
},
# "int_float_object_data": {
# "col3": [1, 2, 3, 4],
# "col4": [4, 5, 6, 7],
# "col1": [8.0, 9.4, 10.1, 11.3],
# "col2": ["a", "b", "c", "d"],
# },
# "datetime_timedelta_data": {
# "col3": [
# np.datetime64("2010"),
# np.datetime64("2011"),
# np.datetime64("2011-06-15T00:00"),
# np.datetime64("2009-01-01"),
# ],
# "col4": [
# np.datetime64("2010"),
# np.datetime64("2011"),
# np.datetime64("2011-06-15T00:00"),
# np.datetime64("2009-01-01"),
# ],
# "col1": [
# np.timedelta64(1, "M"),
# np.timedelta64(2, "D"),
# np.timedelta64(3, "Y"),
# np.timedelta64(20, "D"),
# ],
# "col2": [
# np.timedelta64(1, "M"),
# np.timedelta64(2, "D"),
# np.timedelta64(3, "Y"),
# np.timedelta64(20, "D"),
# ],
# },
# "all_data": {
# "col3": 1.0,
# "col4": np.datetime64("2011-06-15T00:00"),
# "col5": np.array([3] * 4, dtype="int32"),
# "col1": "foo",
# "col2": True,
# },
}
# See details in #1403
test_data["int_data"]["index"] = test_data["int_data"].pop(
"col{}".format(int(NCOLS / 2))
)
for col in test_data["float_nan_data"]:
for row in range(NROWS // 2):
if row % 16 == 0:
test_data["float_nan_data"][col][row] = np.NaN
test_data_values = list(test_data.values())
test_data_keys = list(test_data.keys())
test_bool_data = {
"col{}".format(int((i - NCOLS / 2) % NCOLS + 1)): random_state.choice(
[True, False], size=(NROWS)
)
for i in range(NCOLS)
}
test_data_resample = {
"data": {"A": range(12), "B": range(12)},
"index": pandas.date_range("31/12/2000", periods=12, freq="H"),
}
test_data_with_duplicates = {
"no_duplicates": {
"col{}".format(int((i - NCOLS / 2) % NCOLS + 1)): range(NROWS)
for i in range(NCOLS)
},
"all_duplicates": {
"col{}".format(int((i - NCOLS / 2) % NCOLS + 1)): [
float(i) for _ in range(NROWS)
]
for i in range(NCOLS)
},
"some_duplicates": {
"col{}".format(int((i - NCOLS / 2) % NCOLS + 1)): [
i if j % 7 == 0 else x for j, x in enumerate(range(NROWS))
]
for i in range(NCOLS)
},
"has_name_column": {
"name": ["one", "two", "two", "three"],
"col1": [1, 2, 2, 3],
"col3": [10, 20, 20, 3],
"col7": [100, 201, 200, 300],
},
"str_columns": {
"col_str{}".format(int((i - NCOLS / 2) % NCOLS + 1)): [
"s" + str(x % 5) for x in range(NROWS)
]
for i in range(NCOLS)
},
}
test_data_with_duplicates["float_nan"] = test_data["float_nan_data"]
test_data_small = {
"small": {
"col0": [1, 2, 3, 4],
"col1": [8.0, 9.4, 10.1, 11.3],
"col2": [4, 5, 6, 7],
}
}
test_data_diff_dtype = {
"int_col": [-5, 2, 7, 16],
"float_col": [np.NaN, -9.4, 10.1, np.NaN],
"str_col": ["a", np.NaN, "c", "d"],
"bool_col": [False, True, True, False],
}
test_data_small_values = list(test_data_small.values())
test_data_small_keys = list(test_data_small.keys())
test_data_with_duplicates_values = list(test_data_with_duplicates.values())
test_data_with_duplicates_keys = list(test_data_with_duplicates.keys())
test_data_categorical = {
"ordered": pandas.Categorical(list("testdata"), ordered=True),
"unordered": pandas.Categorical(list("testdata"), ordered=False),
}
test_data_categorical_values = list(test_data_categorical.values())
test_data_categorical_keys = list(test_data_categorical.keys())
numeric_dfs = [
"empty_data",
"columns_only",
"int_data",
"float_nan_data",
"with_index_column",
]
no_numeric_dfs = ["datetime_timedelta_data"]
# String test data
test_string_data = {
"separator data": [
"abC|DeF,Hik",
"234,3245.67",
"gSaf,qWer|Gre",
"asd3,4sad|",
np.NaN,
]
}
test_string_data_values = list(test_string_data.values())
test_string_data_keys = list(test_string_data.keys())
# List of strings test data
test_string_list_data = {"simple string": [["a"], ["CdE"], ["jDf"], ["werB"]]}
test_string_list_data_values = list(test_string_list_data.values())
test_string_list_data_keys = list(test_string_list_data.keys())
string_separators = {"empty sep": "", "comma sep": ",", "None sep": None}
string_sep_values = list(string_separators.values())
string_sep_keys = list(string_separators.keys())
string_na_rep = {"None na_rep": None, "- na_rep": "-", "nan na_rep": np.NaN}
string_na_rep_values = list(string_na_rep.values())
string_na_rep_keys = list(string_na_rep.keys())
join_type = {"left": "left", "right": "right", "inner": "inner", "outer": "outer"}
join_type_keys = list(join_type.keys())
join_type_values = list(join_type.values())
# Test functions for applymap
test_func = {
"plus one": lambda x: x + 1,
"convert to string": lambda x: str(x),
"square": lambda x: x * x,
"identity": lambda x: x,
"return false": lambda x: False,
}
test_func_keys = list(test_func.keys())
test_func_values = list(test_func.values())
numeric_test_funcs = ["plus one", "square"]
# Test functions for query
query_func = {
"col1 < col2": "col1 < col2",
"col3 > col4": "col3 > col4",
"col1 == col2": "col1 == col2",
"(col2 > col1) and (col1 < col3)": "(col2 > col1) and (col1 < col3)",
}
query_func_keys = list(query_func.keys())
query_func_values = list(query_func.values())
# Test agg functions for apply, agg, and aggregate
agg_func = {
"sum": "sum",
"df sum": lambda df: df.sum(),
"str": str,
"sum mean": ["sum", "mean"],
"sum df sum": ["sum", lambda df: df.sum()],
"should raise TypeError": 1,
}
agg_func_keys = list(agg_func.keys())
agg_func_values = list(agg_func.values())
# For this sort of parameters pandas throws an exception.
# See details in pandas issue 36036.
agg_func_except = {
"sum sum": ["sum", "sum"],
}
agg_func_except_keys = list(agg_func_except.keys())
agg_func_except_values = list(agg_func_except.values())
numeric_agg_funcs = ["sum mean", "sum sum", "sum df sum"]
udf_func = {
"return self": lambda df: lambda x, *args, **kwargs: type(x)(x.values),
"change index": lambda df: lambda x, *args, **kwargs: pandas.Series(
x.values, index=np.arange(-1, len(x.index) - 1)
),
"return none": lambda df: lambda x, *args, **kwargs: None,
"return empty": lambda df: lambda x, *args, **kwargs: pandas.Series(),
"access self": lambda df: lambda x, other, *args, **kwargs: pandas.Series(
x.values, index=other.index
),
}
udf_func_keys = list(udf_func.keys())
udf_func_values = list(udf_func.values())
# Test q values for quantiles
quantiles = {
"0.25": 0.25,
"0.5": 0.5,
"0.75": 0.75,
"0.66": 0.66,
"0.01": 0.01,
"list": [0.25, 0.5, 0.75, 0.66, 0.01],
}
quantiles_keys = list(quantiles.keys())
quantiles_values = list(quantiles.values())
# Test indices for get, set_index, __contains__, insert
indices = {
"col1": "col1",
"col2": "col2",
"A": "A",
"B": "B",
"does not exist": "does not exist",
}
indices_keys = list(indices.keys())
indices_values = list(indices.values())
# Test functions for groupby apply
groupby_apply_func = {"sum": lambda df: df.sum(), "negate": lambda df: -df}
groupby_apply_func_keys = list(groupby_apply_func.keys())
groupby_apply_func_values = list(groupby_apply_func.values())
# Test functions for groupby agg
groupby_agg_func = {"min": "min", "max": "max"}
groupby_agg_func_keys = list(groupby_agg_func.keys())
groupby_agg_func_values = list(groupby_agg_func.values())
# Test functions for groupby transform
groupby_transform_func = {
"add 4": lambda df: df + 4,
"negatie and minus 10": lambda df: -df - 10,
}
groupby_transform_func_keys = list(groupby_transform_func.keys())
groupby_transform_func_values = list(groupby_transform_func.values())
# Test functions for groupby pipe
groupby_pipe_func = {"sum": lambda df: df.sum()}
groupby_pipe_func_keys = list(groupby_pipe_func.keys())
groupby_pipe_func_values = list(groupby_pipe_func.values())
# END Test input data and functions
# Parametrizations of common kwargs
axis = {
"over_rows_int": 0,
"over_rows_str": "rows",
"over_columns_int": 1,
"over_columns_str": "columns",
}
axis_keys = list(axis.keys())
axis_values = list(axis.values())
bool_arg = {"True": True, "False": False, "None": None}
bool_arg_keys = list(bool_arg.keys())
bool_arg_values = list(bool_arg.values())
int_arg = {"-5": -5, "-1": -1, "0": 0, "1": 1, "5": 5}
int_arg_keys = list(int_arg.keys())
int_arg_values = list(int_arg.values())
# END parametrizations of common kwargs
json_short_string = """[{"project": "modin"}]"""
json_long_string = """{
"quiz": {
"sport": {
"q1": {
"question": "Which one is correct team name in NBA?",
"options": [
"New York Bulls",
"Los Angeles Kings",
"Golden State Warriros",
"Huston Rocket"
],
"answer": "Huston Rocket"
}
},
"maths": {
"q1": {
"question": "5 + 7 = ?",
"options": [
"10",
"11",
"12",
"13"
],
"answer": "12"
},
"q2": {
"question": "12 - 8 = ?",
"options": [
"1",
"2",
"3",
"4"
],
"answer": "4"
}
}
}
}"""
json_long_bytes = BytesIO(json_long_string.encode(encoding="UTF-8"))
json_short_bytes = BytesIO(json_short_string.encode(encoding="UTF-8"))
# Text encoding types
encoding_types = [
"ascii",
"utf_32",
"utf_32_be",
"utf_32_le",
"utf_16",
"utf_16_be",
"utf_16_le",
"utf_7",
"utf_8",
"utf_8_sig",
]
# Raising these exceptions can be caused by unexpected behavior of an
# I/O operation test, but they could slip through the eval_io check since
# the exception types are the same
io_ops_bad_exc = [TypeError, FileNotFoundError]
# Files compression to extension mapping
COMP_TO_EXT = {"gzip": "gz", "bz2": "bz2", "xz": "xz", "zip": "zip"}
def categories_equals(left, right):
assert (left.ordered and right.ordered) or (not left.ordered and not right.ordered)
assert_extension_array_equal(left, right)
def df_categories_equals(df1, df2):
if not hasattr(df1, "select_dtypes"):
if isinstance(df1, pandas.CategoricalDtype):
return categories_equals(df1, df2)
elif isinstance(getattr(df1, "dtype"), pandas.CategoricalDtype) and isinstance(
getattr(df1, "dtype"), pandas.CategoricalDtype
):
return categories_equals(df1.dtype, df2.dtype)
else:
return True
categories_columns = df1.select_dtypes(include="category").columns
for column in categories_columns:
assert_extension_array_equal(
df1[column].values,
df2[column].values,
check_dtype=False,
)
def df_equals(df1, df2):
"""Tests if df1 and df2 are equal.
Args:
df1: (pandas or modin DataFrame or series) dataframe to test if equal.
df2: (pandas or modin DataFrame or series) dataframe to test if equal.
Returns:
True if df1 is equal to df2.
"""
    # We get an AttributeError if modin's groupby object is not imported like this
from modin.pandas.groupby import DataFrameGroupBy
groupby_types = (pandas.core.groupby.DataFrameGroupBy, DataFrameGroupBy)
# The typing behavior of how pandas treats its index is not consistent when the
# length of the DataFrame or Series is 0, so we just verify that the contents are
# the same.
if (
hasattr(df1, "index")
and hasattr(df2, "index")
and len(df1) == 0
and len(df2) == 0
):
if type(df1).__name__ == type(df2).__name__:
if hasattr(df1, "name") and hasattr(df2, "name") and df1.name == df2.name:
return
if (
hasattr(df1, "columns")
and hasattr(df2, "columns")
and df1.columns.equals(df2.columns)
):
return
assert False
if isinstance(df1, (list, tuple)) and all(
isinstance(d, (pd.DataFrame, pd.Series, pandas.DataFrame, pandas.Series))
for d in df1
):
assert isinstance(df2, type(df1)), "Different type of collection"
assert len(df1) == len(df2), "Different length result"
return (df_equals(d1, d2) for d1, d2 in zip(df1, df2))
# Convert to pandas
if isinstance(df1, (pd.DataFrame, pd.Series)):
df1 = to_pandas(df1)
if isinstance(df2, (pd.DataFrame, pd.Series)):
df2 = to_pandas(df2)
if isinstance(df1, pandas.DataFrame) and isinstance(df2, pandas.DataFrame):
if (df1.empty and not df2.empty) or (df2.empty and not df1.empty):
assert False, "One of the passed frames is empty, when other isn't"
elif df1.empty and df2.empty and type(df1) != type(df2):
assert (
False
), f"Empty frames have different types: {type(df1)} != {type(df2)}"
if isinstance(df1, pandas.DataFrame) and isinstance(df2, pandas.DataFrame):
assert_frame_equal(
df1,
df2,
check_dtype=False,
check_datetimelike_compat=True,
check_index_type=False,
check_column_type=False,
check_categorical=False,
)
df_categories_equals(df1, df2)
elif isinstance(df1, pandas.Index) and isinstance(df2, pandas.Index):
assert_index_equal(df1, df2)
elif isinstance(df1, pandas.Series) and isinstance(df2, pandas.Series):
assert_series_equal(df1, df2, check_dtype=False, check_series_type=False)
elif isinstance(df1, groupby_types) and isinstance(df2, groupby_types):
for g1, g2 in zip(df1, df2):
assert g1[0] == g2[0]
df_equals(g1[1], g2[1])
elif (
isinstance(df1, pandas.Series)
and isinstance(df2, pandas.Series)
and df1.empty
and df2.empty
):
assert all(df1.index == df2.index)
assert df1.dtypes == df2.dtypes
elif isinstance(df1, pandas.core.arrays.numpy_.PandasArray):
assert isinstance(df2, pandas.core.arrays.numpy_.PandasArray)
assert df1 == df2
elif isinstance(df1, np.recarray) and isinstance(df2, np.recarray):
np.testing.assert_array_equal(df1, df2)
else:
if df1 != df2:
np.testing.assert_almost_equal(df1, df2)
def modin_df_almost_equals_pandas(modin_df, pandas_df):
df_categories_equals(modin_df._to_pandas(), pandas_df)
modin_df = to_pandas(modin_df)
if hasattr(modin_df, "select_dtypes"):
modin_df = modin_df.select_dtypes(exclude=["category"])
if hasattr(pandas_df, "select_dtypes"):
pandas_df = pandas_df.select_dtypes(exclude=["category"])
difference = modin_df - pandas_df
diff_max = difference.max()
if isinstance(diff_max, pandas.Series):
diff_max = diff_max.max()
assert (
modin_df.equals(pandas_df)
or diff_max < 0.0001
or (all(modin_df.isna().all()) and all(pandas_df.isna().all()))
)
def df_is_empty(df):
"""Tests if df is empty.
Args:
df: (pandas or modin DataFrame) dataframe to test if empty.
Returns:
True if df is empty.
"""
assert df.size == 0 and df.empty
assert df.shape[0] == 0 or df.shape[1] == 0
def arg_keys(arg_name, keys):
"""Appends arg_name to the front of all values in keys.
Args:
arg_name: (string) String containing argument name.
keys: (list of strings) Possible inputs of argument.
Returns:
        List of strings with arg_name appended to the front of each key.
"""
return ["{0}_{1}".format(arg_name, key) for key in keys]
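# Illustrative sketch (not part of the original suite): `arg_keys` builds the
# human-readable ids that are typically passed to `pytest.mark.parametrize`,
# pairing each entry of e.g. `axis_values` with an id such as
# "axis_over_rows_int". The helper below is hypothetical, added only for
# documentation, and is never called by the tests.
def _example_arg_keys_usage():
    ids = arg_keys("axis", axis_keys)
    # ids == ["axis_over_rows_int", "axis_over_rows_str",
    #         "axis_over_columns_int", "axis_over_columns_str"]
    return pytest.mark.parametrize("axis", axis_values, ids=ids)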
def name_contains(test_name, vals):
"""Determines if any string in vals is a substring of test_name.
Args:
test_name: (string) String to determine if contains substrings.
vals: (list of strings) List of substrings to test for.
Returns:
True if a substring in vals is in test_name, else False.
"""
return any(val in test_name for val in vals)
def check_df_columns_have_nans(df, cols):
"""Checks if there are NaN values in specified columns of a dataframe.
:param df: Dataframe to check.
:param cols: One column name or list of column names.
:return:
True if specified columns of dataframe contains NaNs.
"""
return (
pandas.api.types.is_list_like(cols)
and (
any(isinstance(x, str) and x in df.columns and df[x].hasnans for x in cols)
or any(
isinstance(x, pd.Series) and x._parent is df and x.hasnans for x in cols
)
)
) or (
not pandas.api.types.is_list_like(cols)
and cols in df.columns
and df[cols].hasnans
)
def eval_general(
modin_df,
pandas_df,
operation,
comparator=df_equals,
__inplace__=False,
check_exception_type=True,
raising_exceptions=None,
check_kwargs_callable=True,
md_extra_kwargs=None,
**kwargs,
):
if raising_exceptions:
assert (
check_exception_type
), "if raising_exceptions is not None or False, check_exception_type should be True"
md_kwargs, pd_kwargs = {}, {}
def execute_callable(fn, inplace=False, md_kwargs={}, pd_kwargs={}):
try:
pd_result = fn(pandas_df, **pd_kwargs)
except Exception as pd_e:
if check_exception_type is None:
return None
with pytest.raises(Exception) as md_e:
# repr to force materialization
repr(fn(modin_df, **md_kwargs))
if check_exception_type:
assert isinstance(md_e.value, type(pd_e))
if raising_exceptions:
assert not isinstance(
md_e.value, tuple(raising_exceptions)
), f"not acceptable exception type: {md_e.value}"
else:
md_result = fn(modin_df, **md_kwargs)
return (md_result, pd_result) if not __inplace__ else (modin_df, pandas_df)
for key, value in kwargs.items():
if check_kwargs_callable and callable(value):
values = execute_callable(value)
            # this means the callable raised an exception
if values is None:
return
else:
md_value, pd_value = values
else:
md_value, pd_value = value, value
md_kwargs[key] = md_value
pd_kwargs[key] = pd_value
if md_extra_kwargs:
assert isinstance(md_extra_kwargs, dict)
md_kwargs.update(md_extra_kwargs)
values = execute_callable(
operation, md_kwargs=md_kwargs, pd_kwargs=pd_kwargs, inplace=__inplace__
)
if values is not None:
comparator(*values)
def eval_io(
fn_name,
comparator=df_equals,
cast_to_str=False,
check_exception_type=True,
raising_exceptions=io_ops_bad_exc,
check_kwargs_callable=True,
modin_warning=None,
md_extra_kwargs=None,
*args,
**kwargs,
):
"""Evaluate I/O operation outputs equality check.
Parameters
----------
fn_name: str
I/O operation name ("read_csv" for example).
comparator: obj
Function to perform comparison.
cast_to_str: bool
        There could be some mismatches in dtypes, so we're
casting the whole frame to `str` before comparison.
See issue #1931 for details.
check_exception_type: bool
        Whether to check exception types when the operation fails
        (compare the exception types raised by pandas and Modin).
raising_exceptions: Exception or list of Exceptions
Exceptions that should be raised even if they are raised
both by Pandas and Modin (check evaluated only if
`check_exception_type` passed as `True`).
modin_warning: obj
Warning that should be raised by Modin.
md_extra_kwargs: dict
Modin operation specific kwargs.
"""
def applyier(module, *args, **kwargs):
result = getattr(module, fn_name)(*args, **kwargs)
if cast_to_str:
result = result.astype(str)
return result
def call_eval_general():
eval_general(
pd,
pandas,
applyier,
check_exception_type=check_exception_type,
raising_exceptions=raising_exceptions,
check_kwargs_callable=check_kwargs_callable,
md_extra_kwargs=md_extra_kwargs,
*args,
**kwargs,
)
if modin_warning:
with pytest.warns(modin_warning):
call_eval_general()
else:
call_eval_general()
def eval_io_from_str(csv_str: str, unique_filename: str, **kwargs):
"""Evaluate I/O operation outputs equality check by using `csv_str`
data passed as python str (csv test file will be created from `csv_str`).
Parameters
----------
csv_str: str
Test data for storing to csv file.
unique_filename: str
csv file name.
"""
try:
with open(unique_filename, "w") as f:
f.write(csv_str)
eval_io(
filepath_or_buffer=unique_filename,
fn_name="read_csv",
**kwargs,
)
finally:
if os.path.exists(unique_filename):
try:
os.remove(unique_filename)
except PermissionError:
pass
def create_test_dfs(*args, **kwargs):
post_fn = kwargs.pop("post_fn", lambda df: df)
return map(
post_fn, [pd.DataFrame(*args, **kwargs), pandas.DataFrame(*args, **kwargs)]
)
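# Illustrative sketch (not part of the original suite): `create_test_dfs`
# returns a (modin, pandas) pair built from the same data, which is how most
# tests feed `eval_general`. The helper below is hypothetical, never called,
# and only documents that typical pattern.
def _example_eval_general_usage():
    modin_df, pandas_df = create_test_dfs(test_data_small["small"])
    # Run the same operation on both frames and compare the results with df_equals.
    eval_general(modin_df, pandas_df, lambda df: df.sum())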
def generate_dfs():
df = pandas.DataFrame(
{
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
)
df2 = pandas.DataFrame(
{
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col6": [12, 13, 14, 15],
"col7": [0, 0, 0, 0],
}
)
return df, df2
def generate_multiindex_dfs(axis=1):
def generate_multiindex(index):
return pandas.MultiIndex.from_tuples(
[("a", x) for x in index.values], names=["name1", "name2"]
)
df1, df2 = generate_dfs()
df1.axes[axis], df2.axes[axis] = map(
generate_multiindex, [df1.axes[axis], df2.axes[axis]]
)
return df1, df2
def generate_multiindex(elements_number, nlevels=2, is_tree_like=False):
def generate_level(length, nlevel):
src = ["bar", "baz", "foo", "qux"]
return [src[i % len(src)] + f"-{nlevel}-{i}" for i in range(length)]
if is_tree_like:
for penalty_level in [0, 1]:
lvl_len_f, lvl_len_d = math.modf(
round(elements_number ** (1 / (nlevels - penalty_level)), 12)
)
if lvl_len_d >= 2 and lvl_len_f == 0:
break
if lvl_len_d < 2 or lvl_len_f != 0:
raise RuntimeError(
                f"Can't generate Tree-like MultiIndex with length: {elements_number} and number of levels: {nlevels}"
)
lvl_len = int(lvl_len_d)
result = pd.MultiIndex.from_product(
[generate_level(lvl_len, i) for i in range(nlevels - penalty_level)],
names=[f"level-{i}" for i in range(nlevels - penalty_level)],
)
if penalty_level:
result = pd.MultiIndex.from_tuples(
[("base_level", *ml_tuple) for ml_tuple in result],
names=[f"level-{i}" for i in range(nlevels)],
)
return result.sort_values()
else:
base_level = ["first"] * (elements_number // 2 + elements_number % 2) + [
"second"
] * (elements_number // 2)
primary_levels = [generate_level(elements_number, i) for i in range(1, nlevels)]
arrays = [base_level] + primary_levels
return pd.MultiIndex.from_tuples(
list(zip(*arrays)), names=[f"level-{i}" for i in range(nlevels)]
).sort_values()
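# Illustrative sketch (not part of the original suite): the helper below is
# hypothetical and never called; it only documents what `generate_multiindex`
# produces under the assumptions stated in the comments.
def _example_generate_multiindex_usage():
    # Tree-like: 8 elements over 3 levels -> each level gets 8 ** (1 / 3) == 2 labels.
    tree_like = generate_multiindex(8, nlevels=3, is_tree_like=True)
    # Flat: the base level alternates "first"/"second", the other levels get unique labels.
    flat = generate_multiindex(6, nlevels=2)
    return tree_like, flat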
def generate_none_dfs():
df = pandas.DataFrame(
{
"col1": [0, 1, 2, 3],
"col2": [4, 5, None, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [None, None, None, None],
}
)
df2 = pandas.DataFrame(
{
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col6": [12, 13, 14, 15],
"col7": [0, 0, 0, 0],
}
)
return df, df2
def get_unique_filename(
test_name: str = "test",
kwargs: dict = {},
extension: str = "csv",
data_dir: str = IO_OPS_DATA_DIR,
suffix: str = "",
debug_mode=False,
):
"""Returns unique file name with specified parameters.
Parameters
----------
test_name: str
name of the test for which the unique file name is needed.
    kwargs: dict
        Unique combination of test parameters used to create the unique name.
extension: str
Extension of unique file.
data_dir: str
Data directory where test files will be created.
suffix: str
String to append to the resulted name.
debug_mode: bool
Get unique filename containing kwargs values.
Otherwise kwargs values will be replaced with hash equivalent.
Returns
-------
Unique file name.
"""
suffix_part = f"_{suffix}" if suffix else ""
extension_part = f".{extension}" if extension else ""
if debug_mode:
        # shortcut if kwargs parameters are not provided
if len(kwargs) == 0 and extension == "csv" and suffix == "":
return os.path.join(data_dir, (test_name + suffix_part + f".{extension}"))
assert "." not in extension, "please provide pure extension name without '.'"
prohibited_chars = ['"', "\n"]
non_prohibited_char = "np_char"
char_counter = 0
kwargs_name = dict(kwargs)
for key, value in kwargs_name.items():
for char in prohibited_chars:
if isinstance(value, str) and char in value or callable(value):
kwargs_name[key] = non_prohibited_char + str(char_counter)
char_counter += 1
parameters_values = "_".join(
[
str(value)
if not isinstance(value, (list, tuple))
else "_".join([str(x) for x in value])
for value in kwargs_name.values()
]
)
return os.path.join(
data_dir, test_name + parameters_values + suffix_part + extension_part
)
else:
import uuid
return os.path.join(data_dir, uuid.uuid1().hex + suffix_part + extension_part)
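# Illustrative sketch (not part of the original suite): the helper below is
# hypothetical and never called; it only shows the two naming modes of
# `get_unique_filename` under those assumptions.
def _example_get_unique_filename_usage():
    # Non-debug mode (default): a uuid-based name such as "<hex>_example.csv".
    anonymous_name = get_unique_filename(suffix="example")
    # Debug mode: kwargs values are embedded in the name for easier inspection.
    readable_name = get_unique_filename(
        test_name="read_csv", kwargs={"sep": ","}, debug_mode=True
    )
    return anonymous_name, readable_name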
def get_random_string():
random_string = "".join(
random_state.choice([x for x in ascii_letters], size=10).tolist()
)
return random_string
def insert_lines_to_csv(
csv_name: str,
lines_positions: list,
lines_type: str = "blank",
encoding: str = None,
**csv_reader_writer_params,
):
"""Insert lines to ".csv" file.
Parameters
----------
csv_name: str
".csv" file that should be modified.
lines_positions: list of ints
        Line positions that should be modified (serial number
        of the line; begins at 0, ends at <rows_number> - 1).
lines_type: str
Lines types that should be inserted to ".csv" file. Possible types:
"blank" - empty line without any delimiters/separators,
"bad" - lines with len(lines_data) > cols_number
encoding: str
Encoding type that should be used during file reading and writing.
"""
cols_number = len(pandas.read_csv(csv_name, nrows=1).columns)
if lines_type == "blank":
lines_data = []
elif lines_type == "bad":
cols_number = len(pandas.read_csv(csv_name, nrows=1).columns)
lines_data = [x for x in range(cols_number + 1)]
else:
raise ValueError(
f"acceptable values for parameter are ['blank', 'bad'], actually passed {lines_type}"
)
lines = []
dialect = "excel"
with open(csv_name, "r", encoding=encoding, newline="") as read_file:
try:
dialect = csv.Sniffer().sniff(read_file.read())
read_file.seek(0)
except Exception:
dialect = None
reader = csv.reader(
read_file,
dialect=dialect if dialect is not None else "excel",
**csv_reader_writer_params,
)
counter = 0
for row in reader:
if counter in lines_positions:
lines.append(lines_data)
else:
lines.append(row)
counter += 1
with open(csv_name, "w", encoding=encoding, newline="") as write_file:
writer = csv.writer(
write_file,
dialect=dialect if dialect is not None else "excel",
**csv_reader_writer_params,
)
writer.writerows(lines)
def _get_open_files():
"""
psutil open_files() can return a lot of extra information that we can allow to
be different, like file position; for simplicity we care about path and fd only.
"""
return sorted((info.path, info.fd) for info in psutil.Process().open_files())
def check_file_leaks(func):
"""
A decorator that ensures that no *newly* opened file handles are left
after decorated function is finished.
"""
if not TrackFileLeaks.get():
return func
@functools.wraps(func)
def check(*a, **kw):
fstart = _get_open_files()
try:
return func(*a, **kw)
finally:
leaks = []
for item in _get_open_files():
try:
fstart.remove(item)
except ValueError:
# ignore files in /proc/, as they have nothing to do with
# modin reading any data (and this is what we care about)
if not item[0].startswith("/proc/"):
leaks.append(item)
assert (
not leaks
            ), f"Unexpected open handles left for: {', '.join(item[0] for item in leaks)}"
return check
def dummy_decorator():
"""A problematic decorator that does not use `functools.wraps`. This introduces unwanted local variables for
inspect.currentframe. This decorator is used in test_io to test `read_csv` and `read_table`
"""
def wrapper(method):
def wrapped_function(self, *args, **kwargs):
result = method(self, *args, **kwargs)
return result
return wrapped_function
return wrapper
def generate_dataframe(row_size=NROWS, additional_col_values=None):
dates = pandas.date_range("2000", freq="h", periods=row_size)
data = {
"col1": np.arange(row_size) * 10,
"col2": [str(x.date()) for x in dates],
"col3": np.arange(row_size) * 10,
"col4": [str(x.time()) for x in dates],
"col5": [get_random_string() for _ in range(row_size)],
"col6": random_state.uniform(low=0.0, high=10000.0, size=row_size),
}
if additional_col_values is not None:
assert isinstance(additional_col_values, (list, tuple))
data.update(
{
"col7": random_state.choice(additional_col_values, size=row_size),
}
)
return pandas.DataFrame(data)
def _make_csv_file(filenames):
def _csv_file_maker(
filename,
row_size=NROWS,
force=True,
delimiter=",",
encoding=None,
compression="infer",
additional_col_values=None,
remove_randomness=False,
add_blank_lines=False,
add_bad_lines=False,
add_nan_lines=False,
thousands_separator=None,
decimal_separator=None,
comment_col_char=None,
quoting=csv.QUOTE_MINIMAL,
quotechar='"',
doublequote=True,
escapechar=None,
line_terminator=None,
):
if os.path.exists(filename) and not force:
pass
else:
df = generate_dataframe(row_size, additional_col_values)
if remove_randomness:
df = df[["col1", "col2", "col3", "col4"]]
if add_nan_lines:
for i in range(0, row_size, row_size // (row_size // 10)):
df.loc[i] = pandas.Series()
if comment_col_char:
char = comment_col_char if isinstance(comment_col_char, str) else "#"
df.insert(
loc=0,
column="col_with_comments",
value=[char if (x + 2) == 0 else x for x in range(row_size)],
)
if thousands_separator:
for col_id in ["col1", "col3"]:
df[col_id] = df[col_id].apply(
lambda x: f"{x:,d}".replace(",", thousands_separator)
)
df["col6"] = df["col6"].apply(
lambda x: f"{x:,f}".replace(",", thousands_separator)
)
filename = (
f"{filename}.{COMP_TO_EXT[compression]}"
if compression != "infer"
else filename
)
df.to_csv(
filename,
sep=delimiter,
encoding=encoding,
compression=compression,
index=False,
decimal=decimal_separator if decimal_separator else ".",
line_terminator=line_terminator,
quoting=quoting,
quotechar=quotechar,
doublequote=doublequote,
escapechar=escapechar,
)
csv_reader_writer_params = {
"delimiter": delimiter,
"doublequote": doublequote,
"escapechar": escapechar,
"lineterminator": line_terminator if line_terminator else os.linesep,
"quotechar": quotechar,
"quoting": quoting,
}
if add_blank_lines:
insert_lines_to_csv(
csv_name=filename,
lines_positions=[
x for x in range(5, row_size, row_size // (row_size // 10))
],
lines_type="blank",
encoding=encoding,
**csv_reader_writer_params,
)
if add_bad_lines:
insert_lines_to_csv(
csv_name=filename,
lines_positions=[
x for x in range(6, row_size, row_size // (row_size // 10))
],
lines_type="bad",
encoding=encoding,
**csv_reader_writer_params,
)
filenames.append(filename)
return df
return _csv_file_maker
def teardown_test_file(test_path):
if os.path.exists(test_path):
        # PermissionError can occur because of issue #2533
try:
os.remove(test_path)
except PermissionError:
pass
def teardown_test_files(test_paths: list):
for path in test_paths:
teardown_test_file(path)
def sort_index_for_equal_values(series, ascending=False):
if series.index.dtype == np.float64:
# HACK: workaround for pandas bug:
# https://github.com/pandas-dev/pandas/issues/34455
series.index = series.index.astype("str")
res = series.groupby(series, sort=False).apply(
lambda df: df.sort_index(ascending=ascending)
)
if res.index.nlevels > series.index.nlevels:
# Sometimes GroupBy adds an extra level with 'by' to the result index.
# GroupBy is very inconsistent about when it's doing this, so that's
# why this clumsy if-statement is used.
res.index = res.index.droplevel(0)
res.name = series.name
return res
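# Illustrative sketch (not part of the original suite): `sort_index_for_equal_values`
# makes value_counts-style results comparable when ties may be ordered differently
# by Modin and pandas. The helper below is hypothetical and never called.
def _example_sort_index_for_equal_values_usage():
    counts = pandas.Series([3, 1, 2, 2]).value_counts()
    # Ties in the counts get a deterministic index order before comparison.
    return sort_index_for_equal_values(counts, ascending=True)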
|
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import pytest
import numpy as np
import math
import pandas
from pandas.testing import (
assert_series_equal,
assert_frame_equal,
assert_index_equal,
assert_extension_array_equal,
)
import modin.pandas as pd
from modin.utils import to_pandas
from modin.config import TestDatasetSize, TrackFileLeaks
from io import BytesIO
import os
from string import ascii_letters
import csv
import psutil
import functools
random_state = np.random.RandomState(seed=42)
DATASET_SIZE_DICT = {
"Small": (2 ** 2, 2 ** 3),
"Normal": (2 ** 6, 2 ** 8),
"Big": (2 ** 7, 2 ** 12),
}
# Size of test dataframes
NCOLS, NROWS = DATASET_SIZE_DICT.get(TestDatasetSize.get(), DATASET_SIZE_DICT["Normal"])
# Range for values for test data
RAND_LOW = 0
RAND_HIGH = 100
# Directory for storing I/O operations test data
IO_OPS_DATA_DIR = os.path.join(os.path.dirname(__file__), "io_tests_data")
# Input data and functions for the tests
# The test data that we will test our code against
test_data = {
# "empty_data": {},
# "columns_only": {"col1": [], "col2": [], "col3": [], "col4": [], "col5": []},
"int_data": {
"col{}".format(int((i - NCOLS / 2) % NCOLS + 1)): random_state.randint(
RAND_LOW, RAND_HIGH, size=(NROWS)
)
for i in range(NCOLS)
},
"float_nan_data": {
"col{}".format(int((i - NCOLS / 2) % NCOLS + 1)): [
x
if (j % 4 == 0 and i > NCOLS // 2) or (j != i and i <= NCOLS // 2)
else np.NaN
for j, x in enumerate(
random_state.uniform(RAND_LOW, RAND_HIGH, size=(NROWS))
)
]
for i in range(NCOLS)
},
# "int_float_object_data": {
# "col3": [1, 2, 3, 4],
# "col4": [4, 5, 6, 7],
# "col1": [8.0, 9.4, 10.1, 11.3],
# "col2": ["a", "b", "c", "d"],
# },
# "datetime_timedelta_data": {
# "col3": [
# np.datetime64("2010"),
# np.datetime64("2011"),
# np.datetime64("2011-06-15T00:00"),
# np.datetime64("2009-01-01"),
# ],
# "col4": [
# np.datetime64("2010"),
# np.datetime64("2011"),
# np.datetime64("2011-06-15T00:00"),
# np.datetime64("2009-01-01"),
# ],
# "col1": [
# np.timedelta64(1, "M"),
# np.timedelta64(2, "D"),
# np.timedelta64(3, "Y"),
# np.timedelta64(20, "D"),
# ],
# "col2": [
# np.timedelta64(1, "M"),
# np.timedelta64(2, "D"),
# np.timedelta64(3, "Y"),
# np.timedelta64(20, "D"),
# ],
# },
# "all_data": {
# "col3": 1.0,
# "col4": np.datetime64("2011-06-15T00:00"),
# "col5": np.array([3] * 4, dtype="int32"),
# "col1": "foo",
# "col2": True,
# },
}
# See details in #1403
test_data["int_data"]["index"] = test_data["int_data"].pop(
"col{}".format(int(NCOLS / 2))
)
for col in test_data["float_nan_data"]:
for row in range(NROWS // 2):
if row % 16 == 0:
test_data["float_nan_data"][col][row] = np.NaN
test_data_values = list(test_data.values())
test_data_keys = list(test_data.keys())
test_bool_data = {
"col{}".format(int((i - NCOLS / 2) % NCOLS + 1)): random_state.choice(
[True, False], size=(NROWS)
)
for i in range(NCOLS)
}
test_data_resample = {
"data": {"A": range(12), "B": range(12)},
"index": pandas.date_range("31/12/2000", periods=12, freq="H"),
}
test_data_with_duplicates = {
"no_duplicates": {
"col{}".format(int((i - NCOLS / 2) % NCOLS + 1)): range(NROWS)
for i in range(NCOLS)
},
"all_duplicates": {
"col{}".format(int((i - NCOLS / 2) % NCOLS + 1)): [
float(i) for _ in range(NROWS)
]
for i in range(NCOLS)
},
"some_duplicates": {
"col{}".format(int((i - NCOLS / 2) % NCOLS + 1)): [
i if j % 7 == 0 else x for j, x in enumerate(range(NROWS))
]
for i in range(NCOLS)
},
"has_name_column": {
"name": ["one", "two", "two", "three"],
"col1": [1, 2, 2, 3],
"col3": [10, 20, 20, 3],
"col7": [100, 201, 200, 300],
},
"str_columns": {
"col_str{}".format(int((i - NCOLS / 2) % NCOLS + 1)): [
"s" + str(x % 5) for x in range(NROWS)
]
for i in range(NCOLS)
},
}
test_data_with_duplicates["float_nan"] = test_data["float_nan_data"]
test_data_small = {
"small": {
"col0": [1, 2, 3, 4],
"col1": [8.0, 9.4, 10.1, 11.3],
"col2": [4, 5, 6, 7],
}
}
test_data_diff_dtype = {
"int_col": [-5, 2, 7, 16],
"float_col": [np.NaN, -9.4, 10.1, np.NaN],
"str_col": ["a", np.NaN, "c", "d"],
"bool_col": [False, True, True, False],
}
test_data_small_values = list(test_data_small.values())
test_data_small_keys = list(test_data_small.keys())
test_data_with_duplicates_values = list(test_data_with_duplicates.values())
test_data_with_duplicates_keys = list(test_data_with_duplicates.keys())
test_data_categorical = {
"ordered": pandas.Categorical(list("testdata"), ordered=True),
"unordered": pandas.Categorical(list("testdata"), ordered=False),
}
test_data_categorical_values = list(test_data_categorical.values())
test_data_categorical_keys = list(test_data_categorical.keys())
numeric_dfs = [
"empty_data",
"columns_only",
"int_data",
"float_nan_data",
"with_index_column",
]
no_numeric_dfs = ["datetime_timedelta_data"]
# String test data
test_string_data = {
"separator data": [
"abC|DeF,Hik",
"234,3245.67",
"gSaf,qWer|Gre",
"asd3,4sad|",
np.NaN,
]
}
test_string_data_values = list(test_string_data.values())
test_string_data_keys = list(test_string_data.keys())
# List of strings test data
test_string_list_data = {"simple string": [["a"], ["CdE"], ["jDf"], ["werB"]]}
test_string_list_data_values = list(test_string_list_data.values())
test_string_list_data_keys = list(test_string_list_data.keys())
string_seperators = {"empty sep": "", "comma sep": ",", "None sep": None}
string_sep_values = list(string_seperators.values())
string_sep_keys = list(string_seperators.keys())
string_na_rep = {"None na_rep": None, "- na_rep": "-", "nan na_rep": np.NaN}
string_na_rep_values = list(string_na_rep.values())
string_na_rep_keys = list(string_na_rep.keys())
join_type = {"left": "left", "right": "right", "inner": "inner", "outer": "outer"}
join_type_keys = list(join_type.keys())
join_type_values = list(join_type.values())
# Test functions for applymap
test_func = {
"plus one": lambda x: x + 1,
"convert to string": lambda x: str(x),
"square": lambda x: x * x,
"identity": lambda x: x,
"return false": lambda x: False,
}
test_func_keys = list(test_func.keys())
test_func_values = list(test_func.values())
numeric_test_funcs = ["plus one", "square"]
# Test functions for query
query_func = {
"col1 < col2": "col1 < col2",
"col3 > col4": "col3 > col4",
"col1 == col2": "col1 == col2",
"(col2 > col1) and (col1 < col3)": "(col2 > col1) and (col1 < col3)",
}
query_func_keys = list(query_func.keys())
query_func_values = list(query_func.values())
# Test agg functions for apply, agg, and aggregate
agg_func = {
"sum": "sum",
"df sum": lambda df: df.sum(),
"str": str,
"sum mean": ["sum", "mean"],
"sum df sum": ["sum", lambda df: df.sum()],
"should raise TypeError": 1,
}
agg_func_keys = list(agg_func.keys())
agg_func_values = list(agg_func.values())
# For this sort of parameters pandas throws an exception.
# See details in pandas issue 36036.
agg_func_except = {
"sum sum": ["sum", "sum"],
}
agg_func_except_keys = list(agg_func_except.keys())
agg_func_except_values = list(agg_func_except.values())
numeric_agg_funcs = ["sum mean", "sum sum", "sum df sum"]
udf_func = {
"return self": lambda df: lambda x, *args, **kwargs: type(x)(x.values),
"change index": lambda df: lambda x, *args, **kwargs: pandas.Series(
x.values, index=np.arange(-1, len(x.index) - 1)
),
"return none": lambda df: lambda x, *args, **kwargs: None,
"return empty": lambda df: lambda x, *args, **kwargs: pandas.Series(),
"access self": lambda df: lambda x, other, *args, **kwargs: pandas.Series(
x.values, index=other.index
),
}
udf_func_keys = list(udf_func.keys())
udf_func_values = list(udf_func.values())
# Test q values for quantiles
quantiles = {
"0.25": 0.25,
"0.5": 0.5,
"0.75": 0.75,
"0.66": 0.66,
"0.01": 0.01,
"list": [0.25, 0.5, 0.75, 0.66, 0.01],
}
quantiles_keys = list(quantiles.keys())
quantiles_values = list(quantiles.values())
# Test indices for get, set_index, __contains__, insert
indices = {
"col1": "col1",
"col2": "col2",
"A": "A",
"B": "B",
"does not exist": "does not exist",
}
indices_keys = list(indices.keys())
indices_values = list(indices.values())
# Test functions for groupby apply
groupby_apply_func = {"sum": lambda df: df.sum(), "negate": lambda df: -df}
groupby_apply_func_keys = list(groupby_apply_func.keys())
groupby_apply_func_values = list(groupby_apply_func.values())
# Test functions for groupby agg
groupby_agg_func = {"min": "min", "max": "max"}
groupby_agg_func_keys = list(groupby_agg_func.keys())
groupby_agg_func_values = list(groupby_agg_func.values())
# Test functions for groupby transform
groupby_transform_func = {
"add 4": lambda df: df + 4,
"negatie and minus 10": lambda df: -df - 10,
}
groupby_transform_func_keys = list(groupby_transform_func.keys())
groupby_transform_func_values = list(groupby_transform_func.values())
# Test functions for groupby pipe
groupby_pipe_func = {"sum": lambda df: df.sum()}
groupby_pipe_func_keys = list(groupby_pipe_func.keys())
groupby_pipe_func_values = list(groupby_pipe_func.values())
# END Test input data and functions
# Parametrizations of common kwargs
axis = {
"over_rows_int": 0,
"over_rows_str": "rows",
"over_columns_int": 1,
"over_columns_str": "columns",
}
axis_keys = list(axis.keys())
axis_values = list(axis.values())
bool_arg = {"True": True, "False": False, "None": None}
bool_arg_keys = list(bool_arg.keys())
bool_arg_values = list(bool_arg.values())
int_arg = {"-5": -5, "-1": -1, "0": 0, "1": 1, "5": 5}
int_arg_keys = list(int_arg.keys())
int_arg_values = list(int_arg.values())
# END parametrizations of common kwargs
json_short_string = """[{"project": "modin"}]"""
json_long_string = """{
"quiz": {
"sport": {
"q1": {
"question": "Which one is correct team name in NBA?",
"options": [
"New York Bulls",
"Los Angeles Kings",
"Golden State Warriros",
"Huston Rocket"
],
"answer": "Huston Rocket"
}
},
"maths": {
"q1": {
"question": "5 + 7 = ?",
"options": [
"10",
"11",
"12",
"13"
],
"answer": "12"
},
"q2": {
"question": "12 - 8 = ?",
"options": [
"1",
"2",
"3",
"4"
],
"answer": "4"
}
}
}
}"""
json_long_bytes = BytesIO(json_long_string.encode(encoding="UTF-8"))
json_short_bytes = BytesIO(json_short_string.encode(encoding="UTF-8"))
# Text encoding types
encoding_types = [
"ascii",
"utf_32",
"utf_32_be",
"utf_32_le",
"utf_16",
"utf_16_be",
"utf_16_le",
"utf_7",
"utf_8",
"utf_8_sig",
]
# Raising these exceptions can be caused by unexpected behavior of an
# I/O operation test, but they could slip through the eval_io check since
# the exception types are the same
io_ops_bad_exc = [TypeError, FileNotFoundError]
# Files compression to extension mapping
COMP_TO_EXT = {"gzip": "gz", "bz2": "bz2", "xz": "xz", "zip": "zip"}
def categories_equals(left, right):
assert (left.ordered and right.ordered) or (not left.ordered and not right.ordered)
assert_extension_array_equal(left, right)
def df_categories_equals(df1, df2):
if not hasattr(df1, "select_dtypes"):
if isinstance(df1, pandas.CategoricalDtype):
return categories_equals(df1, df2)
elif isinstance(getattr(df1, "dtype"), pandas.CategoricalDtype) and isinstance(
getattr(df1, "dtype"), pandas.CategoricalDtype
):
return categories_equals(df1.dtype, df2.dtype)
else:
return True
categories_columns = df1.select_dtypes(include="category").columns
for column in categories_columns:
assert_extension_array_equal(
df1[column].values,
df2[column].values,
check_dtype=False,
)
def df_equals(df1, df2):
"""Tests if df1 and df2 are equal.
Args:
df1: (pandas or modin DataFrame or series) dataframe to test if equal.
df2: (pandas or modin DataFrame or series) dataframe to test if equal.
Returns:
True if df1 is equal to df2.
"""
    # We get an AttributeError if modin's groupby object is not imported like this
from modin.pandas.groupby import DataFrameGroupBy
groupby_types = (pandas.core.groupby.DataFrameGroupBy, DataFrameGroupBy)
# The typing behavior of how pandas treats its index is not consistent when the
# length of the DataFrame or Series is 0, so we just verify that the contents are
# the same.
if (
hasattr(df1, "index")
and hasattr(df2, "index")
and len(df1) == 0
and len(df2) == 0
):
if type(df1).__name__ == type(df2).__name__:
if hasattr(df1, "name") and hasattr(df2, "name") and df1.name == df2.name:
return
if (
hasattr(df1, "columns")
and hasattr(df2, "columns")
and df1.columns.equals(df2.columns)
):
return
assert False
if isinstance(df1, (list, tuple)) and all(
isinstance(d, (pd.DataFrame, pd.Series, pandas.DataFrame, pandas.Series))
for d in df1
):
assert isinstance(df2, type(df1)), "Different type of collection"
assert len(df1) == len(df2), "Different length result"
return (df_equals(d1, d2) for d1, d2 in zip(df1, df2))
# Convert to pandas
if isinstance(df1, (pd.DataFrame, pd.Series)):
df1 = to_pandas(df1)
if isinstance(df2, (pd.DataFrame, pd.Series)):
df2 = to_pandas(df2)
if isinstance(df1, pandas.DataFrame) and isinstance(df2, pandas.DataFrame):
if (df1.empty and not df2.empty) or (df2.empty and not df1.empty):
assert False, "One of the passed frames is empty, when other isn't"
elif df1.empty and df2.empty and type(df1) != type(df2):
assert (
False
), f"Empty frames have different types: {type(df1)} != {type(df2)}"
if isinstance(df1, pandas.DataFrame) and isinstance(df2, pandas.DataFrame):
assert_frame_equal(
df1,
df2,
check_dtype=False,
check_datetimelike_compat=True,
check_index_type=False,
check_column_type=False,
check_categorical=False,
)
df_categories_equals(df1, df2)
elif isinstance(df1, pandas.Index) and isinstance(df2, pandas.Index):
assert_index_equal(df1, df2)
elif isinstance(df1, pandas.Series) and isinstance(df2, pandas.Series):
assert_series_equal(df1, df2, check_dtype=False, check_series_type=False)
elif isinstance(df1, groupby_types) and isinstance(df2, groupby_types):
for g1, g2 in zip(df1, df2):
assert g1[0] == g2[0]
df_equals(g1[1], g2[1])
elif (
isinstance(df1, pandas.Series)
and isinstance(df2, pandas.Series)
and df1.empty
and df2.empty
):
assert all(df1.index == df2.index)
assert df1.dtypes == df2.dtypes
elif isinstance(df1, pandas.core.arrays.numpy_.PandasArray):
assert isinstance(df2, pandas.core.arrays.numpy_.PandasArray)
assert df1 == df2
elif isinstance(df1, np.recarray) and isinstance(df2, np.recarray):
np.testing.assert_array_equal(df1, df2)
else:
if df1 != df2:
np.testing.assert_almost_equal(df1, df2)
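# Illustrative sketch (not part of the original suite): the helper below is
# hypothetical and never called; it only shows that `df_equals` accepts any mix
# of Modin and pandas objects and converts Modin frames to pandas before comparing.
def _example_df_equals_usage():
    md_df = pd.DataFrame({"a": [1, 2, 3]})
    pd_df = pandas.DataFrame({"a": [1, 2, 3]})
    # Passes silently when the contents match; raises an AssertionError otherwise.
    df_equals(md_df, pd_df)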
def modin_df_almost_equals_pandas(modin_df, pandas_df):
df_categories_equals(modin_df._to_pandas(), pandas_df)
modin_df = to_pandas(modin_df)
if hasattr(modin_df, "select_dtypes"):
modin_df = modin_df.select_dtypes(exclude=["category"])
if hasattr(pandas_df, "select_dtypes"):
pandas_df = pandas_df.select_dtypes(exclude=["category"])
difference = modin_df - pandas_df
diff_max = difference.max()
if isinstance(diff_max, pandas.Series):
diff_max = diff_max.max()
assert (
modin_df.equals(pandas_df)
or diff_max < 0.0001
or (all(modin_df.isna().all()) and all(pandas_df.isna().all()))
)
def df_is_empty(df):
"""Tests if df is empty.
Args:
df: (pandas or modin DataFrame) dataframe to test if empty.
Returns:
True if df is empty.
"""
assert df.size == 0 and df.empty
assert df.shape[0] == 0 or df.shape[1] == 0
def arg_keys(arg_name, keys):
"""Appends arg_name to the front of all values in keys.
Args:
arg_name: (string) String containing argument name.
keys: (list of strings) Possible inputs of argument.
Returns:
        List of strings with arg_name appended to the front of each key.
"""
return ["{0}_{1}".format(arg_name, key) for key in keys]
def name_contains(test_name, vals):
"""Determines if any string in vals is a substring of test_name.
Args:
test_name: (string) String to determine if contains substrings.
vals: (list of strings) List of substrings to test for.
Returns:
True if a substring in vals is in test_name, else False.
"""
return any(val in test_name for val in vals)
def check_df_columns_have_nans(df, cols):
"""Checks if there are NaN values in specified columns of a dataframe.
:param df: Dataframe to check.
:param cols: One column name or list of column names.
:return:
True if specified columns of dataframe contains NaNs.
"""
return (
pandas.api.types.is_list_like(cols)
and (
any(isinstance(x, str) and x in df.columns and df[x].hasnans for x in cols)
or any(
isinstance(x, pd.Series) and x._parent is df and x.hasnans for x in cols
)
)
) or (
not pandas.api.types.is_list_like(cols)
and cols in df.columns
and df[cols].hasnans
)
def eval_general(
modin_df,
pandas_df,
operation,
comparator=df_equals,
__inplace__=False,
check_exception_type=True,
raising_exceptions=None,
check_kwargs_callable=True,
md_extra_kwargs=None,
**kwargs,
):
if raising_exceptions:
assert (
check_exception_type
), "if raising_exceptions is not None or False, check_exception_type should be True"
md_kwargs, pd_kwargs = {}, {}
def execute_callable(fn, inplace=False, md_kwargs={}, pd_kwargs={}):
try:
pd_result = fn(pandas_df, **pd_kwargs)
except Exception as pd_e:
if check_exception_type is None:
return None
with pytest.raises(Exception) as md_e:
# repr to force materialization
repr(fn(modin_df, **md_kwargs))
if check_exception_type:
assert isinstance(md_e.value, type(pd_e))
if raising_exceptions:
assert not isinstance(
md_e.value, tuple(raising_exceptions)
), f"not acceptable exception type: {md_e.value}"
else:
md_result = fn(modin_df, **md_kwargs)
return (md_result, pd_result) if not __inplace__ else (modin_df, pandas_df)
for key, value in kwargs.items():
if check_kwargs_callable and callable(value):
values = execute_callable(value)
            # this means the callable raised an exception
if values is None:
return
else:
md_value, pd_value = values
else:
md_value, pd_value = value, value
md_kwargs[key] = md_value
pd_kwargs[key] = pd_value
if md_extra_kwargs:
assert isinstance(md_extra_kwargs, dict)
md_kwargs.update(md_extra_kwargs)
values = execute_callable(
operation, md_kwargs=md_kwargs, pd_kwargs=pd_kwargs, inplace=__inplace__
)
if values is not None:
comparator(*values)
def eval_io(
fn_name,
comparator=df_equals,
cast_to_str=False,
check_exception_type=True,
raising_exceptions=io_ops_bad_exc,
check_kwargs_callable=True,
modin_warning=None,
md_extra_kwargs=None,
*args,
**kwargs,
):
"""Evaluate I/O operation outputs equality check.
Parameters
----------
fn_name: str
I/O operation name ("read_csv" for example).
comparator: obj
Function to perform comparison.
cast_to_str: bool
        There could be some mismatches in dtypes, so we're
casting the whole frame to `str` before comparison.
See issue #1931 for details.
check_exception_type: bool
        Whether to check exception types when the operation fails
        (compare the exception types raised by pandas and Modin).
raising_exceptions: Exception or list of Exceptions
Exceptions that should be raised even if they are raised
both by Pandas and Modin (check evaluated only if
`check_exception_type` passed as `True`).
modin_warning: obj
Warning that should be raised by Modin.
md_extra_kwargs: dict
Modin operation specific kwargs.
"""
def applyier(module, *args, **kwargs):
result = getattr(module, fn_name)(*args, **kwargs)
if cast_to_str:
result = result.astype(str)
return result
def call_eval_general():
eval_general(
pd,
pandas,
applyier,
check_exception_type=check_exception_type,
raising_exceptions=raising_exceptions,
check_kwargs_callable=check_kwargs_callable,
md_extra_kwargs=md_extra_kwargs,
*args,
**kwargs,
)
if modin_warning:
with pytest.warns(modin_warning):
call_eval_general()
else:
call_eval_general()
def eval_io_from_str(csv_str: str, unique_filename: str, **kwargs):
"""Evaluate I/O operation outputs equality check by using `csv_str`
data passed as python str (csv test file will be created from `csv_str`).
Parameters
----------
csv_str: str
Test data for storing to csv file.
unique_filename: str
csv file name.
"""
try:
with open(unique_filename, "w") as f:
f.write(csv_str)
eval_io(
filepath_or_buffer=unique_filename,
fn_name="read_csv",
**kwargs,
)
finally:
if os.path.exists(unique_filename):
try:
os.remove(unique_filename)
except PermissionError:
pass
def create_test_dfs(*args, **kwargs):
post_fn = kwargs.pop("post_fn", lambda df: df)
return map(
post_fn, [pd.DataFrame(*args, **kwargs), pandas.DataFrame(*args, **kwargs)]
)
def generate_dfs():
df = pandas.DataFrame(
{
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
)
df2 = pandas.DataFrame(
{
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col6": [12, 13, 14, 15],
"col7": [0, 0, 0, 0],
}
)
return df, df2
def generate_multiindex_dfs(axis=1):
def generate_multiindex(index):
return pandas.MultiIndex.from_tuples(
[("a", x) for x in index.values], names=["name1", "name2"]
)
df1, df2 = generate_dfs()
df1.axes[axis], df2.axes[axis] = map(
generate_multiindex, [df1.axes[axis], df2.axes[axis]]
)
return df1, df2
def generate_multiindex(elements_number, nlevels=2, is_tree_like=False):
def generate_level(length, nlevel):
src = ["bar", "baz", "foo", "qux"]
return [src[i % len(src)] + f"-{nlevel}-{i}" for i in range(length)]
if is_tree_like:
for penalty_level in [0, 1]:
lvl_len_f, lvl_len_d = math.modf(
round(elements_number ** (1 / (nlevels - penalty_level)), 12)
)
if lvl_len_d >= 2 and lvl_len_f == 0:
break
if lvl_len_d < 2 or lvl_len_f != 0:
raise RuntimeError(
                f"Can't generate Tree-like MultiIndex with length: {elements_number} and number of levels: {nlevels}"
)
lvl_len = int(lvl_len_d)
result = pd.MultiIndex.from_product(
[generate_level(lvl_len, i) for i in range(nlevels - penalty_level)],
names=[f"level-{i}" for i in range(nlevels - penalty_level)],
)
if penalty_level:
result = pd.MultiIndex.from_tuples(
[("base_level", *ml_tuple) for ml_tuple in result],
names=[f"level-{i}" for i in range(nlevels)],
)
return result.sort_values()
else:
base_level = ["first"] * (elements_number // 2 + elements_number % 2) + [
"second"
] * (elements_number // 2)
primary_levels = [generate_level(elements_number, i) for i in range(1, nlevels)]
arrays = [base_level] + primary_levels
return pd.MultiIndex.from_tuples(
list(zip(*arrays)), names=[f"level-{i}" for i in range(nlevels)]
).sort_values()
def generate_none_dfs():
df = pandas.DataFrame(
{
"col1": [0, 1, 2, 3],
"col2": [4, 5, None, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [None, None, None, None],
}
)
df2 = pandas.DataFrame(
{
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col6": [12, 13, 14, 15],
"col7": [0, 0, 0, 0],
}
)
return df, df2
def get_unique_filename(
test_name: str = "test",
kwargs: dict = {},
extension: str = "csv",
data_dir: str = IO_OPS_DATA_DIR,
suffix: str = "",
debug_mode=False,
):
"""Returns unique file name with specified parameters.
Parameters
----------
test_name: str
name of the test for which the unique file name is needed.
    kwargs: dict
        Unique combination of test parameters used to create the unique name.
extension: str
Extension of unique file.
data_dir: str
Data directory where test files will be created.
suffix: str
String to append to the resulted name.
debug_mode: bool
Get unique filename containing kwargs values.
Otherwise kwargs values will be replaced with hash equivalent.
Returns
-------
Unique file name.
"""
suffix_part = f"_{suffix}" if suffix else ""
extension_part = f".{extension}" if extension else ""
if debug_mode:
        # shortcut if kwargs parameters are not provided
if len(kwargs) == 0 and extension == "csv" and suffix == "":
return os.path.join(data_dir, (test_name + suffix_part + f".{extension}"))
assert "." not in extension, "please provide pure extension name without '.'"
prohibited_chars = ['"', "\n"]
non_prohibited_char = "np_char"
char_counter = 0
kwargs_name = dict(kwargs)
for key, value in kwargs_name.items():
for char in prohibited_chars:
if isinstance(value, str) and char in value or callable(value):
kwargs_name[key] = non_prohibited_char + str(char_counter)
char_counter += 1
parameters_values = "_".join(
[
str(value)
if not isinstance(value, (list, tuple))
else "_".join([str(x) for x in value])
for value in kwargs_name.values()
]
)
return os.path.join(
data_dir, test_name + parameters_values + suffix_part + extension_part
)
else:
import uuid
return os.path.join(data_dir, uuid.uuid1().hex + suffix_part + extension_part)
def get_random_string():
random_string = "".join(
random_state.choice([x for x in ascii_letters], size=10).tolist()
)
return random_string
def insert_lines_to_csv(
csv_name: str,
lines_positions: list,
lines_type: str = "blank",
encoding: str = None,
**csv_reader_writer_params,
):
"""Insert lines to ".csv" file.
Parameters
----------
csv_name: str
".csv" file that should be modified.
lines_positions: list of ints
        Line positions that should be modified (serial number
        of the line; begins at 0, ends at <rows_number> - 1).
lines_type: str
Lines types that should be inserted to ".csv" file. Possible types:
"blank" - empty line without any delimiters/separators,
"bad" - lines with len(lines_data) > cols_number
encoding: str
Encoding type that should be used during file reading and writing.
"""
cols_number = len(pandas.read_csv(csv_name, nrows=1).columns)
if lines_type == "blank":
lines_data = []
elif lines_type == "bad":
cols_number = len(pandas.read_csv(csv_name, nrows=1).columns)
lines_data = [x for x in range(cols_number + 1)]
else:
raise ValueError(
f"acceptable values for parameter are ['blank', 'bad'], actually passed {lines_type}"
)
lines = []
dialect = "excel"
with open(csv_name, "r", encoding=encoding, newline="") as read_file:
try:
dialect = csv.Sniffer().sniff(read_file.read())
read_file.seek(0)
except Exception:
dialect = None
reader = csv.reader(
read_file,
dialect=dialect if dialect is not None else "excel",
**csv_reader_writer_params,
)
counter = 0
for row in reader:
if counter in lines_positions:
lines.append(lines_data)
else:
lines.append(row)
counter += 1
with open(csv_name, "w", encoding=encoding, newline="") as write_file:
writer = csv.writer(
write_file,
dialect=dialect if dialect is not None else "excel",
**csv_reader_writer_params,
)
writer.writerows(lines)
def _get_open_files():
"""
psutil open_files() can return a lot of extra information that we can allow to
be different, like file position; for simplicity we care about path and fd only.
"""
return sorted((info.path, info.fd) for info in psutil.Process().open_files())
def check_file_leaks(func):
"""
A decorator that ensures that no *newly* opened file handles are left
after decorated function is finished.
"""
if not TrackFileLeaks.get():
return func
@functools.wraps(func)
def check(*a, **kw):
fstart = _get_open_files()
try:
return func(*a, **kw)
finally:
leaks = []
for item in _get_open_files():
try:
fstart.remove(item)
except ValueError:
# ignore files in /proc/, as they have nothing to do with
# modin reading any data (and this is what we care about)
if not item[0].startswith("/proc/"):
leaks.append(item)
assert (
not leaks
), f"Unexpected open handles left for: {', '.join(item[0] for item in leaks)}"
return check
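# Illustrative sketch (not part of the original suite): `check_file_leaks` is
# meant to wrap a test function so that any file handle opened inside the test
# and never closed fails the test. The helper below is hypothetical and never
# called; it only documents the intended decorator usage.
def _example_check_file_leaks_usage():
    @check_file_leaks
    def _leak_checked_read():
        frame = pandas.read_csv(BytesIO(b"a,b\n1,2\n"))
        assert len(frame) == 1
    return _leak_checked_read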
def dummy_decorator():
"""A problematic decorator that does not use `functools.wraps`. This introduces unwanted local variables for
inspect.currentframe. This decorator is used in test_io to test `read_csv` and `read_table`
"""
def wrapper(method):
def wrapped_function(self, *args, **kwargs):
result = method(self, *args, **kwargs)
return result
return wrapped_function
return wrapper
def generate_dataframe(row_size=NROWS, additional_col_values=None):
dates = pandas.date_range("2000", freq="h", periods=row_size)
data = {
"col1": np.arange(row_size) * 10,
"col2": [str(x.date()) for x in dates],
"col3": np.arange(row_size) * 10,
"col4": [str(x.time()) for x in dates],
"col5": [get_random_string() for _ in range(row_size)],
"col6": random_state.uniform(low=0.0, high=10000.0, size=row_size),
}
if additional_col_values is not None:
assert isinstance(additional_col_values, (list, tuple))
data.update(
{
"col7": random_state.choice(additional_col_values, size=row_size),
}
)
return pandas.DataFrame(data)
def _make_csv_file(filenames):
def _csv_file_maker(
filename,
row_size=NROWS,
force=True,
delimiter=",",
encoding=None,
compression="infer",
additional_col_values=None,
remove_randomness=False,
add_blank_lines=False,
add_bad_lines=False,
add_nan_lines=False,
thousands_separator=None,
decimal_separator=None,
comment_col_char=None,
quoting=csv.QUOTE_MINIMAL,
quotechar='"',
doublequote=True,
escapechar=None,
line_terminator=None,
):
if os.path.exists(filename) and not force:
pass
else:
df = generate_dataframe(row_size, additional_col_values)
if remove_randomness:
df = df[["col1", "col2", "col3", "col4"]]
if add_nan_lines:
for i in range(0, row_size, row_size // (row_size // 10)):
df.loc[i] = pandas.Series()
if comment_col_char:
char = comment_col_char if isinstance(comment_col_char, str) else "#"
df.insert(
loc=0,
column="col_with_comments",
value=[char if (x + 2) == 0 else x for x in range(row_size)],
)
if thousands_separator:
for col_id in ["col1", "col3"]:
df[col_id] = df[col_id].apply(
lambda x: f"{x:,d}".replace(",", thousands_separator)
)
df["col6"] = df["col6"].apply(
lambda x: f"{x:,f}".replace(",", thousands_separator)
)
filename = (
f"{filename}.{COMP_TO_EXT[compression]}"
if compression != "infer"
else filename
)
df.to_csv(
filename,
sep=delimiter,
encoding=encoding,
compression=compression,
index=False,
decimal=decimal_separator if decimal_separator else ".",
line_terminator=line_terminator,
quoting=quoting,
quotechar=quotechar,
doublequote=doublequote,
escapechar=escapechar,
)
csv_reader_writer_params = {
"delimiter": delimiter,
"doublequote": doublequote,
"escapechar": escapechar,
"lineterminator": line_terminator if line_terminator else os.linesep,
"quotechar": quotechar,
"quoting": quoting,
}
if add_blank_lines:
insert_lines_to_csv(
csv_name=filename,
lines_positions=[
x for x in range(5, row_size, row_size // (row_size // 10))
],
lines_type="blank",
encoding=encoding,
**csv_reader_writer_params,
)
if add_bad_lines:
insert_lines_to_csv(
csv_name=filename,
lines_positions=[
x for x in range(6, row_size, row_size // (row_size // 10))
],
lines_type="bad",
encoding=encoding,
**csv_reader_writer_params,
)
filenames.append(filename)
return df
return _csv_file_maker
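# Illustrative sketch (not part of the original suite): `_make_csv_file` is a
# factory that the test suite can expose as a fixture; the maker writes the
# generated frame to `filename` and records the path so the files can be cleaned
# up afterwards. The helper below is hypothetical and never called.
def _example_make_csv_file_usage():
    created_files = []
    csv_maker = _make_csv_file(created_files)
    os.makedirs(IO_OPS_DATA_DIR, exist_ok=True)
    written_df = csv_maker(
        filename=get_unique_filename(test_name="example_csv", debug_mode=True),
        row_size=16,
        add_blank_lines=True,
    )
    # Remove the generated file(s) once they are no longer needed.
    teardown_test_files(created_files)
    return written_df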
def teardown_test_file(test_path):
if os.path.exists(test_path):
        # PermissionError can occur because of issue #2533
try:
os.remove(test_path)
except PermissionError:
pass
def teardown_test_files(test_paths: list):
for path in test_paths:
teardown_test_file(path)
def sort_index_for_equal_values(series, ascending=False):
if series.index.dtype == np.float64:
# HACK: workaround for pandas bug:
# https://github.com/pandas-dev/pandas/issues/34455
series.index = series.index.astype("str")
res = series.groupby(series, sort=False).apply(
lambda df: df.sort_index(ascending=ascending)
)
if res.index.nlevels > series.index.nlevels:
# Sometimes GroupBy adds an extra level with 'by' to the result index.
# GroupBy is very inconsistent about when it's doing this, so that's
# why this clumsy if-statement is used.
res.index = res.index.droplevel(0)
res.name = series.name
return res
|
from transformers import EvalPrediction
from sklearn.metrics import precision_recall_fscore_support
import numpy as np
def compute_metrics(pred: EvalPrediction):
"""Compute recall at the masked position
"""
mask = pred.label_ids != -100
# filter everything except the masked position and flatten tensors
labels = pred.label_ids[mask].flatten()
preds = pred.predictions[mask].flatten()
_, recall, _, _ = precision_recall_fscore_support(y_true=labels, y_pred=preds, average='micro')
return {'recall': recall}
def self_test():
pred = EvalPrediction(
label_ids=np.array([
[-100, 1, -100],
[ 2, -100, -100],
[-100, -100, 3],
[-100, -100, 4]
]),
predictions=np.array([
[-100, 1, -100], # 1 true positive
[ 2, -100, -100], # 1 true positive
[ 2, 6, 8], # 1 false positive, irrelevant pos will be ignored
[ 1, 7, 4] # 1 true positive, irrelevant pos will be ignored
])
)
m = compute_metrics(pred)
    print(f"recall={m['recall']}")
assert m['recall'] == 0.75
print("Looks like it is working!")
if __name__ == "__main__":
self_test()
|
from transformers import EvalPrediction
from sklearn.metrics import precision_recall_fscore_support
import numpy as np
def compute_metrics(pred: EvalPrediction):
"""Compute recall at the masked position
"""
mask = pred.label_ids != -100
# filter everything except the masked position and flatten tensors
labels = pred.label_ids[mask].flatten()
preds = pred.predictions[mask].flatten()
_, recall, _, _ = precision_recall_fscore_support(y_true=labels, y_pred=preds, average='micro')
return {'recall': recall}
def self_test():
pred = EvalPrediction(
label_ids=np.array([
[-100, 1, -100],
[ 2, -100, -100],
[-100, -100, 3],
[-100, -100, 4]
]),
predictions=np.array([
[-100, 1, -100], # 1 true positive
[ 2, -100, -100], # 1 true positive
[ 2, 6, 8], # 1 false positive, irrelevant pos will be ignored
[ 1, 7, 4] # 1 true positive, irrelevant pos will be ignored
])
)
m = compute_metrics(pred)
print(f"recall={m['recall']}")
assert m['recall'] == 0.75
print("Looks like it is working!")
if __name__ == "__main__":
self_test()
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Operators for nn."""
import math
import operator
from functools import reduce
import numpy as np
from ... import context
from .. import signature as sig
from ..._checkparam import Validator as validator
from ..._checkparam import Rel
from ...common import dtype as mstype
from ..primitive import Primitive, PrimitiveWithInfer, PrimitiveWithCheck, prim_attr_register
from ..operations.math_ops import _infer_shape_reduce
def _check_positive_int_or_tuple(arg_name, arg_value, prim_name, allow_four=False, ret_four=False):
"""
    Checks whether an argument is a positive int or a tuple with 2 or 4 (when allow_four is True)
    positive int elements.
"""
def _raise_message():
raise ValueError(f"For '{prim_name}' attr '{arg_name}' should be an positive int number or a tuple of two "
f"{"or four " if allow_four else ""}positive int numbers, but got {arg_value}")
def _get_return_value():
if isinstance(arg_value, int):
ret = (1, 1, arg_value, arg_value) if ret_four else (arg_value, arg_value)
elif len(arg_value) == 2:
ret = (1, 1, arg_value[0], arg_value[1]) if ret_four else arg_value
elif len(arg_value) == 4:
if not allow_four:
_raise_message()
ret = arg_value if ret_four else (arg_value[2], arg_value[3])
else:
_raise_message()
return ret
validator.check_value_type(arg_name, arg_value, (int, tuple), prim_name)
ret_value = _get_return_value()
for item in ret_value:
if isinstance(item, int) and item > 0:
continue
_raise_message()
return ret_value
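# Illustrative results of the helper above, derived from the checks it performs:
#   _check_positive_int_or_tuple('kernel_size', 3, 'Conv2D')                        -> (3, 3)
#   _check_positive_int_or_tuple('stride', 2, 'Conv2D', ret_four=True)              -> (1, 1, 2, 2)
#   _check_positive_int_or_tuple('stride', (1, 1, 2, 3), 'Conv2D', allow_four=True) -> (2, 3)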
class Flatten(PrimitiveWithInfer):
r"""
Flattens a tensor without changing its batch size on the 0-th axis.
Inputs:
- **input_x** (Tensor) - Tensor of shape :math:`(N, \ldots)` to be flattened.
Outputs:
Tensor, the shape of the output tensor is :math:`(N, X)`, where :math:`X` is
the product of the remaining dimension.
Examples:
>>> input_tensor = Tensor(np.ones(shape=[1, 2, 3, 4]), mindspore.float32)
>>> flatten = P.Flatten()
>>> output = flatten(input_tensor)
>>> assert output.shape == (1, 24)
"""
@prim_attr_register
def __init__(self):
pass
def infer_shape(self, input_x):
validator.check_integer('input_x rank', len(input_x), 1, Rel.GE, self.name)
prod = 1 if len(input_x) == 1 else reduce(operator.mul, input_x[1:])
return input_x[0], prod
def infer_dtype(self, input_x):
validator.check_subclass("input_x", input_x, mstype.tensor, self.name)
return input_x
class Softmax(PrimitiveWithInfer):
r"""
Softmax operation.
Applies the Softmax operation to the input tensor on the specified axis.
    Suppose a slice in the given axis is :math:`x`, then for each element :math:`x_i`
the Softmax function is shown as follows:
.. math::
\text{output}(x_i) = \frac{exp(x_i)}{\sum_{j = 0}^{N-1}\exp(x_j)},
where :math:`N` is the length of the tensor.
Args:
axis (Union[int, tuple]): The axis to do the Softmax operation. Default: -1.
Inputs:
- **logits** (Tensor) - The input of Softmax, with float16 or float32 data type.
Outputs:
Tensor, with the same type and shape as the logits.
Examples:
>>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
>>> softmax = P.Softmax()
>>> softmax(input_x)
[0.01165623, 0.03168492, 0.08612854, 0.23412167, 0.6364086]
"""
@prim_attr_register
def __init__(self, axis=-1):
self.init_prim_io_names(inputs=['x'], outputs=['output'])
validator.check_value_type("axis", axis, [int, tuple], self.name)
if isinstance(axis, int):
self.add_prim_attr('axis', (axis,))
for item in self.axis:
validator.check_value_type("item of axis", item, [int], self.name)
def infer_shape(self, logits):
validator.check_integer("length of axis", len(self.axis), 1, Rel.GE, self.name)
rank = len(logits)
for axis_v in self.axis:
validator.check_int_range("axis", axis_v, -rank, rank, Rel.INC_LEFT, self.name)
return logits
def infer_dtype(self, logits):
validator.check_subclass("logits", logits, mstype.tensor, self.name)
validator.check_tensor_type_same({"logits": logits}, mstype.float_type, self.name)
return logits
class LogSoftmax(PrimitiveWithInfer):
r"""
Log Softmax activation function.
Applies the Log Softmax function to the input tensor on the specified axis.
    Suppose a slice in the given axis is :math:`x`, then for each element :math:`x_i`
the Log Softmax function is shown as follows:
.. math::
\text{output}(x_i) = \log \left(\frac{exp(x_i)} {\sum_{j = 0}^{N-1}\exp(x_j)}\right),
where :math:`N` is the length of the Tensor.
Args:
axis (int): The axis to do the Log softmax operation. Default: -1.
Inputs:
- **logits** (Tensor) - The input of Log Softmax, with float16 or float32 data type.
Outputs:
Tensor, with the same type and shape as the logits.
Examples:
>>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
>>> log_softmax = P.LogSoftmax()
>>> log_softmax(input_x)
[-4.4519143, -3.4519143, -2.4519143, -1.4519144, -0.4519144]
"""
@prim_attr_register
def __init__(self, axis=-1):
validator.check_value_type("axis", axis, [int], self.name)
def infer_shape(self, logits):
rank = len(logits)
validator.check_int_range('axis', self.axis, -rank, rank, Rel.INC_LEFT, self.name)
return logits
def infer_dtype(self, logits):
validator.check_subclass("logits", logits, mstype.tensor, self.name)
validator.check_tensor_type_same({"logits": logits}, mstype.float_type, self.name)
return logits
class Softplus(PrimitiveWithInfer):
r"""
Softplus activation function.
Softplus is a smooth approximation to the ReLU function.
The function is shown as follows:
.. math::
\text{output} = \log(1 + \exp(\text{input_x})),
Inputs:
- **input_x** (Tensor) - The input tensor whose data type should be float.
Outputs:
Tensor, with the same type and shape as the `input_x`.
Examples:
>>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
>>> softplus = P.Softplus()
>>> softplus(input_x)
[1.3132615, 2.126928, 3.0485873, 4.01815, 5.0067153]
"""
@prim_attr_register
def __init__(self):
"""init Softplus"""
self.init_prim_io_names(inputs=['x'], outputs=['output'])
def infer_shape(self, input_x):
return input_x
def infer_dtype(self, input_x):
validator.check_tensor_type_same({'input_x': input_x}, mstype.float_type, self.name)
return input_x
class Softsign(PrimitiveWithInfer):
r"""
Softsign activation function.
The function is shown as follows:
.. math::
\text{output} = \frac{\text{input_x}}{1 + \left| \text{input_x} \right|},
Inputs:
- **input_x** (Tensor) - The input tensor whose data type should be float16 or float32.
Outputs:
Tensor, with the same type and shape as the `input_x`.
Examples:
>>> input_x = Tensor(np.array([0, -1, 2, 30, -30]), mindspore.float32)
>>> softsign = P.Softsign()
>>> softsign(input_x)
[0. -0.5 0.6666667 0.9677419 -0.9677419]
"""
@prim_attr_register
def __init__(self):
"""init Softsign"""
self.init_prim_io_names(inputs=['x'], outputs=['output'])
def infer_shape(self, input_x):
return input_x
def infer_dtype(self, input_x):
validator.check_tensor_type_same({'input_x': input_x}, [mstype.float16, mstype.float32], self.name)
return input_x
class ReLU(PrimitiveWithInfer):
r"""
Computes ReLU(Rectified Linear Unit) of input tensor element-wise.
It returns :math:`\max(x,\ 0)` element-wise.
Inputs:
- **input_x** (Tensor) - The input tensor.
Outputs:
Tensor, with the same type and shape as the `input_x`.
Examples:
>>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> relu = P.ReLU()
>>> result = relu(input_x)
[[0, 4.0, 0.0], [2.0, 0.0, 9.0]]
"""
@prim_attr_register
def __init__(self):
"""init ReLU"""
self.init_prim_io_names(inputs=['x'], outputs=['output'])
def infer_shape(self, input_x):
return input_x
def infer_dtype(self, input_x):
validator.check_tensor_type_same({'input_x': input_x}, mstype.number_type, self.name)
return input_x
class ReLU6(PrimitiveWithInfer):
r"""
Computes ReLU(Rectified Linear Unit) upper bounded by 6 of input tensor element-wise.
It returns :math:`\min(\max(0,x), 6)` element-wise.
Inputs:
- **input_x** (Tensor) - The input tensor. With float16 or float32 data type.
Outputs:
Tensor, with the same type and shape as the `input_x`.
Examples:
>>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> relu6 = P.ReLU6()
>>> result = relu6(input_x)
"""
@prim_attr_register
def __init__(self):
"""init ReLU6"""
self.init_prim_io_names(inputs=['x'], outputs=['output'])
def infer_shape(self, input_x):
return input_x
def infer_dtype(self, input_x):
validator.check_tensor_type_same({'input_x': input_x}, (mstype.float16, mstype.float32), self.name)
return input_x
class ReLUV2(PrimitiveWithInfer):
r"""
Computes ReLU(Rectified Linear Unit) of input tensor element-wise.
It returns :math:`\max(x,\ 0)` element-wise.
Inputs:
- **input_x** (Tensor) - The input tensor should be a 4-D tensor.
Outputs:
- **output** (Tensor) - Has the same type and shape as the `input_x`.
- **mask** (Tensor) - A tensor whose data type must be uint8.
Examples:
>>> input_x = Tensor(np.array([[[[1, -2], [-3, 4]], [[-5, 6], [7, -8]]]]), mindspore.float32)
>>> relu_v2 = P.ReLUV2()
>>> output = relu_v2(input_x)
([[[[1., 0.], [0., 4.]], [[0., 6.], [7., 0.]]]],
[[[[1, 0], [2, 0]], [[2, 0], [1, 0]]]])
"""
@prim_attr_register
def __init__(self):
"""init ReLUV2"""
self.init_prim_io_names(inputs=['x'], outputs=['output', 'mask'])
def __infer__(self, input_x):
input_shape = list(input_x['shape'])
input_dtype = input_x['dtype']
mask_shape = []
if len(input_shape) != 4:
raise ValueError("The `input_x` should be a 4-D tensor, "
f"but got a {len(input_shape)}-D tensor whose shape is {input_shape}")
        # Build the mask shape: the channel dimension is packed into groups of 32
        # channels for 8-bit dtypes and 16 channels otherwise, with an extra
        # trailing dimension of 4 (8-bit dtypes) or 2 appended.
        for index, dim in enumerate(input_shape):
            if index == 1:
                if input_dtype in (mstype.uint8, mstype.int8):
                    mask_shape.append((input_shape[1] + 31) // 32)
                else:
                    mask_shape.append((input_shape[1] + 15) // 16)
            else:
                mask_shape.append(dim)
        if input_dtype in (mstype.uint8, mstype.int8):
            mask_shape.append(4)
        else:
            mask_shape.append(2)
output_shape = (input_x['shape'], mask_shape)
validator.check_subclass("input_x", input_dtype, mstype.tensor, self.name)
validator.check_tensor_type_same({'input_x': input_dtype}, mstype.number_type, self.name)
mask_dtype = mstype.uint8
output_dtype = (input_dtype, mask_dtype)
return {'shape': output_shape,
'dtype': output_dtype,
'value': None}
class Elu(PrimitiveWithInfer):
r"""
Computes exponential linear: `alpha * (exp(x) - 1)` if x < 0, `x` otherwise.
The data type of input tensor should be float.
Args:
alpha (float): The coefficient of negative factor whose type is float,
only support '1.0' currently. Default: 1.0.
Inputs:
- **input_x** (Tensor) - The input tensor whose data type should be float.
Outputs:
Tensor, has the same shape and data type as `input_x`.
Examples:
>>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> elu = P.Elu()
>>> result = elu(input_x)
Tensor([[-0.632 4.0 -0.999]
[2.0 -0.993 9.0 ]], shape=(2, 3), dtype=mindspore.float32)
"""
@prim_attr_register
def __init__(self, alpha=1.0):
"""Init Elu"""
validator.check_value_type("alpha", alpha, [float], self.name)
validator.check_number("alpha", alpha, 1.0, Rel.EQ, self.name)
def infer_shape(self, input_x):
return input_x
def infer_dtype(self, input_x):
validator.check_tensor_type_same({'input_x': input_x}, mstype.float_type, self.name)
return input_x
class HSwish(PrimitiveWithInfer):
r"""
Hard swish activation function.
Applies hswish-type activation element-wise. The input is a Tensor with any valid shape.
Hard swish is defined as:
.. math::
\text{hswish}(x_{i}) = x_{i} * \frac{ReLU6(x_{i} + 3)}{6},
where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.
Inputs:
- **input_data** (Tensor) - The input of HSwish, data type should be float16 or float32.
Outputs:
Tensor, with the same type and shape as the `input_data`.
Examples:
>>> hswish = P.HSwish()
>>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
>>> result = hswish(input_x)
"""
@prim_attr_register
def __init__(self):
self.init_prim_io_names(inputs=['x'], outputs=['output'])
def infer_shape(self, xshape):
return xshape
def infer_dtype(self, x_dtype):
validator.check_tensor_type_same({"x": x_dtype}, (mstype.float16, mstype.float32), self.name)
return x_dtype
class Sigmoid(PrimitiveWithInfer):
r"""
Sigmoid activation function.
Computes Sigmoid of input element-wise. The Sigmoid function is defined as:
.. math::
\text{sigmoid}(x_i) = \frac{1}{1 + exp(-x_i)},
where :math:`x_i` is the element of the input.
Inputs:
- **input_x** (Tensor) - The input of Sigmoid, data type should be float16 or float32.
Outputs:
Tensor, with the same type and shape as the input_x.
Examples:
>>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
>>> sigmoid = P.Sigmoid()
>>> sigmoid(input_x)
[0.73105866, 0.880797, 0.9525742, 0.98201376, 0.9933071]
"""
@prim_attr_register
def __init__(self):
self.init_prim_io_names(inputs=['x'], outputs=['output'])
def infer_shape(self, input_x):
return input_x
def infer_dtype(self, input_x):
validator.check_tensor_type_same({"input_x": input_x}, (mstype.float16, mstype.float32), self.name)
return input_x
class HSigmoid(PrimitiveWithInfer):
r"""
Hard sigmoid activation function.
Applies hard sigmoid activation element-wise. The input is a Tensor with any valid shape.
Hard sigmoid is defined as:
.. math::
\text{hsigmoid}(x_{i}) = max(0, min(1, \frac{x_{i} + 3}{6})),
where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.
Inputs:
- **input_data** (Tensor) - The input of HSigmoid, data type should be float16 or float32.
Outputs:
Tensor, with the same type and shape as the `input_data`.
Examples:
>>> hsigmoid = P.HSigmoid()
>>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
>>> result = hsigmoid(input_x)
"""
@prim_attr_register
def __init__(self):
self.init_prim_io_names(inputs=['x'], outputs=['output'])
def infer_shape(self, x_shape):
return x_shape
def infer_dtype(self, x_dtype):
validator.check_tensor_type_same({"x": x_dtype}, (mstype.float16, mstype.float32), self.name)
return x_dtype
class Tanh(PrimitiveWithInfer):
r"""
Tanh activation function.
Computes hyperbolic tangent of input element-wise. The Tanh function is defined as:
.. math::
tanh(x_i) = \frac{\exp(x_i) - \exp(-x_i)}{\exp(x_i) + \exp(-x_i)} = \frac{\exp(2x_i) - 1}{\exp(2x_i) + 1},
where :math:`x_i` is an element of the input Tensor.
Inputs:
- **input_x** (Tensor) - The input of Tanh.
Outputs:
Tensor, with the same type and shape as the input_x.
Examples:
>>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
>>> tanh = P.Tanh()
>>> tanh(input_x)
[0.7615941, 0.9640276, 0.9950548, 0.9993293, 0.99990916]
"""
@prim_attr_register
def __init__(self):
pass
def infer_shape(self, input_x):
return input_x
def infer_dtype(self, input_x):
validator.check_subclass("input_x", input_x, mstype.tensor, self.name)
return input_x
class FusedBatchNorm(Primitive):
r"""
    FusedBatchNorm is a BatchNorm in which the moving mean and moving variance are computed instead of being loaded.
Batch Normalization is widely used in convolutional networks. This operation applies
Batch Normalization over input to avoid internal covariate shift as described in the
paper `Batch Normalization: Accelerating Deep Network Training by Reducing Internal
Covariate Shift <https://arxiv.org/abs/1502.03167>`_. It rescales and recenters the
feature using a mini-batch of data and the learned parameters which can be described
in the following formula.
.. math::
y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta
where :math:`\gamma` is scale, :math:`\beta` is bias, :math:`\epsilon` is epsilon.
Args:
mode (int): Mode of batch normalization, value is 0 or 1. Default: 0.
epsilon (float): A small value added for numerical stability. Default: 1e-5.
momentum (float): The hyper parameter to compute moving average for running_mean and running_var
(e.g. :math:`new\_running\_mean = momentum * running\_mean + (1 - momentum) * current\_mean`).
            Momentum value should be in [0, 1]. Default: 0.1.
Inputs:
- **input_x** (Tensor) - Tensor of shape :math:`(N, C)`.
- **scale** (Tensor) - Tensor of shape :math:`(C,)`.
- **bias** (Tensor) - Tensor of shape :math:`(C,)`.
- **mean** (Tensor) - Tensor of shape :math:`(C,)`.
- **variance** (Tensor) - Tensor of shape :math:`(C,)`.
Outputs:
        Tuple of 5 Tensors, the normalized input and the updated parameters.
- **output_x** (Tensor) - The same type and shape as the `input_x`.
- **updated_scale** (Tensor) - Tensor of shape :math:`(C,)`.
- **updated_bias** (Tensor) - Tensor of shape :math:`(C,)`.
- **updated_moving_mean** (Tensor) - Tensor of shape :math:`(C,)`.
- **updated_moving_variance** (Tensor) - Tensor of shape :math:`(C,)`.
Examples:
>>> input_x = Tensor(np.ones([128, 64, 32, 64]), mindspore.float32)
>>> scale = Tensor(np.ones([64]), mindspore.float32)
>>> bias = Tensor(np.ones([64]), mindspore.float32)
>>> mean = Tensor(np.ones([64]), mindspore.float32)
>>> variance = Tensor(np.ones([64]), mindspore.float32)
>>> op = P.FusedBatchNorm()
>>> output = op(input_x, scale, bias, mean, variance)
"""
@prim_attr_register
def __init__(self, mode=0, epsilon=1e-5, momentum=0.1):
self.init_prim_io_names(inputs=['x', 'scale', 'b', 'mean', 'variance'],
outputs=['y', 'running_mean', 'running_variance', 'save_mean', 'save_inv_variance'])
self.mode = validator.check_integer('mode', mode, [0, 1], Rel.IN, self.name)
self.epsilon = validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, self.name)
self.momentum = validator.check_number_range('momentum', momentum, 0, 1, Rel.INC_BOTH, self.name)
self._update_parameter = True
class FusedBatchNormEx(PrimitiveWithInfer):
r"""
    FusedBatchNormEx is an extension of FusedBatchNorm. FusedBatchNormEx has one more output (output reserve)
    than FusedBatchNorm; the reserve output is used in the backpropagation phase. FusedBatchNorm is a BatchNorm
    in which the moving mean and moving variance are computed instead of being loaded.
Batch Normalization is widely used in convolutional networks. This operation applies
Batch Normalization over input to avoid internal covariate shift as described in the
paper `Batch Normalization: Accelerating Deep Network Training by Reducing Internal
Covariate Shift <https://arxiv.org/abs/1502.03167>`_. It rescales and recenters the
feature using a mini-batch of data and the learned parameters which can be described
in the following formula.
.. math::
y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta
where :math:`\gamma` is scale, :math:`\beta` is bias, :math:`\epsilon` is epsilon.
Args:
mode (int): Mode of batch normalization, value is 0 or 1. Default: 0.
epsilon (float): A small value added for numerical stability. Default: 1e-5.
momentum (float): The hyper parameter to compute moving average for running_mean and running_var
(e.g. :math:`new\_running\_mean = momentum * running\_mean + (1 - momentum) * current\_mean`).
            Momentum value should be in [0, 1]. Default: 0.1.
Inputs:
- **input_x** (Tensor) - The input of FusedBatchNormEx, Tensor of shape :math:`(N, C)`,
data type: float16 or float32.
- **scale** (Tensor) - Parameter scale, same with gamma above-mentioned, Tensor of shape :math:`(C,)`,
data type: float32.
- **bias** (Tensor) - Parameter bias, same with beta above-mentioned, Tensor of shape :math:`(C,)`,
data type: float32.
- **mean** (Tensor) - mean value, Tensor of shape :math:`(C,)`, data type: float32.
- **variance** (Tensor) - variance value, Tensor of shape :math:`(C,)`, data type: float32.
Outputs:
Tuple of 6 Tensors, the normalized input, the updated parameters and reserve.
- **output_x** (Tensor) - The input of FusedBatchNormEx, same type and shape as the `input_x`.
- **updated_scale** (Tensor) - Updated parameter scale, Tensor of shape :math:`(C,)`, data type: float32.
- **updated_bias** (Tensor) - Updated parameter bias, Tensor of shape :math:`(C,)`, data type: float32.
- **updated_moving_mean** (Tensor) - Updated mean value, Tensor of shape :math:`(C,)`, data type: float32.
- **updated_moving_variance** (Tensor) - Updated variance value, Tensor of shape :math:`(C,)`,
data type: float32.
- **reserve** (Tensor) - reserve space, Tensor of shape :math:`(C,)`, data type: float32.
Examples:
>>> input_x = Tensor(np.ones([128, 64, 32, 64]), mindspore.float32)
>>> scale = Tensor(np.ones([64]), mindspore.float32)
>>> bias = Tensor(np.ones([64]), mindspore.float32)
>>> mean = Tensor(np.ones([64]), mindspore.float32)
>>> variance = Tensor(np.ones([64]), mindspore.float32)
>>> op = P.FusedBatchNormEx()
>>> output = op(input_x, scale, bias, mean, variance)
"""
__mindspore_signature__ = (
sig.make_sig('input_x', dtype=sig.sig_dtype.T2),
sig.make_sig('scale', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('bias', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('mean', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('variance', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
)
@prim_attr_register
def __init__(self, mode=0, epsilon=1e-5, momentum=0.1):
self.init_prim_io_names(inputs=['x', 'scale', 'b', 'mean', 'variance'],
outputs=['y', 'save_scale', 'save_bias', 'save_mean', 'save_inv_variance', 'reserve'])
self.mode = validator.check_integer('mode', mode, [0, 1], Rel.IN, self.name)
self.epsilon = validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, self.name)
self.momentum = validator.check_number_range('momentum', momentum, 0, 1, Rel.INC_BOTH, self.name)
self._update_parameter = True
self.add_prim_attr('data_format', "NCHW")
def infer_shape(self, input_x, scale, bias, mean, variance):
validator.check_integer("scale rank", len(scale), 1, Rel.EQ, self.name)
validator.check("scale shape", scale, "bias shape", bias, Rel.EQ, self.name)
validator.check("scale shape[0]", scale[0], "input_x shape[1]", input_x[1], Rel.EQ, self.name)
validator.check_integer("mean rank", len(mean), 1, Rel.EQ, self.name)
validator.check("mean shape", mean, "variance shape", variance, Rel.EQ, self.name)
validator.check("mean shape", mean, "scale shape", scale, Rel.EQ, self.name)
return (input_x, scale, scale, scale, scale, scale)
def infer_dtype(self, input_x, scale, bias, mean, variance):
validator.check_tensor_type_same({"input_x": input_x}, [mstype.float16, mstype.float32], self.name)
args = {"scale": scale, "bias": bias}
validator.check_tensor_type_same(args, [mstype.float32], self.name)
args_moving = {"mean": mean, "variance": variance}
valid_types = [mstype.tensor_type(mstype.float32)]
validator.check_type_same(args_moving, valid_types, self.name)
return (input_x, scale, scale, scale, scale, scale)
class BNTrainingReduce(PrimitiveWithInfer):
"""
    Reduces the input over axes [0, 2, 3], producing the per-channel sum and sum of squares.
Inputs:
- **x** (Tensor) - Tensor of shape :math:`(N, C)`.
Outputs:
- **sum** (Tensor) - Tensor of shape :math:`(C,)`.
- **square_sum** (Tensor) - Tensor of shape :math:`(C,)`.
"""
@prim_attr_register
def __init__(self):
self.init_prim_io_names(inputs=['x'], outputs=['sum', 'square_sum'])
def infer_shape(self, x_shape):
validator.check_integer("x rank", len(x_shape), 4, Rel.EQ, self.name)
return ([x_shape[1]], [x_shape[1]])
def infer_dtype(self, x_type):
return (x_type, x_type)
class BNTrainingUpdate(PrimitiveWithInfer):
"""
    The update step of batch normalization training: consumes the per-channel sums produced by
    BNTrainingReduce and outputs the normalized result together with the updated running statistics.
"""
@prim_attr_register
def __init__(self, isRef=True, epsilon=1e-5, factor=0.1):
self.init_prim_io_names(inputs=['x', 'sum', 'square_sum', 'scale', 'b', 'mean', 'variance'],
outputs=['y', 'running_mean', 'running_variance', 'save_mean', 'save_inv_variance'])
#self.isRef = validator.check_integer('isRef', isRef, [0, 1], Rel.IN)
self.epsilon = validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, 'BNTrainingUpdate')
self.factor = validator.check_number_range('factor', factor, 0, 1, Rel.INC_BOTH, 'BNTrainingUpdate')
def infer_shape(self, x, sum, square_sum, scale, b, mean, variance):
return (x, variance, variance, variance, variance)
def infer_dtype(self, x, sum, square_sum, scale, b, mean, variance):
return (x, variance, variance, variance, variance)
class BatchNorm(PrimitiveWithInfer):
r"""
Batch Normalization for input data and updated parameters.
Batch Normalization is widely used in convolutional neural networks. This operation
applies Batch Normalization over input to avoid internal covariate shift as described
in the paper `Batch Normalization: Accelerating Deep Network Training by Reducing Internal
Covariate Shift <https://arxiv.org/abs/1502.03167>`_. It rescales and recenters the
features using a mini-batch of data and the learned parameters which can be described
in the following formula,
.. math::
y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta
where :math:`\gamma` is scale, :math:`\beta` is bias, :math:`\epsilon` is epsilon.
Args:
is_training (bool): If `is_training` is True, `mean` and `variance` are computed during training.
If `is_training` is False, they're loaded from checkpoint during inference. Default: False.
epsilon (float): A small value added for numerical stability. Default: 1e-5.
Inputs:
- **input_x** (Tensor) - Tensor of shape :math:`(N, C)`, with float16 or float32 data type.
- **scale** (Tensor) - Tensor of shape :math:`(C,)`, with float16 or float32 data type.
- **bias** (Tensor) - Tensor of shape :math:`(C,)`, has the same data type with `scale`.
- **mean** (Tensor) - Tensor of shape :math:`(C,)`, with float16 or float32 data type.
- **variance** (Tensor) - Tensor of shape :math:`(C,)`, has the same data type with `mean`.
Outputs:
        Tuple of 5 Tensors, the normalized inputs and the updated parameters.
- **output_x** (Tensor) - The same type and shape as the input_x. The shape is :math:`(N, C)`.
- **updated_scale** (Tensor) - Tensor of shape :math:`(C,)`.
- **updated_bias** (Tensor) - Tensor of shape :math:`(C,)`.
- **reserve_space_1** (Tensor) - Tensor of shape :math:`(C,)`.
- **reserve_space_2** (Tensor) - Tensor of shape :math:`(C,)`.
Examples:
>>> input_x = Tensor(np.ones([128, 64, 32, 64]), mindspore.float32)
>>> scale = Tensor(np.ones([64]), mindspore.float32)
>>> bias = Tensor(np.ones([64]), mindspore.float32)
>>> mean = Tensor(np.ones([64]), mindspore.float32)
>>> variance = Tensor(np.ones([64]), mindspore.float32)
>>> batch_norm = P.BatchNorm()
>>> output = batch_norm(input_x, scale, bias, mean, variance)
"""
@prim_attr_register
def __init__(self, is_training=False, epsilon=1e-5):
validator.check_value_type('is_training', is_training, (bool,), self.name)
validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, self.name)
self.add_prim_attr('data_format', "NCHW")
self.init_prim_io_names(inputs=['x', 'scale', 'offset', 'mean', 'variance'],
outputs=['y', 'batch_mean', 'batch_variance', 'reserve_space_1', 'reserve_space_2'])
def infer_shape(self, input_x, scale, bias, mean, variance):
validator.check_integer("scale rank", len(scale), 1, Rel.EQ, self.name)
validator.check("scale shape", scale, "bias shape", bias, Rel.EQ, self.name)
validator.check("scale shape[0]", scale[0], "input_x shape[1]", input_x[1], Rel.EQ, self.name)
if not self.is_training:
validator.check_integer("mean rank", len(mean), 1, Rel.EQ, self.name)
validator.check("mean shape", mean, "variance shape", variance, Rel.EQ, self.name)
validator.check("mean shape", mean, "scale shape", scale, Rel.EQ, self.name)
return (input_x, scale, scale, scale, scale)
def infer_dtype(self, input_x, scale, bias, mean, variance):
validator.check_tensor_type_same({"input_x": input_x}, [mstype.float16, mstype.float32], self.name)
args = {"scale": scale, "bias": bias}
validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
args_moving = {"mean": mean, "variance": variance}
if self.is_training:
valid_types = [mstype.tensor_type(mstype.float16), mstype.tensor_type(mstype.float32), None]
validator.check_type_same(args_moving, valid_types, self.name)
else:
args_moving = {"mean": mean, "variance": variance}
validator.check_tensor_type_same(args_moving, [mstype.float16, mstype.float32], self.name)
return (input_x, scale, bias, input_x, input_x)
class Conv2D(PrimitiveWithInfer):
r"""
2D convolution layer.
Applies a 2D convolution over an input tensor which is typically of shape :math:`(N, C_{in}, H_{in}, W_{in})`,
where :math:`N` is batch size and :math:`C_{in}` is channel number. For each batch of shape
:math:`(C_{in}, H_{in}, W_{in})`, the formula is defined as:
.. math::
out_j = \sum_{i=0}^{C_{in} - 1} ccor(W_{ij}, X_i) + b_j,
where :math:`ccor` is the cross correlation operator, :math:`C_{in}` is the input channel number, :math:`j` ranges
from :math:`0` to :math:`C_{out} - 1`, :math:`W_{ij}` corresponds to the :math:`i`-th channel of the :math:`j`-th
filter and :math:`out_{j}` corresponds to the :math:`j`-th channel of the output. :math:`W_{ij}` is a slice
of kernel and it has shape :math:`(\text{ks_h}, \text{ks_w})`, where :math:`\text{ks_h}` and
:math:`\text{ks_w}` are the height and width of the convolution kernel. The full kernel has shape
:math:`(C_{out}, C_{in} // \text{group}, \text{ks_h}, \text{ks_w})`, where group is the group number
to split the input in the channel dimension.
If the 'pad_mode' is set to be "valid", the output height and width will be
:math:`\left \lfloor{1 + \frac{H_{in} + 2 \times \text{padding} - \text{ks_h} -
(\text{ks_h} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` and
:math:`\left \lfloor{1 + \frac{W_{in} + 2 \times \text{padding} - \text{ks_w} -
(\text{ks_w} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` respectively.
The first introduction can be found in paper `Gradient Based Learning Applied to Document Recognition
<http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_. More detailed introduction can be found here:
http://cs231n.github.io/convolutional-networks/.
Args:
out_channel (int): The dimension of the output.
kernel_size (Union[int, tuple[int]]): The kernel size of the 2D convolution.
        mode (int): Modes for different convolutions. 0: math convolution, 1: cross-correlation convolution,
            2: deconvolution, 3: depthwise convolution. Default: 1.
pad_mode (str): Modes to fill padding. It could be "valid", "same", or "pad". Default: "valid".
pad (Union(int, tuple[int])): The pad value to be filled. Default: 0. If `pad` is an integer, the paddings of
top, bottom, left and right are the same, equal to pad. If `pad` is a tuple of four integers, the
padding of top, bottom, left and right equal to pad[0], pad[1], pad[2], and pad[3] correspondingly.
stride (Union(int, tuple[int])): The stride to be applied to the convolution filter. Default: 1.
dilation (Union(int, tuple[int])): Specify the space to use between kernel elements. Default: 1.
group (int): Split input into groups. Default: 1.
Returns:
Tensor, the value that applied 2D convolution.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
- **weight** (Tensor) - Set size of kernel is :math:`(K_1, K_2)`, then the shape is
:math:`(C_{out}, C_{in}, K_1, K_2)`.
Outputs:
Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.
Examples:
>>> input = Tensor(np.ones([10, 32, 32, 32]), mindspore.float32)
>>> weight = Tensor(np.ones([32, 32, 3, 3]), mindspore.float32)
>>> conv2d = P.Conv2D(out_channel=32, kernel_size=3)
>>> conv2d(input, weight)
"""
@prim_attr_register
def __init__(self,
out_channel,
kernel_size,
mode=1,
pad_mode="valid",
pad=0,
stride=1,
dilation=1,
group=1):
"""init Conv2D"""
self.init_prim_io_names(inputs=['x', 'w'], outputs=['output'])
self.kernel_size = _check_positive_int_or_tuple('kernel_size', kernel_size, self.name)
self.stride = _check_positive_int_or_tuple('stride', stride, self.name, allow_four=True, ret_four=True)
self.add_prim_attr('stride', self.stride)
self.dilation = _check_positive_int_or_tuple('dilation', dilation, self.name, allow_four=True, ret_four=True)
self.add_prim_attr('dilation', self.dilation)
validator.check_value_type('pad', pad, (int, tuple), self.name)
if isinstance(pad, int):
pad = (pad,) * 4
else:
validator.check_integer('pad size', len(pad), 4, Rel.EQ, self.name)
self.padding = pad
self.pad_mode = validator.check_string('pad_mode', pad_mode, ['valid', 'same', 'pad'], self.name)
if pad_mode != 'pad' and pad != (0, 0, 0, 0):
raise ValueError(f"For '{self.name}', padding must be zero when pad_mode is '{pad_mode}'.")
if self.pad_mode == 'pad':
for item in pad:
validator.check_integer('pad item', item, 0, Rel.GE, self.name)
self.mode = validator.check_integer('mode', mode, 1, Rel.EQ, self.name)
self.add_prim_attr('data_format', "NCHW")
self.out_channel = validator.check_integer('out_channel', out_channel, 0, Rel.GT, self.name)
self.group = validator.check_integer('group', group, 0, Rel.GT, self.name)
self.add_prim_attr('offset_a', 0)
def infer_shape(self, x_shape, w_shape, b_shape=None):
validator.check_integer("weight rank", len(w_shape), 4, Rel.EQ, self.name)
validator.check_integer("x rank", len(x_shape), 4, Rel.EQ, self.name)
validator.check(f"x_shape[1] / group", x_shape[1] // self.group, "w_shape[1]", w_shape[1], Rel.EQ, self.name)
validator.check('out_channel', self.out_channel, 'w_shape[0]', w_shape[0], Rel.EQ, self.name)
validator.check('kernel_size', self.kernel_size, 'w_shape[2:4]', tuple(w_shape[2:4]), Rel.EQ, self.name)
kernel_size_h = w_shape[2]
kernel_size_w = w_shape[3]
stride_h = self.stride[2]
stride_w = self.stride[3]
dilation_h = self.dilation[2]
dilation_w = self.dilation[3]
if self.pad_mode == "valid":
h_out = math.ceil((x_shape[2] - dilation_h * (kernel_size_h - 1)) / stride_h)
w_out = math.ceil((x_shape[3] - dilation_w * (kernel_size_w - 1)) / stride_w)
pad_top, pad_bottom, pad_left, pad_right = 0, 0, 0, 0
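        # For "same" padding the spatial output is ceil(input / stride); the padding
        # needed to achieve that is split as evenly as possible, with any odd
        # remainder going to the bottom/right side.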
elif self.pad_mode == "same":
h_out = math.ceil(x_shape[2] / stride_h)
w_out = math.ceil(x_shape[3] / stride_w)
pad_needed_h = max(0, (h_out - 1) * stride_h + dilation_h * (kernel_size_h - 1) + 1 - x_shape[2])
pad_top = math.floor(pad_needed_h / 2)
pad_bottom = pad_needed_h - pad_top
pad_needed_w = max(0, (w_out - 1) * stride_w + dilation_w * (kernel_size_w - 1) + 1 - x_shape[3])
pad_left = math.floor(pad_needed_w / 2)
pad_right = pad_needed_w - pad_left
elif self.pad_mode == 'pad':
pad_top, pad_bottom, pad_left, pad_right = self.padding
h_out = 1 + (x_shape[2] + pad_top + pad_bottom - kernel_size_h - (kernel_size_h - 1) * (dilation_h - 1)) \
/ stride_h
w_out = 1 + (x_shape[3] + pad_left + pad_right - kernel_size_w - (kernel_size_w - 1) * (dilation_w - 1)) \
/ stride_w
h_out = math.floor(h_out)
w_out = math.floor(w_out)
self.pad_list = [pad_top, pad_bottom, pad_left, pad_right]
self.add_prim_attr('pad_list', (pad_top, pad_bottom, pad_left, pad_right))
out_channel = self.out_channel
out_shape = [x_shape[0], out_channel, h_out, w_out]
return out_shape
def infer_dtype(self, x_dtype, w_dtype, b_dtype=None):
args = {'x': x_dtype, 'w': w_dtype}
valid_types = [mstype.int8, mstype.int32, mstype.float16, mstype.float32]
validator.check_tensor_type_same(args, valid_types, self.name)
if x_dtype.element_type() == mstype.int8:
return mstype.tensor_type(mstype.int32)
return x_dtype
class DepthwiseConv2dNative(PrimitiveWithInfer):
r"""
Returns the depth-wise convolution value for the input.
Applies depthwise conv2d for the input, which will generate more channels with channel_multiplier.
Given an input tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})` where :math:`N` is the batch size and a
filter tensor with kernel size :math:`(ks_{h}, ks_{w})`, containing :math:`C_{in} * \text{channel_multiplier}`
convolutional filters of depth 1; it applies different filters to each input channel (channel_multiplier channels
for each input channel has the default value 1), then concatenates the results together. The output has
:math:`\text{in_channels} * \text{channel_multiplier}` channels.
Args:
        channel_multiplier (int): The multiplier for the original output convolution. Its value must be greater than 0.
kernel_size (Union[int, tuple[int]]): The size of the convolution kernel.
        mode (int): Modes for different convolutions. 0: math convolution, 1: cross-correlation convolution,
            2: deconvolution, 3: depthwise convolution. Default: 3.
pad_mode (str): Modes to fill padding. It could be "valid", "same", or "pad". Default: "valid".
pad (Union[int, tuple[int]]): The pad value to be filled. If `pad` is an integer, the paddings of
top, bottom, left and right are the same, equal to pad. If `pad` is a tuple of four integers, the padding
of top, bottom, left and right equal to pad[0], pad[1], pad[2], and pad[3] correspondingly. Default: 0.
stride (Union[int, tuple[int]]): The stride to be applied to the convolution filter. Default: 1.
dilation (Union[int, tuple[int]]): Specifies the dilation rate to be used for the dilated convolution.
Default: 1.
group (int): Splits input into groups. Default: 1.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
- **weight** (Tensor) - Set the size of kernel as :math:`(K_1, K_2)`, then the shape is
:math:`(K, C_{in}, K_1, K_2)`, `K` must be 1.
Outputs:
Tensor of shape :math:`(N, C_{in} * \text{channel_multiplier}, H_{out}, W_{out})`.
Examples:
>>> input = Tensor(np.ones([10, 32, 32, 32]), mindspore.float32)
>>> weight = Tensor(np.ones([1, 32, 3, 3]), mindspore.float32)
>>> depthwise_conv2d = P.DepthwiseConv2dNative(channel_multiplier = 3, kernel_size = (3, 3))
>>> output = depthwise_conv2d(input, weight)
>>> output.shape == (10, 96, 30, 30)
"""
@prim_attr_register
def __init__(self,
channel_multiplier,
kernel_size,
mode=3,
pad_mode="valid",
pad=0,
stride=1,
dilation=1,
group=1):
"""init DepthwiseConv2dNative"""
self.init_prim_io_names(inputs=['x', 'w'], outputs=['output'])
self.kernel_size = _check_positive_int_or_tuple('kernel_size', kernel_size, self.name)
self.stride = _check_positive_int_or_tuple('stride', stride, self.name)
if self.stride[0] != self.stride[1]:
raise ValueError("The height and width of stride should be equal,"
f"but got height:{self.stride[0]}, width:{self.stride[1]}")
self.add_prim_attr('stride', (1, 1, self.stride[0], self.stride[1]))
self.dilation = _check_positive_int_or_tuple('dilation', dilation, self.name)
if self.dilation[0] != self.dilation[1]:
raise ValueError("The height and width of dilation should be equal,"
f"but got height:{self.dilation[0]}, width:{self.dilation[1]}")
self.add_prim_attr('dilation', (1, 1, self.dilation[0], self.dilation[1]))
validator.check_value_type('pad', pad, (int, tuple), self.name)
if isinstance(pad, int):
pad = (pad,) * 4
else:
validator.check_integer('pad size', len(pad), 4, Rel.EQ, self.name)
self.padding = pad
self.pad_mode = validator.check_string('pad_mode', pad_mode, ['valid', 'same', 'pad'], self.name)
if pad_mode != 'pad' and pad != (0, 0, 0, 0):
raise ValueError(f"For '{self.name}', padding must be zero when pad_mode is '{pad_mode}'.")
if self.pad_mode == 'pad':
for item in pad:
validator.check_integer('pad item', item, 0, Rel.GE, self.name)
self.mode = validator.check_integer("mode", mode, 3, Rel.EQ, self.name)
self.add_prim_attr('data_format', "NCHW")
self.channel_multiplier = validator.check_integer("channel_multiplier", channel_multiplier, 0, Rel.GT,
self.name)
self.group = validator.check_integer("group", group, 0, Rel.GT, self.name)
self.add_prim_attr('offset_a', 0)
def infer_shape(self, x_shape, w_shape, b_shape=None):
validator.check_integer("weight rank", len(w_shape), 4, Rel.EQ, self.name)
validator.check_integer("x rank", len(x_shape), 4, Rel.EQ, self.name)
validator.check("x_shape[1]", x_shape[1], "w_shape[1]", w_shape[1], Rel.EQ, self.name)
validator.check('kernel_size', self.kernel_size, 'w_shape[2:4]', tuple(w_shape[2:4]), Rel.EQ, self.name)
kernel_size_n, _, kernel_size_h, kernel_size_w = w_shape
_, _, stride_h, stride_w = self.stride
_, _, dilation_h, dilation_w = self.dilation
if kernel_size_n != 1:
raise ValueError(f"The batch of input weight should be 1, but got {kernel_size_n}")
if self.pad_mode == "valid":
h_out = math.ceil((x_shape[2] - dilation_h * (kernel_size_h - 1)) / stride_h)
w_out = math.ceil((x_shape[3] - dilation_w * (kernel_size_w - 1)) / stride_w)
pad_top, pad_bottom, pad_left, pad_right = 0, 0, 0, 0
elif self.pad_mode == "same":
h_out = math.ceil(x_shape[2] / stride_h)
w_out = math.ceil(x_shape[3] / stride_w)
pad_needed_h = max(0, (h_out - 1) * stride_h + dilation_h * (kernel_size_h - 1) + 1 - x_shape[2])
pad_top = math.floor(pad_needed_h / 2)
pad_bottom = pad_needed_h - pad_top
pad_needed_w = max(0, (w_out - 1) * stride_w + dilation_w * (kernel_size_w - 1) + 1 - x_shape[3])
pad_left = math.floor(pad_needed_w / 2)
pad_right = pad_needed_w - pad_left
elif self.pad_mode == 'pad':
pad_top, pad_bottom, pad_left, pad_right = self.padding
h_out = 1 + (x_shape[2] + pad_top + pad_bottom - kernel_size_h - (kernel_size_h - 1) * (dilation_h - 1)) \
/ stride_h
w_out = 1 + (x_shape[3] + pad_left + pad_right - kernel_size_w - (kernel_size_w - 1) * (dilation_w - 1)) \
/ stride_w
h_out = math.floor(h_out)
w_out = math.floor(w_out)
self.pad_list = (pad_top, pad_bottom, pad_left, pad_right)
self.add_prim_attr('pads', self.pad_list)
out_channel = self.channel_multiplier * x_shape[1]
out_shape = [x_shape[0], out_channel, h_out, w_out]
return out_shape
def infer_dtype(self, x_dtype, w_dtype, b_dtype=None):
args = {'x': x_dtype, 'w': w_dtype}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
if x_dtype.element_type() == mstype.int8:
return mstype.tensor_type(mstype.int32)
return x_dtype
class _Pool(PrimitiveWithInfer):
r"""
Performs max/avg pooling operation.
Args:
ksize (Union[int, tuple[int]]): The size of the kernel, that should be a tuple
of two `int` for height and width. Default: 1.
strides (Union[int, tuple[int]]): The stride of the window, that should be
a tuple of two `int` for height and width. Default: 1.
padding (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
Default: "valid".
"""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="valid"):
self.init_prim_io_names(inputs=['x'], outputs=['output'])
validator.check_value_type('ksize', ksize, [int, tuple], self.name)
validator.check_value_type('strides', strides, [int, tuple], self.name)
self.padding = validator.check_string('padding', padding.upper(), ['VALID', 'SAME'], self.name)
self.add_prim_attr("padding", self.padding)
self.is_maxpoolwithargmax = (self.name == "MaxPoolWithArgmax")
if not self.is_maxpoolwithargmax:
self.add_prim_attr('data_format', "NCHW")
self.ksize = _check_positive_int_or_tuple("ksize", ksize, self.name, allow_four=False, ret_four=True)
if self.is_maxpoolwithargmax:
self.ksize = (1, self.ksize[-2], self.ksize[-1], 1)
self.add_prim_attr("ksize", self.ksize)
self.strides = _check_positive_int_or_tuple("strides", strides, self.name, allow_four=False, ret_four=True)
if self.is_maxpoolwithargmax:
self.strides = (1, self.strides[-2], self.strides[-1], 1)
self.add_prim_attr("strides", self.strides)
def infer_shape(self, x_shape):
validator.check_integer("x rank", len(x_shape), 4, Rel.EQ, self.name)
batch, channel, input_h, input_w = x_shape
if self.is_maxpoolwithargmax:
_, kernel_h, kernel_w, _ = self.ksize
_, stride_h, stride_w, _ = self.strides
else:
_, _, kernel_h, kernel_w = self.ksize
_, _, stride_h, stride_w = self.strides
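        # "VALID" keeps only complete windows: out = ceil((in - kernel + 1) / stride);
        # "SAME" pads so that out = ceil(in / stride).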
if self.padding == "VALID":
out_h = math.ceil((input_h - (kernel_h - 1)) / stride_h)
out_w = math.ceil((input_w - (kernel_w - 1)) / stride_w)
elif self.padding == "SAME":
out_h = math.ceil(input_h / stride_h)
out_w = math.ceil(input_w / stride_w)
out_shape = [batch, channel, out_h, out_w]
for shape_value in out_shape:
if shape_value <= 0:
raise ValueError(f"For '{self.name}' The kernel size is not valid, "
f"please check it if is larger than data's shape size.")
return out_shape
def infer_dtype(self, x_dtype):
validator.check_subclass("input", x_dtype, mstype.tensor, self.name)
return x_dtype
class MaxPool(_Pool):
r"""
Max pooling operation.
Applies a 2D max pooling over an input Tensor which can be regarded as a composition of 2D planes.
Typically the input is of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, MaxPool outputs
regional maximum in the :math:`(H_{in}, W_{in})`-dimension. Given kernel size
:math:`ks = (h_{ker}, w_{ker})` and stride :math:`s = (s_0, s_1)`, the operation is as follows.
.. math::
\text{output}(N_i, C_j, h, w) = \max_{m=0, \ldots, h_{ker}-1} \max_{n=0, \ldots, w_{ker}-1}
\text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)
Args:
ksize (Union[int, tuple[int]]): The size of kernel used to take the maximum value,
is an int number that represents height and width are both ksize, or a tuple
of two int numbers that represent height and width respectively. Default: 1.
strides (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Default: 1.
padding (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
Default: "valid".
- same: Adopts the way of completion. The height and width of the output will be the same as
the input. The total number of padding will be calculated in horizontal and vertical
directions and evenly distributed to top and bottom, left and right if possible.
Otherwise, the last extra padding will be done from the bottom and the right side.
- valid: Adopts the way of discarding. The possible largest height and width of output
will be returned without padding. Extra pixels will be discarded.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
Outputs:
Tensor, with shape :math:`(N, C_{out}, H_{out}, W_{out})`.
Examples:
>>> input_tensor = Tensor(np.arange(1 * 3 * 3 * 4).reshape((1, 3, 3, 4)), mindspore.float32)
>>> maxpool_op = P.MaxPool(padding="VALID", ksize=2, strides=1)
>>> output_tensor = maxpool_op(input_tensor)
"""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="valid"):
super(MaxPool, self).__init__(ksize, strides, padding)
class MaxPoolWithArgmax(_Pool):
r"""
Performs max pooling on the input Tensor and return both max values and indices.
Typically the input is of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, MaxPool outputs
regional maximum in the :math:`(H_{in}, W_{in})`-dimension. Given kernel size
:math:`ks = (h_{ker}, w_{ker})` and stride :math:`s = (s_0, s_1)`, the operation is as follows.
.. math::
\text{output}(N_i, C_j, h, w) = \max_{m=0, \ldots, h_{ker}-1} \max_{n=0, \ldots, w_{ker}-1}
\text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)
Args:
ksize (Union[int, tuple[int]]): The size of kernel used to take the maximum value and arg value,
is an int number that represents height and width are both ksize, or a tuple of
two int numbers that represent height and width respectively. Default: 1.
strides (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Default: 1.
padding (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
Default: "valid".
- same: Adopts the way of completion. The height and width of the output will be the same as
the input. The total number of padding will be calculated in horizontal and vertical
directions and evenly distributed to top and bottom, left and right if possible.
Otherwise, the last extra padding will be done from the bottom and the right side.
- valid: Adopts the way of discarding. The possible largest height and width of output
will be returned without padding. Extra pixels will be discarded.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
Data type should be float16 or float32.
Outputs:
        Tuple of 2 Tensors, the maxpooling result and the mask that indicates where the max values come from.
- **output** (Tensor) - Maxpooling result, with shape :math:`(N, C_{out}, H_{out}, W_{out})`.
- **mask** (Tensor) - Max values' index represented by the mask.
Examples:
>>> input_tensor = Tensor(np.arange(1 * 3 * 3 * 4).reshape((1, 3, 3, 4)), mindspore.float32)
>>> maxpool_arg_op = P.MaxPoolWithArgmax(padding="VALID", ksize=2, strides=1)
>>> output_tensor, argmax = maxpool_arg_op(input_tensor)
"""
def __init__(self, ksize=1, strides=1, padding="valid"):
super(MaxPoolWithArgmax, self).__init__(ksize, strides, padding)
self.is_tbe = context.get_context("device_target") == "Ascend"
self.is_gpu = context.get_context("device_target") == "GPU"
def infer_shape(self, x_shape):
out_shape = _Pool.infer_shape(self, x_shape)
_, _, out_h, out_w = out_shape
_, kernel_h, kernel_w, _ = self.ksize
argmax_shape = []
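        # On Ascend (TBE) the argmax output is not a plain index tensor: for each
        # kernel position (kernel_h * kernel_w rows) the output positions appear to
        # be packed into 16-bit words, hence ceil(out_h * out_w / 16) + 1 in the
        # last dimension; other backends simply use the pooled output shape.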
if self.is_tbe:
for i in range(4):
if i == 2:
dim = kernel_h * kernel_w
argmax_shape.append(dim)
elif i == 3:
dim = math.ceil(out_h * out_w / 16) + 1
argmax_shape.append(dim)
else:
argmax_shape.append(x_shape[i])
else:
argmax_shape = out_shape
return out_shape, argmax_shape
def infer_dtype(self, x_dtype):
out_dtype = x_dtype
validator.check_tensor_type_same({"x": x_dtype}, (mstype.float16, mstype.float32), self.name)
argmax_dtype = mstype.uint16
if self.is_gpu:
argmax_dtype = mstype.int32
return out_dtype, argmax_dtype
class AvgPool(_Pool):
r"""
Average pooling operation.
Applies a 2D average pooling over an input Tensor which can be regarded as a composition of 2D input planes.
Typically the input is of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, AvgPool2d outputs
regional average in the :math:`(H_{in}, W_{in})`-dimension. Given kernel size
:math:`ks = (h_{ker}, w_{ker})` and stride :math:`s = (s_0, s_1)`, the operation is as follows.
.. math::
\text{output}(N_i, C_j, h, w) = \frac{1}{h_{ker} * w_{ker}} \sum_{m=0}^{h_{ker}-1} \sum_{n=0}^{w_{ker}-1}
\text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)
Args:
ksize (Union[int, tuple[int]]): The size of kernel used to take the average value,
is an int number that represents height and width are both ksize, or a tuple
of two int numbers that represent height and width respectively. Default: 1.
strides (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Default: 1.
padding (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
Default: "valid".
- same: Adopts the way of completion. The height and width of the output will be the same as
the input. The total number of padding will be calculated in horizontal and vertical
directions and evenly distributed to top and bottom, left and right if possible.
Otherwise, the last extra padding will be done from the bottom and the right side.
- valid: Adopts the way of discarding. The possible largest height and width of output
will be returned without padding. Extra pixels will be discarded.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
Outputs:
Tensor, with shape :math:`(N, C_{out}, H_{out}, W_{out})`.
Examples:
>>> import mindspore
>>> import mindspore.nn as nn
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.avgpool_op = P.AvgPool(padding="VALID", ksize=2, strides=1)
>>>
>>> def construct(self, x):
>>> result = self.avgpool_op(x)
>>> return result
>>>
>>> input_x = Tensor(np.arange(1 * 3 * 3 * 4).reshape(1, 3, 3, 4), mindspore.float32)
>>> net = Net()
>>> result = net(input_x)
[[[[ 2.5 3.5 4.5]
[ 6.5 7.5 8.5]]
[[ 14.5 15.5 16.5]
[ 18.5 19.5 20.5]]
[[ 26.5 27.5 28.5]
[ 30.5 31.5 32.5]]]]
"""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="valid"):
if context.get_context("device_target") == "GPU":
self.target = "GPU"
elif context.get_context("enable_ge"):
self.target = "GE"
else:
self.target = "OTHER"
super(AvgPool, self).__init__(ksize, strides, padding)
class Conv2DBackpropInput(PrimitiveWithInfer):
"""
Computes the gradients of convolution with respect to the input.
Args:
out_channel (int): The dimensionality of the output space.
kernel_size (Union[int, tuple[int]]): The size of the convolution window.
pad_mode (str): Modes to fill padding. It could be "valid", "same", or "pad". Default: "valid".
pad (Union[int, tuple[int]]): The pad value to be filled. Default: 0. If `pad` is an integer, the paddings of
top, bottom, left and right are the same, equal to pad. If `pad` is a tuple of four integers, the
padding of top, bottom, left and right equal to pad[0], pad[1], pad[2], and pad[3] correspondingly.
        mode (int): Modes for different convolutions. 0: math convolution, 1: cross-correlation convolution,
            2: deconvolution, 3: depthwise convolution. Default: 1.
        stride (Union[int, tuple[int]]): The stride to be applied to the convolution filter. Default: 1.
        dilation (Union[int, tuple[int]]): Specifies the dilation rate to be used for the dilated convolution.
Default: 1.
group (int): Splits input into groups. Default: 1.
Returns:
Tensor, the gradients of convolution.
Examples:
>>> dout = Tensor(np.ones([10, 32, 30, 30]), mindspore.float32)
>>> weight = Tensor(np.ones([32, 32, 3, 3]), mindspore.float32)
>>> x = Tensor(np.ones([10, 32, 32, 32]))
>>> conv2d_backprop_input = P.Conv2DBackpropInput(out_channel=32, kernel_size=3)
>>> conv2d_backprop_input(dout, weight, F.shape(x))
"""
@prim_attr_register
def __init__(self,
out_channel,
kernel_size,
pad_mode="valid",
pad=0,
pad_list=None,
mode=1,
stride=1,
dilation=1,
group=1):
"""init Conv2DBackpropInput"""
self.init_prim_io_names(inputs=['out_backprop', 'filter', 'input_sizes'], outputs=['output'])
self.out_channel = validator.check_integer('out_channel', out_channel, 0, Rel.GT, self.name)
self.kernel_size = _check_positive_int_or_tuple('kernel_size', kernel_size, self.name)
self.stride = _check_positive_int_or_tuple('stride', stride, self.name, allow_four=True, ret_four=False)
self.add_prim_attr('stride', self.stride)
self.dilation = _check_positive_int_or_tuple('dilation', dilation, self.name, allow_four=True, ret_four=True)
self.add_prim_attr('dilation', self.dilation)
validator.check_value_type('pad', pad, (int, tuple), self.name)
if isinstance(pad, int):
pad = (pad,) * 4
else:
validator.check_integer('pad size', len(pad), 4, Rel.EQ, self.name)
self.padding = pad
self.pad_mode = validator.check_string('pad_mode', pad_mode, ['valid', 'same', 'pad'], self.name)
if pad_mode != 'pad' and pad != (0, 0, 0, 0):
raise ValueError(f"For '{self.name}', padding must be zero when pad_mode is '{pad_mode}'.")
if self.pad_mode == 'pad':
for item in pad:
validator.check_integer('pad item', item, 0, Rel.GE, self.name)
pad_mode = pad_mode.upper()
self.add_prim_attr('pad_mode', pad_mode)
self.mode = validator.check_integer('mode', mode, 1, Rel.EQ, self.name)
self.group = validator.check_integer('group', group, 0, Rel.GT, self.name)
self.add_prim_attr('data_format', "NCHW")
if pad_list:
for x in pad_list:
validator.check_integer('element of pad_list', x, 0, Rel.GE, self.name)
self.pad_list = pad_list
def __infer__(self, doutput, w, x_size):
x_size_v = x_size['value']
validator.check_value_type('x_size', x_size_v, [tuple], self.name)
for i, dim_len in enumerate(x_size_v):
validator.check_value_type("x_size[%d]" % i, dim_len, [int], self.name)
args = {'doutput': doutput['dtype'], 'w': w['dtype']}
valid_types = [mstype.int8, mstype.int32, mstype.float16, mstype.float32]
validator.check_tensor_type_same(args, valid_types, self.name)
# infer shape
dout_shape = doutput['shape']
kernel_h = self.kernel_size[0]
kernel_w = self.kernel_size[1]
stride_h = self.stride[0]
stride_w = self.stride[1]
dilation_h = self.dilation[2]
dilation_w = self.dilation[3]
# default pad mode is valid
pad_list = (0, 0, 0, 0)
if self.pad_list:
pad_list = tuple(self.pad_list)
elif self.pad_mode == "SAME":
pad_needed_h = max(0, (dout_shape[2] - 1) * stride_h + dilation_h * (kernel_h - 1) + 1 - x_size_v[2])
pad_top = math.floor(pad_needed_h / 2)
pad_bottom = pad_needed_h - pad_top
pad_needed_w = max(0, (dout_shape[3] - 1) * stride_w + dilation_w * (kernel_w - 1) + 1 - x_size_v[3])
pad_left = math.floor(pad_needed_w / 2)
pad_right = pad_needed_w - pad_left
pad_list = (pad_top, pad_bottom, pad_left, pad_right)
elif self.pad_mode == 'PAD':
pad_list = self.padding
self.add_prim_attr('pad_list', pad_list)
out = {
'value': None,
'shape': x_size_v,
'dtype': doutput['dtype'],
}
return out
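# Illustrative sketch of the 'SAME' branch above (not part of the primitive). With the docstring
# example values dout_shape = (10, 32, 30, 30), x_size = (10, 32, 32, 32), kernel 3x3, stride 1,
# dilation 1: pad_needed_h = max(0, (30 - 1) * 1 + 1 * (3 - 1) + 1 - 32) = 0, so pad_list stays
# (0, 0, 0, 0). With stride 2 the same formula gives pad_needed_h = 29, split as pad_top = 14 and
# pad_bottom = 15, since pad_top = floor(pad_needed_h / 2).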
class BiasAdd(PrimitiveWithInfer):
r"""
Returns sum of input and bias tensor.
Adds the 1-D bias tensor to the input tensor, and broadcasts the shape on all axes
except for the channel axis.
Inputs:
- **input_x** (Tensor) - The input tensor. The shape can be 2-4 dimensions.
- **bias** (Tensor) - The bias tensor, with shape :math:`(C)`.
The shape of `bias` must be the same as `input_x` in the second dimension.
Outputs:
Tensor, with the same shape and type as `input_x`.
Examples:
>>> input_x = Tensor(np.arange(6).reshape((2, 3)), mindspore.float32)
>>> bias = Tensor(np.random.random(3).reshape((3,)), mindspore.float32)
>>> bias_add = P.BiasAdd()
>>> bias_add(input_x, bias)
"""
@prim_attr_register
def __init__(self):
self.init_prim_io_names(inputs=['x', 'b'], outputs=['output'])
self.add_prim_attr('data_format', 'NCHW')
def infer_shape(self, x_shape, b_shape):
validator.check_integer("x rank", len(x_shape), 2, Rel.GE, self.name)
validator.check_integer("bias rank", len(b_shape), 1, Rel.EQ, self.name)
validator.check("b_shape[0]", b_shape[0], "x_shape[1]", x_shape[1], Rel.EQ, self.name)
return x_shape
def infer_dtype(self, x_type, b_type):
args = {"input_x": x_type, "bias": b_type}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
return x_type
class TopK(PrimitiveWithInfer):
"""
Finds values and indices of the `k` largest entries along the last dimension.
Args:
sorted (bool): If true, the resulting elements will
be sorted by the values in descending order. Default: False.
Inputs:
- **input_x** (Tensor) - Input to be computed, data type should be float16, float32 or int32.
- **k** (int) - Number of top elements to be computed along the last dimension, constant input is needed.
Outputs:
Tuple of 2 Tensor, the values and the indices.
- **values** (Tensor) - The `k` largest elements along each last dimensional slice.
- **indices** (Tensor) - The indices of values within the last dimension of input.
Examples:
>>> topk = P.TopK(sorted=True)
>>> input_x = Tensor([1, 2, 3, 4, 5], mindspore.float16)
>>> k = 3
>>> values, indices = topk(input_x, k)
>>> assert values == Tensor(np.array([5, 4, 3]), mstype.float16)
>>> assert indices == Tensor(np.array([4, 3, 2]), mstype.int32)
"""
@prim_attr_register
def __init__(self, sorted=False):
validator.check_value_type("sorted", sorted, [bool], self.name)
self.init_prim_io_names(inputs=['input', 'k'],
outputs=['values', 'indices'])
def __infer__(self, input_x, k):
x_dtype = input_x['dtype']
valid_types = (mstype.int32, mstype.float16, mstype.float32)
validator.check_tensor_type_same({'x': x_dtype}, valid_types, self.name)
k_v = k['value']
validator.check_value_type('k', k_v, (int,), self.name)
x_shape = list(input_x['shape'])
ndim = len(x_shape) - 1
x_shape[ndim] = k_v
return {'shape': (x_shape, x_shape),
'dtype': (x_dtype, mstype.int32),
'value': None}
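# Shape note, grounded in __infer__ above: only the last dimension is replaced by k, so an input of
# shape (2, 5) with k = 3 produces `values` and `indices` of shape (2, 3), and `indices` is always
# int32 regardless of the input dtype.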
class SoftmaxCrossEntropyWithLogits(PrimitiveWithInfer):
r"""
Gets the softmax cross-entropy value between logits and labels, where labels should be one-hot encoded.
Note:
Sets input logits as `X`, input label as `Y`, output as `loss`. Then,
.. math::
p_{ij} = softmax(X_{ij}) = \frac{exp(x_i)}{\sum_{j = 0}^{N-1}\exp(x_j)}
.. math::
loss_{ij} = -\sum_j{Y_{ij} * ln(p_{ij})}
Inputs:
- **logits** (Tensor) - Input logits, with shape :math:`(N, C)`. Data type should be float16 or float32.
- **labels** (Tensor) - Ground truth labels, with shape :math:`(N, C)`, has the same data type with `logits`.
Outputs:
Tuple of 2 Tensor, the loss shape is `(N,)`, and the dlogits with the same shape as `logits`.
Examples:
>>> logits = Tensor([[2, 4, 1, 4, 5], [2, 1, 2, 4, 3]], mindspore.float32)
>>> labels = Tensor([[0, 0, 0, 0, 1], [0, 0, 0, 1, 0]], mindspore.float32)
>>> softmax_cross = P.SoftmaxCrossEntropyWithLogits()
>>> loss, backprop = softmax_cross(logits, labels)
([0.5899297, 0.52374405], [[0.02760027, 0.20393994, 0.01015357, 0.20393994, -0.44563377],
[0.08015892, 0.02948882, 0.08015892, -0.4077012, 0.21789455]])
"""
@prim_attr_register
def __init__(self):
pass
def infer_shape(self, logits_shape, labels_shape):
validator.check("logits_shape", logits_shape, "labels_shape", labels_shape, Rel.EQ, self.name)
loss_shape = [logits_shape[0]]
dlogits_shape = logits_shape
return (loss_shape, dlogits_shape)
def infer_dtype(self, logits_type, labels_type):
args = {"logits": logits_type, "labels": labels_type}
validator.check_tensor_type_same(args, (mstype.float16, mstype.float32), self.name)
return (logits_type, logits_type)
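# A minimal NumPy cross-check of the docstring example above (illustrative, assumes numpy as np):
#     logits = np.array([2., 4., 1., 4., 5.])
#     p = np.exp(logits) / np.exp(logits).sum()
#     loss0 = -np.log(p[4])                           # ~0.59, matching the first documented loss 0.5899297
#     dlogits0 = p - np.array([0., 0., 0., 0., 1.])   # matches the first documented backprop row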
class SparseSoftmaxCrossEntropyWithLogits(PrimitiveWithInfer):
r"""
Computes the softmax cross-entropy value between logits and sparse encoding labels.
Note:
Sets input logits as `X`, input label as `Y`, output as `loss`. Then,
.. math::
p_{ij} = softmax(X_{ij}) = \frac{exp(x_i)}{\sum_{j = 0}^{N-1}\exp(x_j)}
.. math::
loss_{ij} = \begin{cases} -ln(p_{ij}), &j = y_i \cr -ln(1 - p_{ij}), & j \neq y_i \end{cases}
.. math::
loss = \sum_{ij} loss_{ij}
Args:
is_grad (bool): If it's true, this operation returns the computed gradient. Default: False.
Inputs:
- **logits** (Tensor) - Input logits, with shape :math:`(N, C)`. Data type should be float16 or float32.
- **labels** (Tensor) - Ground truth labels, with shape :math:`(N)`.
Data type should be int32 or int64.
Outputs:
Tensor, if `is_grad` is False, the output tensor is the value of loss which is a scalar tensor;
if `is_grad` is True, the output tensor is the gradient of input with the same shape as `logits`.
Examples:
Please refer to the usage in nn.SoftmaxCrossEntropyWithLogits source code.
"""
@prim_attr_register
def __init__(self, is_grad=False):
self.init_prim_io_names(inputs=['features', 'labels'], outputs=['output'])
self.is_grad = is_grad
self.add_prim_attr('sens', 1.0)
def infer_shape(self, logits_shape, labels_shape):
validator.check("logits_shape[0]", logits_shape[0], "labels_shape[0]", labels_shape[0], Rel.EQ, self.name)
loss_shape = []
if self.is_grad:
return logits_shape
return loss_shape
def infer_dtype(self, logits_type, labels_type):
validator.check_tensor_type_same({"logits": logits_type}, (mstype.float16, mstype.float32), self.name)
validator.check_tensor_type_same({"labels": labels_type}, (mstype.int32, mstype.int64), self.name)
return logits_type
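# Mode note, grounded in infer_shape above: with is_grad=False the output is a scalar loss
# (shape []); with is_grad=True the primitive instead returns the gradient w.r.t. logits,
# shaped like `logits`.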
class ApplyMomentum(PrimitiveWithInfer):
"""
Optimizer that implements the Momentum algorithm.
Refer to the paper `On the importance of initialization and momentum in deep
learning <https://dl.acm.org/doi/10.5555/3042817.3043064>`_ for more details.
Inputs of `variable`, `accumulation` and `gradient` comply with the implicit type conversion rules
to make the data types consistent.
If they have different data types, the lower-priority data type will be converted to
the relatively highest-priority data type.
Data type conversion of Parameter is not supported. A RuntimeError exception will be thrown.
Args:
use_locking (bool): Enable a lock to protect the update of variable and accumulation tensors. Default: False.
use_nesterov (bool): Enable Nesterov momentum. Default: False.
gradient_scale (float): The scale of the gradient. Default: 1.0.
Inputs:
- **variable** (Parameter) - Weights to be updated. Data type should be float.
- **accumulation** (Parameter) - Accumulated gradient value by moment weight.
Has the same data type with `variable`.
- **learning_rate** (Union[Number, Tensor]) - The learning rate value, should be a float number or
a scalar tensor with float data type.
- **gradient** (Tensor) - Gradients, has the same data type as `variable`.
- **momentum** (Union[Number, Tensor]) - Momentum, should be a float number or
a scalar tensor with float data type.
Outputs:
Tensor, parameters to be updated.
Examples:
Please refer to the usage in nn.ApplyMomentum.
"""
__mindspore_signature__ = (
sig.make_sig('variable', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('accumulation', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('learning_rate', dtype=sig.sig_dtype.T1),
sig.make_sig('gradient', dtype=sig.sig_dtype.T),
sig.make_sig('momentum', dtype=sig.sig_dtype.T2),
)
@prim_attr_register
def __init__(self, use_nesterov=False, use_locking=False, gradient_scale=1.0):
self.init_prim_io_names(inputs=['variable', 'accumulation', 'learning_rate', 'gradient', 'momentum'],
outputs=['output'])
self.is_tbe = context.get_context("device_target") == "Ascend"
self.is_ge = context.get_context("enable_ge")
def infer_shape(self, v_shape, a_shape, l_shape, g_shape, m_shape):
if not self.is_ge and self.is_tbe:
return v_shape, v_shape
return v_shape
def infer_dtype(self, v_dtype, a_dtype, l_dtype, g_dtype, m_dtype):
valid_types = [mstype.float16, mstype.float32, mstype.float64]
if v_dtype != mstype.type_refkey and a_dtype != mstype.type_refkey:
validator.check_tensor_type_same({"v": v_dtype}, valid_types, self.name)
validator.check_tensor_type_same({"a": a_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"l_dtype": l_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"g_dtype": g_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"m_dtype": m_dtype}, valid_types, self.name)
if not self.is_ge and self.is_tbe:
return g_dtype, g_dtype
return g_dtype
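# A hedged sketch of the conventional momentum update this primitive models (illustrative only;
# the exact behaviour, including gradient_scale handling, lives in the backend kernels):
#     accumulation = accumulation * momentum + gradient
#     variable -= learning_rate * accumulation                              # use_nesterov=False
#     variable -= learning_rate * (gradient + momentum * accumulation)      # use_nesterov=True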
class SmoothL1Loss(PrimitiveWithInfer):
r"""
Computes smooth L1 loss, a robust L1 loss.
SmoothL1Loss is a Loss similar to MSELoss but less sensitive to outliers as described in the
`Fast R-CNN <https://arxiv.org/abs/1504.08083>`_ by Ross Girshick.
Note:
Sets input prediction as `X`, input target as `Y`, output as `loss`. Then,
.. math::
\text{SmoothL1Loss} = \begin{cases} \frac{0.5 x^{2}}{\text{beta}}, &if \left |x \right | < \text{beta} \cr
\left |x \right|-0.5 \text{beta}, &\text{otherwise}\end{cases}
Args:
beta (float): A parameter used to control the point where the function will change from
quadratic to linear. Default: 1.0.
Inputs:
- **prediction** (Tensor) - Predict data. Data type should be float16 or float32.
- **target** (Tensor) - Ground truth data, with the same type and shape as `prediction`.
Outputs:
Tensor, with the same type and shape as `prediction`.
Examples:
>>> loss = P.SmoothL1Loss()
>>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)
>>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)
>>> loss(input_data, target_data)
[0, 0, 0.5]
"""
@prim_attr_register
def __init__(self, beta=1.0):
validator.check_value_type('beta', beta, [float], self.name)
validator.check('beta', beta, '', 0, Rel.GT, self.name)
self.init_prim_io_names(inputs=['prediction', 'target'], outputs=['output'])
def infer_shape(self, prediction, target):
validator.check('prediction shape', prediction, 'target shape', target, Rel.EQ, self.name)
return prediction
def infer_dtype(self, prediction, target):
args = {"prediction": prediction, "target": target}
validator.check_tensor_type_same(args, (mstype.float16, mstype.float32), self.name)
return prediction
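# Worked check of the docstring example above: diff = input_data - target_data = [0, 0, 1] with
# beta = 1.0. The first two entries hit the quadratic branch (0.5 * 0 ** 2 / beta = 0); the last sits
# exactly at |diff| = beta, where both branches agree on |diff| - 0.5 * beta = 0.5, giving [0, 0, 0.5].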
class L2Loss(PrimitiveWithInfer):
"""
Calculates half of the L2 norm of a tensor without using the `sqrt`.
Set `input_x` as x and output as loss.
.. math::
loss = sum(x ** 2) / 2
Inputs:
- **input_x** (Tensor) - A input Tensor. Data type should be float16 or float32.
Outputs:
Tensor, has the same dtype as `input_x`. The output tensor is the value of loss which is a scalar tensor.
Examples:
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float16)
>>> l2_loss = P.L2Loss()
>>> l2_loss(input_x)
7.0
"""
@prim_attr_register
def __init__(self):
"""init L2Loss"""
def infer_shape(self, input_x):
loss_shape = []
return loss_shape
def infer_dtype(self, x_type):
validator.check_subclass("x_type", x_type, mstype.tensor, self.name)
valid_types = [mstype.float16, mstype.float32]
validator.check_tensor_type_same({'x_type': x_type}, valid_types, self.name)
return x_type
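# Worked check of the docstring example above: for input_x = [1, 2, 3],
# loss = (1 ** 2 + 2 ** 2 + 3 ** 2) / 2 = 14 / 2 = 7.0, matching the documented scalar output.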
class DataFormatDimMap(PrimitiveWithInfer):
"""
Returns the dimension index in the destination data format given in the source data format.
Args:
src_format (string): An optional value for source data format. Default: 'NHWC'.
dst_format (string): An optional value for destination data format. Default: 'NCHW'.
Inputs:
- **input_x** (Tensor) - A Tensor with each element as a dimension index in source data format.
The suggested values are in the range [-4, 4). Its type is int32.
Outputs:
Tensor, has the same type as the `input_x`.
Examples:
>>> x = Tensor([0, 1, 2, 3], mindspore.int32)
>>> dfdm = P.DataFormatDimMap()
>>> dfdm(x)
[0 3 1 2]
"""
@prim_attr_register
def __init__(self, src_format='NHWC', dst_format='NCHW'):
valid_values = ['NHWC', 'NCHW']
self.src_format = validator.check_string("src_format", src_format, valid_values, self.name)
self.dst_format = validator.check_string("dst_format", dst_format, valid_values, self.name)
self.init_prim_io_names(inputs=['input_x'], outputs=['output'])
def infer_shape(self, x_shape):
return x_shape
def infer_dtype(self, x_type):
validator.check_subclass("x", x_type, mstype.tensor, self.name)
valid_types = [mstype.int32]
validator.check_tensor_type_same({"x": x_type}, valid_types, self.name)
return x_type
class RNNTLoss(PrimitiveWithInfer):
"""
Computes the RNNTLoss and its gradient with respect to the softmax outputs.
Args:
blank_label (int): blank label. Default: 0.
Inputs:
- **acts** (Tensor) - Tensor of shape :math:`(B, T, U, V)`. Data type should be float16 or float32.
- **labels** (Tensor[int32]) - Tensor of shape :math:`(B, U-1)`.
- **input_lengths** (Tensor[int32]) - Tensor of shape :math:`(B,)`.
- **label_lengths** (Tensor[int32]) - Tensor of shape :math:`(B,)`.
Outputs:
- **costs** (Tensor) - Tensor of shape :math:`(B,)`, with the same data type as `acts`.
- **grads** (Tensor) - Has the same shape and data type as `acts`.
Examples:
>>> B, T, U, V = 1, 2, 3, 5
>>> acts = np.random.random((B, T, U, V)).astype(np.float32)
>>> labels = np.array([[1, 2]]).astype(np.int32)
>>> input_length = np.array([T] * B).astype(np.int32)
>>> label_length = np.array([len(l) for l in labels]).astype(np.int32)
>>> rnnt_loss = P.RNNTLoss(blank_label=0)
>>> costs, grads = rnnt_loss(Tensor(acts), Tensor(labels), Tensor(input_length), Tensor(label_length))
"""
@prim_attr_register
def __init__(self, blank_label=0):
validator.check_value_type('blank_label', blank_label, [int], self.name)
self.init_prim_io_names(inputs=['acts', 'labels', 'input_length', 'label_length'],
outputs=['costs', 'grads'])
def infer_shape(self, acts_shape, labels_shape, input_length_shape, label_length_shape):
validator.check_integer('acts_rank', len(acts_shape), 4, Rel.EQ, self.name)
validator.check_integer('labels_rank', len(labels_shape), 2, Rel.EQ, self.name)
validator.check_integer('input_length_rank', len(input_length_shape), 1, Rel.EQ, self.name)
validator.check_integer('label_length_rank', len(label_length_shape), 1, Rel.EQ, self.name)
validator.check('labels shape[0]', labels_shape[0], 'acts shape[0]', acts_shape[0], Rel.EQ, self.name)
validator.check('labels shape[1]', labels_shape[1], 'acts shape[2]-1', acts_shape[2]-1, Rel.EQ, self.name)
validator.check('input_length size', input_length_shape[0], 'acts shape[0]', acts_shape[0], Rel.EQ, self.name)
validator.check('label_length size', label_length_shape[0], 'acts shape[0]', acts_shape[0], Rel.EQ, self.name)
costs_shape = (acts_shape[0],)
return (costs_shape, acts_shape)
def infer_dtype(self, acts_type, labels_type, input_length_type, label_length_type):
validator.check_subclass("acts_type", acts_type, mstype.tensor, self.name)
validator.check_subclass("labels_type", labels_type, mstype.tensor, self.name)
validator.check_subclass("input_length_type", input_length_type, mstype.tensor, self.name)
validator.check_subclass("label_length_type", label_length_type, mstype.tensor, self.name)
validator.check_tensor_type_same({"acts_type": acts_type}, [mstype.float32, mstype.float16], self.name)
validator.check_tensor_type_same({"labels_type": labels_type}, [mstype.int32], self.name)
validator.check_tensor_type_same({"input_length_type": input_length_type}, [mstype.int32], self.name)
validator.check_tensor_type_same({"label_length_type": label_length_type}, [mstype.int32], self.name)
return (acts_type, acts_type)
class SGD(PrimitiveWithInfer):
"""
Computes stochastic gradient descent (optionally with momentum).
Nesterov momentum is based on the formula from `On the importance of
initialization and momentum in deep learning`.
Note:
For details, please refer to `nn.SGD` source code.
Args:
dampening (float): The dampening for momentum. Default: 0.0.
weight_decay (float): Weight decay (L2 penalty). Default: 0.0.
nesterov (bool): Enable Nesterov momentum. Default: False.
Inputs:
- **parameters** (Tensor) - Parameters to be updated. With float16 or float32 data type.
- **gradient** (Tensor) - Gradients. With float16 or float32 data type.
- **learning_rate** (Tensor) - Learning rate, a scalar tensor with float16 or float32 data type.
e.g. Tensor(0.1, mindspore.float32)
- **accum** (Tensor) - Accum(velocity) to be updated. With float16 or float32 data type.
- **momentum** (Tensor) - Momentum, a scalar tensor with float16 or float32 data type.
e.g. Tensor(0.1, mindspore.float32).
- **stat** (Tensor) - States to be updated with the same shape as gradient. With float16 or float32 data type.
Outputs:
Tensor, parameters to be updated.
Examples:
>>> sgd = P.SGD()
>>> parameters = Tensor(np.array([2, -0.5, 1.7, 4]), mindspore.float32)
>>> gradient = Tensor(np.array([1, -1, 0.5, 2]), mindspore.float32)
>>> learning_rate = Tensor(0.01, mindspore.float32)
>>> accum = Tensor(np.array([0.1, 0.3, -0.2, -0.1]), mindspore.float32)
>>> momentum = Tensor(0.1, mindspore.float32)
>>> stat = Tensor(np.array([1.5, -0.3, 0.2, -0.7]), mindspore.float32)
>>> result = sgd(parameters, gradient, learning_rate, accum, momentum, stat)
"""
@prim_attr_register
def __init__(self, dampening=0.0, weight_decay=0.0, nesterov=False):
validator.check_value_type("nesterov", nesterov, [bool], self.name)
if nesterov and dampening != 0:
raise ValueError(f"Nesterov need zero dampening!")
self.init_prim_io_names(inputs=['parameters', 'gradient', 'learning_rate', 'accum', 'momentum', 'stat'],
outputs=['output'])
def infer_shape(self, parameters_shape, gradient_shape, learning_rate_shape,
accum_shape, momentum_shape, stat_shape):
validator.check_integer(f'parameters rank', len(parameters_shape), 0, Rel.GT, self.name)
validator.check_integer(f'gradient rank', len(gradient_shape), 0, Rel.GE, self.name)
validator.check_integer(f'learning rate rank', len(learning_rate_shape), 0, Rel.GE, self.name)
validator.check_integer(f'accumulation rank', len(accum_shape), 0, Rel.GT, self.name)
validator.check_integer(f'momentum rank', len(momentum_shape), 0, Rel.GE, self.name)
validator.check_integer(f'stat rank', len(stat_shape), 0, Rel.GE, self.name)
validator.check("gradient shape", gradient_shape, "stat shape", stat_shape, Rel.EQ, self.name)
return parameters_shape
def infer_dtype(self, parameters_dtype, gradient_dtype, learning_rate_dtype,
accum_dtype, momentum_dtype, stat_dtype):
valid_types = [mstype.float16, mstype.float32]
validator.check_tensor_type_same({"parameters": parameters_dtype}, valid_types, self.name)
validator.check_tensor_type_same({"gradient": gradient_dtype}, valid_types, self.name)
validator.check_tensor_type_same({"learning_rate": learning_rate_dtype}, valid_types, self.name)
validator.check_tensor_type_same({"accum": accum_dtype}, valid_types, self.name)
validator.check_tensor_type_same({"momentum": momentum_dtype}, valid_types, self.name)
validator.check_tensor_type_same({"stat": stat_dtype}, valid_types, self.name)
return parameters_dtype
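# A hedged sketch of the textbook SGD-with-momentum step this primitive models (see nn.SGD for the
# authoritative wiring; `stat` handling on the first step is omitted here):
#     accum = momentum * accum + (1 - dampening) * gradient
#     update = gradient + momentum * accum if nesterov else accum
#     parameters -= learning_rate * update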
class ApplyRMSProp(PrimitiveWithInfer):
"""
Optimizer that implements the Root Mean Square prop(RMSProp) algorithm.
Please refer to the usage in source code of `nn.RMSProp`.
Note:
Update `var` according to the RMSProp algorithm.
.. math::
s_{t} = \\rho s_{t-1} + (1 - \\rho)(\\nabla Q_{i}(w))^2
.. math::
m_{t} = \\beta m_{t-1} + \\frac{\\eta} {\\sqrt{s_{t} + \\epsilon}} \\nabla Q_{i}(w)
.. math::
w = w - m_{t}
where :math:`w` represents `var`, which will be updated.
:math:`s_{t}` represents `mean_square`, :math:`s_{t-1}` is the last moment of :math:`s_{t}`,
:math:`m_{t}` represents `moment`, :math:`m_{t-1}` is the last moment of :math:`m_{t}`.
:math:`\\rho` represents `decay`. :math:`\\beta` is the momentum term, represents `momentum`.
:math:`\\epsilon` is a smoothing term to avoid division by zero, represents `epsilon`.
:math:`\\eta` represents `learning_rate`. :math:`\\nabla Q_{i}(w)` represents `grad`.
Args:
use_locking (bool): Enable a lock to protect the update of variable tensors. Default: False.
Inputs:
- **var** (Tensor) - Weights to be updated.
- **mean_square** (Tensor) - Mean square gradients, must have the same type as `var`.
- **moment** (Tensor) - Delta of `var`, must have the same type as `var`.
- **learning_rate** (Union[Number, Tensor]) - Learning rate. Should be a float number or
a scalar tensor with float16 or float32 data type.
- **grad** (Tensor) - Gradients, must have the same type as `var`.
- **decay** (float) - Decay rate. Only constant value is allowed.
- **momentum** (float) - Momentum. Only constant value is allowed.
- **epsilon** (float) - Ridge term. Only constant value is allowed.
Outputs:
Tensor, parameters to be updated.
Examples:
>>> apply_rms = P.ApplyRMSProp()
>>> input_x = Tensor(1., mindspore.float32)
>>> mean_square = Tensor(2., mindspore.float32)
>>> moment = Tensor(1., mindspore.float32)
>>> grad = Tensor(2., mindspore.float32 )
>>> learning_rate = Tensor(0.9, mindspore.float32)
>>> decay = 0.0
>>> momentum = 1e-10
>>> epsilon = 0.001
>>> result = apply_rms(input_x, mean_square, moment, learning_rate, grad, decay, momentum, epsilon)
(-2.9977674, 0.80999994, 1.9987665)
"""
@prim_attr_register
def __init__(self, use_locking=False):
self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
self.init_prim_io_names(inputs=['var', 'mean_square', 'moment', 'learning_rate', 'grad',
'rho', 'momentum', 'epsilon'], outputs=['output'])
self.is_ge = context.get_context("enable_ge")
self.is_d = context.get_context("device_target") == "Ascend"
def infer_shape(self, var_shape, mean_square_shape, moment_shape, learning_rate_shape, grad_shape, decay_shape,
momentum_shape, epsilon_shape):
validator.check("var_shape", var_shape, "mean_square_shape", mean_square_shape, Rel.EQ, self.name)
validator.check("var_shape", var_shape, "moment_shape", moment_shape, Rel.EQ, self.name)
validator.check("var_shape", var_shape, "grad_shape", grad_shape, Rel.EQ, self.name)
if not self.is_ge and self.is_d:
return var_shape, var_shape, var_shape
return var_shape
def infer_dtype(self, var_dtype, mean_square_dtype, moment_dtype, learning_rate_dtype, grad_dtype, decay_dtype,
momentum_dtype, epsilon_dtype):
args = {"var": var_dtype, "mean_square": mean_square_dtype, "moment": moment_dtype, "grad": grad_dtype}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
valid_types = [mstype.float16, mstype.float32]
args_decay = {"decay": decay_dtype, 'momentum': momentum_dtype, "epsilon": epsilon_dtype}
validator.check_type_same(args_decay, valid_types, self.name)
args_lr = {"learning_rate": learning_rate_dtype, "decay": decay_dtype}
validator.check_scalar_or_tensor_type_same(args_lr, valid_types, self.name, allow_mix=True)
if not self.is_ge and self.is_d:
return var_dtype, var_dtype, var_dtype
return var_dtype
def infer_value(self, var, mean_square, moment, learning_rate, grad, decay, momentum, epsilon):
if decay is None or momentum is None or epsilon is None:
raise ValueError(f"For {self.name}, decay, momentum, epsilon must be const.")
class ApplyCenteredRMSProp(PrimitiveWithInfer):
"""
Optimizer that implements the centered RMSProp algorithm.
Please refer to the usage in source code of `nn.RMSProp`.
Note:
Update `var` according to the centered RMSProp algorithm.
.. math::
g_{t} = \\rho g_{t-1} + (1 - \\rho)\\nabla Q_{i}(w)
.. math::
s_{t} = \\rho s_{t-1} + (1 - \\rho)(\\nabla Q_{i}(w))^2
.. math::
m_{t} = \\beta m_{t-1} + \\frac{\\eta} {\\sqrt{s_{t} - g_{t}^2 + \\epsilon}} \\nabla Q_{i}(w)
.. math::
w = w - m_{t}
where :math:`w` represents `var`, which will be updated.
:math:`g_{t}` represents `mean_gradient`, :math:`g_{t-1}` is the last moment of :math:`g_{t}`.
:math:`s_{t}` represents `mean_square`, :math:`s_{t-1}` is the last moment of :math:`s_{t}`,
:math:`m_{t}` represents `moment`, :math:`m_{t-1}` is the last moment of :math:`m_{t}`.
:math:`\\rho` represents `decay`. :math:`\\beta` is the momentum term, represents `momentum`.
:math:`\\epsilon` is a smoothing term to avoid division by zero, represents `epsilon`.
:math:`\\eta` represents `learning_rate`. :math:`\\nabla Q_{i}(w)` represents `grad`.
Args:
use_locking (bool): Enable a lock to protect the update of variable tensors. Default: False.
Inputs:
- **var** (Tensor) - Weights to be updated.
- **mean_gradient** (Tensor) - Mean gradients, must have the same type as `var`.
- **mean_square** (Tensor) - Mean square gradients, must have the same type as `var`.
- **moment** (Tensor) - Delta of `var`, must have the same type as `var`.
- **grad** (Tensor) - Gradients, must have the same type as `var`.
- **learning_rate** (Union[Number, Tensor]) - Learning rate. Should be a float number or
a scalar tensor with float16 or float32 data type.
- **decay** (float) - Decay rate.
- **momentum** (float) - Momentum.
- **epsilon** (float) - Ridge term.
Outputs:
Tensor, parameters to be updated.
Examples:
>>> centered_rms_prop = P.ApplyCenteredRMSProp()
>>> input_x = Tensor(np.arange(-6, 6).astype(np.float32).reshape(2, 3, 2), mindspore.float32)
>>> mean_grad = Tensor(np.arange(12).astype(np.float32).reshape(2, 3, 2), mindspore.float32)
>>> mean_square = Tensor(np.arange(-8, 4).astype(np.float32).reshape(2, 3, 2), mindspore.float32)
>>> moment = Tensor(np.arange(12).astype(np.float32).reshape(2, 3, 2), mindspore.float32)
>>> grad = Tensor(np.arange(12).astype(np.float32).reshape(2, 3, 2), mindspore.float32)
>>> learning_rate = Tensor(0.9, mindspore.float32)
>>> decay = 0.0
>>> momentum = 1e-10
>>> epsilon = 0.05
>>> result = centered_rms_prop(input_x, mean_grad, mean_square, moment, grad,
>>> learning_rate, decay, momentum, epsilon)
[[[ -6. -9.024922]
[-12.049845 -15.074766]
[-18.09969 -21.124613]]
[[-24.149532 -27.174456]
[-30.199379 -33.2243 ]
[-36.249226 -39.274143]]]
"""
@prim_attr_register
def __init__(self, use_locking=False):
self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
self.is_ascend = context.get_context("device_target") == "Ascend"
def infer_shape(self, var_shape, mean_gradient_shape, mean_square_shape, moment_shape, grad_shape,
learning_rate_shape, decay_shape, momentum_shape, epsilon_shape):
validator.check("var_shape", var_shape, "mean_gradient_shape", mean_gradient_shape, Rel.EQ, self.name)
validator.check("var_shape", var_shape, "mean_square_shape", mean_square_shape, Rel.EQ, self.name)
validator.check("var_shape", var_shape, "moment_shape", moment_shape, Rel.EQ, self.name)
validator.check("var_shape", var_shape, "grad_shape", grad_shape, Rel.EQ, self.name)
if self.is_ascend:
return var_shape, mean_gradient_shape, mean_square_shape, moment_shape
return var_shape
def infer_dtype(self, var_dtype, mean_gradient_dtype, mean_square_dtype, moment_dtype, grad_dtype,
learning_rate_dtype, rho_dtype, momentum_dtype, epsilon_dtype):
args = {"var": var_dtype, "mean_gradient": mean_gradient_dtype,
"mean_square": mean_square_dtype, "moment": moment_dtype, "grad": grad_dtype}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
valid_types = [mstype.float16, mstype.float32]
args_rho = {"rho": rho_dtype, 'momentum': momentum_dtype, "epsilon": epsilon_dtype}
validator.check_type_same(args_rho, valid_types, self.name)
args_lr = {"learning_rate": learning_rate_dtype, "rho": rho_dtype}
validator.check_scalar_or_tensor_type_same(args_lr, valid_types, self.name, allow_mix=True)
if self.is_ascend:
return var_dtype, mean_gradient_dtype, mean_square_dtype, moment_dtype
return var_dtype
class LayerNorm(Primitive):
r"""
Applies the Layer Normalization to the input tensor.
This operator will normalize the input tensor on given axis. LayerNorm is described in the paper
`Layer Normalization <https://arxiv.org/abs/1607.06450>`_.
.. math::
y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta
where :math:`\gamma` is scale, :math:`\beta` is bias, :math:`\epsilon` is epsilon.
Args:
begin_norm_axis (int): The begin axis of the `input_x` to apply LayerNorm,
the value should be in [-1, rank(input)). Default: 1.
begin_params_axis (int): The begin axis of the parameter input (`gamma`, `beta`) to
apply LayerNorm, the value should be in [-1, rank(input)). Default: 1.
epsilon (float): A value added to the denominator for numerical stability. Default: 1e-7.
Inputs:
- **input_x** (Tensor) - Tensor of shape :math:`(N, \ldots)`.
The input of LayerNorm.
- **gamma** (Tensor) - Tensor of shape :math:`(P_0, \ldots, P_\text{begin_params_axis})`.
The learnable parameter `gamma` as the scale on norm.
- **beta** (Tensor) - Tensor of shape :math:`(P_0, \ldots, P_\text{begin_params_axis})`.
The learnable parameter `beta` as the offset on norm.
Outputs:
tuple[Tensor], tuple of 3 tensors, the normalized input and the updated parameters.
- **output_x** (Tensor) - The normalized input, has the same type and shape as the `input_x`.
The shape is :math:`(N, C)`.
- **mean** (Tensor) - Tensor of shape :math:`(C,)`.
- **variance** (Tensor) - Tensor of shape :math:`(C,)`.
Examples:
>>> input_x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]), mindspore.float32)
>>> gamma = Tensor(np.ones([3]), mindspore.float32)
>>> beta = Tensor(np.ones([3]), mindspore.float32)
>>> layer_norm = P.LayerNorm()
>>> output = layer_norm(input_x, gamma, beta)
([[-0.22474492, 1., 2.2247488], [-0.22474492, 1., 2.2247488]],
[[2.], [2.]], [[0.6666667], [0.6666667]])
"""
@prim_attr_register
def __init__(self, begin_norm_axis=1, begin_params_axis=1, epsilon=1e-7):
validator.check_value_type('begin_norm_axis', begin_norm_axis, [int], self.name)
validator.check_value_type('begin_params_axis', begin_params_axis, [int], self.name)
validator.check_value_type('epsilon', epsilon, [float], self.name)
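# A minimal NumPy cross-check of the docstring example above (illustrative, assumes numpy as np):
#     x = np.array([1., 2., 3.])
#     mean, var = x.mean(), x.var()                       # 2.0 and 0.6666667, matching the outputs
#     (x - mean) / np.sqrt(var + 1e-7) * 1.0 + 1.0        # ~[-0.2247, 1.0, 2.2247], matching output_x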
class L2Normalize(PrimitiveWithInfer):
r"""
L2 normalization Operator.
This operator normalizes the input along the given axis. The function is shown as follows:
.. math::
\text{output} = \frac{x}{\sqrt{\text{max}(\text{sum} (\text{input_x}^2), \epsilon)}},
where :math:`\epsilon` is epsilon.
Args:
axis (int): The begin axis of the input on which to apply the L2 normalization. Default: 0.
epsilon (float): A small value added for numerical stability. Default: 1e-4.
Inputs:
- **input_x** (Tensor) - Input to compute the normalization. Data type should be float16 or float32.
Outputs:
Tensor, with the same type and shape as the input.
Examples:
>>> l2_normalize = P.L2Normalize()
>>> input_x = Tensor(np.random.randint(-256, 256, (2, 3, 4)), mindspore.float32)
>>> result = l2_normalize(input_x)
[[[-0.47247353 -0.30934513 -0.4991462 0.8185567 ]
[-0.08070751 -0.9961299 -0.5741758 0.09262337]
[-0.9916556 -0.3049123 0.5730487 -0.40579924]]
[[-0.88134485 0.9509498 -0.86651784 0.57442576]
[ 0.99673784 0.08789381 -0.8187321 0.9957012 ]
[ 0.12891524 -0.9523804 -0.81952125 0.91396334]]]
"""
@prim_attr_register
def __init__(self, axis=0, epsilon=1e-4):
validator.check_value_type('axis', axis, [int], self.name)
validator.check_value_type('epsilon', epsilon, [int, float], self.name)
def infer_shape(self, input_x):
dim = len(input_x)
validator.check_int_range('axis value', self.axis, -dim, dim, Rel.INC_LEFT, self.name)
return input_x
def infer_dtype(self, input_x):
validator.check_subclass("x", input_x, mstype.tensor, self.name)
validator.check_tensor_type_same({"input_x": input_x}, [mstype.float16, mstype.float32], self.name)
return input_x
class DropoutGenMask(Primitive):
"""
Generates the mask value for the input shape.
Args:
Seed0 (int): Seed0 value for random number generation. Default: 0.
Seed1 (int): Seed1 value for random number generation. Default: 0.
Inputs:
- **shape** (tuple[int]) - The shape of target mask.
- **keep_prob** (Tensor) - The keep rate, between 0 and 1, e.g. keep_prob = 0.9,
means dropping out 10% of input units.
Outputs:
Tensor, the value of generated mask for input shape.
Examples:
>>> dropout_gen_mask = P.DropoutGenMask()
>>> shape = (20, 16, 50)
>>> keep_prob = Tensor(0.5, mindspore.float32)
>>> mask = dropout_gen_mask(shape, keep_prob)
"""
@prim_attr_register
def __init__(self, Seed0=0, Seed1=0):
self.init_prim_io_names(inputs=['shape', 'keep_prob'], outputs=['output'])
validator.check_value_type("Seed0", Seed0, [int], self.name)
validator.check_value_type("Seed1", Seed1, [int], self.name)
self.add_prim_attr("_random_effect", True)
class DropoutDoMask(PrimitiveWithInfer):
"""
Applies dropout mask on the input tensor.
Take the mask output of DropoutGenMask as input, and apply dropout on the input.
Inputs:
- **input_x** (Tensor) - The input tensor.
- **mask** (Tensor) - The mask to be applied on `input_x`, which is the output of `DropoutGenMask`. The
shape of `input_x` must be the same as the value of `DropoutGenMask`'s input `shape`. If a wrong `mask` is
passed in, the output of `DropoutDoMask` is unpredictable.
- **keep_prob** (Tensor) - The keep rate, between 0 and 1, e.g. keep_prob = 0.9,
means dropping out 10% of input units. The value of `keep_prob` is the same as the input `keep_prob` of
`DropoutGenMask`.
Outputs:
Tensor, the result after applying dropout.
Examples:
>>> x = Tensor(np.ones([20, 16, 50]), mindspore.float32)
>>> shape = (20, 16, 50)
>>> keep_prob = Tensor(0.5, mindspore.float32)
>>> dropout_gen_mask = P.DropoutGenMask()
>>> dropout_do_mask = P.DropoutDoMask()
>>> mask = dropout_gen_mask(shape, keep_prob)
>>> output = dropout_do_mask(x, mask, keep_prob)
>>> assert output.shape == (20, 16, 50)
"""
@prim_attr_register
def __init__(self):
pass
def __infer__(self, input_x, mask, keep_prob):
input_x_shape = input_x['shape']
mask_shape = mask['shape']
keep_prob_shape = keep_prob['shape']
validator.check("keep_prob's dim", len(keep_prob_shape), '0(scalar)', 0, Rel.EQ, self.name)
size_x = reduce(lambda x, y: x * y, input_x_shape)
if len(mask_shape) != 1:
raise ValueError("DropoutDoMask mask shape should be 1-dimension.")
size_y = mask_shape[0] * 8
if size_x > size_y:
raise ValueError(f"DropoutDoMask y mask do not math input input_x shape:"
"{input_x_shape}, mask shape: {mask_shape}.")
validator.check_tensor_type_same({"input_x": input_x['dtype']}, [mstype.float32, mstype.float16, mstype.int32],
self.name)
validator.check_tensor_type_same({"input_mask": mask['dtype']}, [mstype.uint8], self.name)
keep_prob_v = keep_prob['value']
if keep_prob_v is not None:
validator.check_number_range('keep_prob', keep_prob_v.asnumpy(), 0, 1, Rel.INC_BOTH, self.name)
out = {'shape': input_x_shape,
'dtype': input_x['dtype'],
'value': None}
return out
class ResizeBilinear(PrimitiveWithInfer):
r"""
Resizes the image to certain size using bilinear interpolation.
The resizing only affects the lower two dimensions which represent the height and width. The input images
can be represented by different data types, but the data types of output images are always float32.
Args:
size (tuple[int]): A tuple of 2 int elements `(new_height, new_width)`, the new size for the images.
align_corners (bool): If it's true, rescale input by `(new_height - 1) / (height - 1)`,
which exactly aligns the 4 corners of images and resized images. If it's false,
rescale by `new_height / height`. Default: False.
Inputs:
- **input** (Tensor) - Image to be resized. Tensor of shape `(N_i, ..., N_n, height, width)`,
with data type of float32 or float16.
Outputs:
Tensor, resized image. Tensor of shape `(N_i, ..., N_n, new_height, new_width)` in `float32`.
Examples:
>>> tensor = Tensor([[[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]]], mindspore.float32)
>>> resize_bilinear = P.ResizeBilinear((5, 5))
>>> result = resize_bilinear(tensor)
>>> assert result.shape == (1, 1, 5, 5)
"""
@prim_attr_register
def __init__(self, size, align_corners=False):
pass
def infer_shape(self, input_shape):
input_shape = list(input_shape)
batch, channel, _, _ = input_shape
out_shape = [batch, channel]
for i in self.size:
out_shape.append(int(i))
return out_shape
def infer_dtype(self, input_dtype):
validator.check_tensor_type_same({'input_dtype': input_dtype}, [mstype.float16, mstype.float32], self.name)
return mstype.tensor_type(mstype.float32)
class OneHot(PrimitiveWithInfer):
r"""
Computes a one-hot tensor.
Makes a new tensor, whose locations represented by indices in `indices` take value `on_value`, while all
other locations take value `off_value`.
Note:
If the input indices is rank `N`, the output will have rank `N+1`. The new axis is created at dimension `axis`.
Args:
axis (int): Position to insert the value. e.g. if `indices` shape is [n, c] and `axis` is `-1`, the output shape
will be [n, c, depth]; if `axis` is `0`, the output shape will be [depth, n, c]. Default: -1.
Inputs:
- **indices** (Tensor) - A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`.
Data type must be int32.
- **depth** (int) - A scalar defining the depth of the one hot dimension.
- **on_value** (Tensor) - A value to fill in output when `indices[j] = i`. With data type of float16 or float32.
- **off_value** (Tensor) - A value to fill in output when `indices[j] != i`.
Has the same data type as `on_value`.
Outputs:
Tensor, one_hot tensor. Tensor of shape :math:`(X_0, \ldots, X_{axis}, \text{depth} ,X_{axis+1}, \ldots, X_n)`.
Examples:
>>> indices = Tensor(np.array([0, 1, 2]), mindspore.int32)
>>> depth, on_value, off_value = 3, Tensor(1.0, mindspore.float32), Tensor(0.0, mindspore.float32)
>>> onehot = P.OneHot()
>>> result = onehot(indices, depth, on_value, off_value)
[[1, 0, 0], [0, 1, 0], [0, 0, 1]]
"""
@prim_attr_register
def __init__(self, axis=-1):
self.init_prim_io_names(inputs=['indices', 'depth', 'on_value', 'off_value'], outputs=['output'])
validator.check_value_type("axis", axis, [int], self.name)
def __infer__(self, indices, depth, on_value, off_value):
# check type
validator.check_tensor_type_same({"indices": indices['dtype']}, (mstype.int32,), self.name)
validator.check_type_name("depth", depth['dtype'], mstype.int_type, self.name)
args = {"on_value": on_value['dtype'], "off_value": off_value['dtype']}
validator.check_tensor_type_same(args, (mstype.float16, mstype.float32), self.name)
# check shape
indices_shp = indices['shape']
validator.check_int_range("axis", self.axis, -1, len(indices_shp), Rel.INC_BOTH, self.name)
depth_val = depth['value']
validator.check_integer("depth", depth_val, 0, Rel.GE, self.name)
# create new dimension at end if self.axis is -1
_ = indices_shp.insert(self.axis, depth_val) if self.axis >= 0 else indices_shp.append(depth_val)
return {'shape': indices_shp,
'dtype': on_value['dtype'],
'value': None}
class Gelu(PrimitiveWithInfer):
r"""
Gaussian Error Linear Units activation function.
GeLU is described in the paper `Gaussian Error Linear Units (GELUs) <https://arxiv.org/abs/1606.08415>`_.
Also refer to `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding
<https://arxiv.org/abs/1810.04805>`_.
Gelu is defined as follows:
.. math::
\text{output} = 0.5 * x * (1 + erf(x / \sqrt{2})),
where :math:`erf` is the "Gauss error function".
Inputs:
- **input_x** (Tensor) - Input to compute the Gelu with data type of float16 or float32.
Outputs:
Tensor, with the same type and shape as input.
Examples:
>>> tensor = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> gelu = P.Gelu()
>>> result = gelu(tensor)
"""
@prim_attr_register
def __init__(self):
"""init GeLU"""
self.init_prim_io_names(inputs=['x'], outputs=['output'])
def infer_shape(self, input_x):
return input_x
def infer_dtype(self, input_x):
validator.check_tensor_type_same({"input_x": input_x}, (mstype.float16, mstype.float32), self.name)
return input_x
class GetNext(PrimitiveWithInfer):
"""
Returns the next element in the dataset queue.
Note:
The GetNext operation needs to be associated with a network and also depends on the init_dataset interface;
it can't be used directly as a single operation.
For details, please refer to `nn.DataWrapper` source code.
Args:
types (list[:class:`mindspore.dtype`]): The type of the outputs.
shapes (list[tuple[int]]): The dimensionality of the outputs.
output_num (int): The output number, length of `types` and `shapes`.
shared_name (str): The queue name of `init_dataset` interface.
Inputs:
No inputs.
Outputs:
tuple[Tensor], the output of Dataset. The shape is described in `shapes`
and the type is described in `types`.
Examples:
>>> get_next = P.GetNext([mindspore.float32, mindspore.int32], [[32, 1, 28, 28], [10]], 2, 'shared_name')
>>> feature, label = get_next()
"""
@prim_attr_register
def __init__(self, types, shapes, output_num, shared_name):
validator.check_value_type("types", types, [list, tuple], self.name)
validator.check_value_type("shapes", shapes, [list, tuple], self.name)
validator.check("types length", len(types), "shapes length", len(shapes), Rel.EQ, self.name)
validator.check_value_type("output_num", output_num, [int], self.name)
def infer_shape(self):
return tuple(self.shapes)
def infer_dtype(self):
return tuple(self.types)
class PReLU(PrimitiveWithInfer):
r"""
Parametric Rectified Linear Unit activation function.
PReLU is described in the paper `Delving Deep into Rectifiers: Surpassing Human-Level Performance on
ImageNet Classification <https://arxiv.org/abs/1502.01852>`_. Defined as follows:
.. math::
prelu(x_i)= \max(0, x_i) + \min(0, w * x_i),
where :math:`x_i` is an element of a channel of the input.
Note:
1-dimensional input_x is not supported.
Inputs:
- **input_x** (Tensor) - Float tensor, representing the output of the previous layer.
With data type of float16 or float32.
- **weight** (Tensor) - Float Tensor, w > 0. Only two shapes are legitimate:
1, or the number of channels of the input. With data type of float16 or float32.
Outputs:
Tensor, with the same type as `input_x`.
For detailed information, please refer to `nn.PReLU`.
Examples:
>>> import mindspore
>>> import mindspore.nn as nn
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.prelu = P.PReLU()
>>> def construct(self, input_x, weight):
>>> result = self.prelu(input_x, weight)
>>> return result
>>>
>>> input_x = Tensor(np.random.randint(-3, 3, (2, 3, 2)), mindspore.float32)
>>> weight = Tensor(np.array([0.1, 0.6, -0.3]), mindspore.float32)
>>> net = Net()
>>> result = net(input_x, weight)
[[[-0.1 1. ]
[ 0. 2. ]
[0. 0. ]]
[[-0.2 -0.1 ]
[2. -1.8000001]
[0.6 0.6 ]]]
"""
@prim_attr_register
def __init__(self):
pass
def infer_shape(self, input_x_shape, weight_shape):
input_x_dim = len(input_x_shape)
weight_dim = len(weight_shape)
if input_x_dim == 1:
raise ValueError(f'For \'{self.name}\' input_x rank 1 is not supported.')
if weight_dim != 1:
raise ValueError(f'For \'{self.name}\' weight_dim must be 1, while weight_dim is {weight_dim}.')
if weight_shape[0] != input_x_shape[1] and weight_shape[0] != 1:
raise ValueError(f'For \'{self.name}\' channel of input_x and weight must be matched,'
f' while channel of input_x is {input_x_shape[1]},'
f' weight_shape[0] is {weight_shape[0]}.')
return input_x_shape
def infer_dtype(self, input_x_dtype, weight_dtype):
valid_types = (mstype.float16, mstype.float32)
validator.check_tensor_type_same({"input_x": input_x_dtype}, valid_types, self.name)
validator.check_tensor_type_same({"weight": weight_dtype}, valid_types, self.name)
return input_x_dtype
class LSTM(PrimitiveWithInfer):
"""
Performs the long short term memory (LSTM) on the input.
For detailed information, please refer to `nn.LSTM`.
"""
@prim_attr_register
def __init__(self, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout):
self.input_size = validator.check_integer("input_size", input_size, 0, Rel.GT, self.name)
self.hidden_size = validator.check_integer("hidden_size", hidden_size, 0, Rel.GT, self.name)
self.num_layers = validator.check_integer("num_layers", num_layers, 0, Rel.GT, self.name)
self.has_bias = validator.check_value_type("has_bias", has_bias, (bool,), self.name)
self.bidirectional = validator.check_value_type("bidirectional", bidirectional, (bool,), self.name)
self.dropout = validator.check_value_type("dropout", dropout, [float], self.name)
self.dropout = validator.check_number_range('dropout', dropout, 0, 1, Rel.INC_BOTH, self.name)
if bidirectional:
self.num_directions = 2
else:
self.num_directions = 1
def infer_shape(self, x_shape, h_shape, c_shape, w_shape):
# (seq, batch_size, feature)
validator.check_integer("x rank", len(x_shape), 3, Rel.EQ, self.name)
validator.check_integer("x[2]", x_shape[2], self.input_size, Rel.EQ, self.name)
# h and c should be same shape
validator.check_integer("h rank", len(h_shape), 3, Rel.EQ, self.name)
validator.check("h_shape", h_shape, "c_shape", c_shape, Rel.EQ, self.name)
# (num_layers * num_directions, batch, hidden_size)
validator.check_integer("h[0]", h_shape[0], self.num_layers * self.num_directions, Rel.EQ, self.name)
validator.check_integer("h[1]", h_shape[1], x_shape[1], Rel.EQ, self.name)
validator.check_integer("h[2]", h_shape[2], self.hidden_size, Rel.EQ, self.name)
y_shape = (x_shape[0], x_shape[1], self.hidden_size * self.num_directions)
# set arbitrary shape for reserved space
type_size = 4
gates_ws_ld = self.get_good_ld(self.hidden_size * 4, type_size)
states_ws_ld = self.get_good_ld(max(self.hidden_size, self.input_size), type_size)
self.ws_gates_size = self.num_layers * self.num_directions * x_shape[0] * x_shape[1] * gates_ws_ld * type_size
self.ws_states_size = (self.num_layers + 1) * self.num_directions * (x_shape[0] + 1) * x_shape[
1] * states_ws_ld * type_size
self.ws_c_states_size = (self.num_layers + 1) * self.num_directions * (x_shape[0] + 1) * x_shape[
1] * states_ws_ld * type_size
self.ws_diff_states_size = (self.num_layers + 1) * self.num_directions * (x_shape[0] + 1) * (2 + 1) * x_shape[
1] * states_ws_ld * type_size
self.ws_grid_comp_size = 0
self.page_size = 4096
current_offset = 0
current_offset += self.ws_gates_size
current_offset = self.rnd_up(current_offset, self.page_size)
current_offset += self.ws_states_size
current_offset = self.rnd_up(current_offset, self.page_size)
current_offset += self.ws_c_states_size
current_offset = self.rnd_up(current_offset, self.page_size)
current_offset += self.ws_diff_states_size
current_offset = self.rnd_up(current_offset, self.page_size)
current_offset += self.ws_grid_comp_size
reserved_shape = (current_offset, 1)
state_shape = (1, 1)
return (y_shape, h_shape, c_shape, reserved_shape, state_shape)
def infer_dtype(self, x_dtype, h_dtype, c_dtype, w_dtype):
args = {'x': x_dtype, 'h': h_dtype, 'c': c_dtype, 'w': w_dtype}
validator.check_tensor_type_same(args, (mstype.float32, mstype.float16), self.name)
return (x_dtype, x_dtype, x_dtype, x_dtype, x_dtype)
def rnd_up(self, current_offset, page_size):
return ((current_offset + page_size - 1) // page_size) * page_size
def get_good_ld(self, dim, type_size):
ld = self.rnd_up(dim, 64 // type_size)
# pad leading dimensions that land on an exact multiple of 256 (presumably to avoid cache aliasing)
if ld % 256 == 0:
return ld + 64 // type_size
return ld
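# Workspace note (a reading of the sizing code above, not an official spec): rnd_up rounds offsets
# up to page_size boundaries, and get_good_ld pads leading dimensions that land on an exact multiple
# of 256 elements, a layout heuristic commonly seen in MKL-DNN-style RNN workspaces.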
class SigmoidCrossEntropyWithLogits(PrimitiveWithInfer):
r"""
Uses the given logits to compute sigmoid cross entropy.
Note:
Sets input logits as `X`, input label as `Y`, output as `loss`. Then,
.. math::
p_{ij} = sigmoid(X_{ij}) = \frac{1}{1 + e^{-X_{ij}}}
.. math::
loss_{ij} = -[Y_{ij} * ln(p_{ij}) + (1 - Y_{ij})ln(1 - p_{ij})]
Inputs:
- **logits** (Tensor) - Input logits.
- **label** (Tensor) - Ground truth label.
Outputs:
Tensor, with the same shape and type as input `logits`.
Examples:
>>> logits = Tensor(np.random.randn(2, 3).astype(np.float16))
>>> labels = Tensor(np.random.randn(2, 3).astype(np.float16))
>>> sigmoid = P.SigmoidCrossEntropyWithLogits()
>>> sigmoid(logits, labels)
"""
@prim_attr_register
def __init__(self):
"""Init SigmoidCrossEntropyWithLogits"""
self.init_prim_io_names(inputs=['predict', 'target'], outputs=['loss'])
def infer_shape(self, x_shape, y_shape):
validator.check("x_shape", x_shape, "y_shape", y_shape, Rel.EQ, self.name)
return x_shape
def infer_dtype(self, x_dtype, y_dtype):
args = {"x_dtype": x_dtype, "y_dtype": y_dtype}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
return x_dtype
class Pad(PrimitiveWithInfer):
"""
Pads input tensor according to the paddings.
Args:
paddings (tuple): The shape of parameter `paddings` is (N, 2). N is the rank of input data. All elements of
paddings are int type. For the `D` th dimension of the input, paddings[D, 0] indicates how many elements to
pad before the input tensor in the `D` th dimension, and paddings[D, 1] indicates how many elements to
pad after the input tensor in the `D` th dimension.
Inputs:
- **input_x** (Tensor) - The input tensor.
Outputs:
Tensor, the tensor after padding.
Examples:
>>> input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
>>> pad_op = P.Pad(((1, 2), (2, 1)))
>>> output_tensor = pad_op(input_tensor)
>>> assert output_tensor == Tensor(np.array([[ 0. , 0. , 0. , 0. , 0. , 0. ],
>>> [ 0. , 0. , -0.1, 0.3, 3.6, 0. ],
>>> [ 0. , 0. , 0.4, 0.5, -3.2, 0. ],
>>> [ 0. , 0. , 0. , 0. , 0. , 0. ],
>>> [ 0. , 0. , 0. , 0. , 0. , 0. ]]), mindspore.float32)
"""
@prim_attr_register
def __init__(self, paddings):
"""Init Pad"""
self.init_prim_io_names(inputs=['x'], outputs=['y'])
if not isinstance(paddings, tuple):
raise TypeError('Paddings must be tuple type.')
for item in paddings:
if len(item) != 2:
raise ValueError('The shape of paddings must be (n, 2).')
self.paddings = paddings
def infer_shape(self, x):
paddings = np.array(self.paddings)
validator.check_integer('paddings.shape', paddings.size, len(x) * 2, Rel.EQ, self.name)
if not np.all(paddings >= 0):
raise ValueError('All elements of paddings must be >= 0.')
y_shape = ()
for i in range(int(paddings.size / 2)):
y_shape += ((x[i] + paddings[i, 0] + paddings[i, 1]),)
return y_shape
def infer_dtype(self, x):
validator.check_subclass("input_x", x, mstype.tensor, self.name)
return x
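# Shape check for the docstring example above: input (2, 3) with paddings ((1, 2), (2, 1)) yields
# (2 + 1 + 2, 3 + 2 + 1) = (5, 6), matching the 5 x 6 array in the documented output.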
class MirrorPad(PrimitiveWithInfer):
"""
Pads the input tensor according to the paddings and mode.
Args:
mode (str): Specifies padding mode. The optional values are "REFLECT", "SYMMETRIC".
Default: "REFLECT".
Inputs:
- **input_x** (Tensor) - The input tensor.
- **paddings** (Tensor) - The paddings tensor. The value of `paddings` is a matrix(list),
and its shape is (N, 2). N is the rank of input data. All elements of paddings
are int type. For the `D` th dimension of the input, paddings[D, 0] indicates how many elements to
pad before the input tensor in the `D` th dimension, and paddings[D, 1] indicates how many elements to
pad after the input tensor in the `D` th dimension.
Outputs:
Tensor, the tensor after padding.
- If `mode` is "REFLECT", it uses a way of symmetrical copying throught the axis of symmetry to fill in.
If the `input_x` is [[1,2,3],[4,5,6],[7,8,9]] and `paddings` is [[1,1],[2,2]], then the
Outputs is [[6,5,4,5,6,5,4],[3,2,1,2,3,2,1],[6,5,4,5,6,5,4],[9,8,7,8,9,8,7],[6,5,4,5,6,5,4]].
- If `mode` is "SYMMETRIC", the filling method is similar to the "REFLECT". It is also copied
according to the symmetry axis, except that it includes the symmetry axis. If the `input_x`
is [[1,2,3],[4,5,6],[7,8,9]] and `paddings` is [[1,1],[2,2]], then the Outputs is
[[2,1,1,2,3,3,2],[2,1,1,2,3,3,2],[5,4,4,5,6,6,5],[8,7,7,8,9,9,8],[8,7,7,8,9,9,8]].
Examples:
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> import mindspore.nn as nn
>>> import numpy as np
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.pad = P.MirrorPad(mode="REFLECT")
>>> def construct(self, x, paddings):
>>> return self.pad(x, paddings)
>>> x = np.random.random(size=(2, 3)).astype(np.float32)
>>> paddings = Tensor([[1,1],[2,2]])
>>> pad = Net()
>>> ms_output = pad(Tensor(x), paddings)
"""
@prim_attr_register
def __init__(self, mode='REFLECT'):
"""Init Pad"""
validator.check_string('mode', mode, ['REFLECT', 'SYMMETRIC'], self.name)
self.mode = mode
self.set_const_input_indexes([1])
def __infer__(self, input_x, paddings):
validator.check_subclass("input_x", input_x['dtype'], mstype.tensor, self.name)
validator.check_subclass("paddings", paddings['dtype'], mstype.tensor, self.name)
x_shape = list(input_x['shape'])
paddings_value = paddings['value'].asnumpy()
paddings_size = paddings_value.size
validator.check_integer('paddings.shape', paddings_size, len(x_shape) * 2, Rel.EQ, self.name)
if not np.all(paddings_value >= 0):
raise ValueError('All elements of paddings must be >= 0.')
adjust = 0
if self.mode == 'SYMMETRIC':
adjust = 1
for i in range(0, int(paddings_size / 2)):
if (paddings_value[i, 0] >= x_shape[i] + adjust) or (paddings_value[i, 1] >= x_shape[i] + adjust):
raise ValueError('At least one dim has too high a padding value for this input and mode')
y_shape = ()
for i in range(0, int(paddings_size / 2)):
y_shape += ((x_shape[i] + paddings_value[i, 0] + paddings_value[i, 1]),)
return {'shape': y_shape,
'dtype': input_x['dtype'],
'value': None}
class ROIAlign(PrimitiveWithInfer):
"""
Computes Region of Interest (RoI) Align operator.
The operator computes the value of each sampling point by bilinear interpolation from the nearby grid points on the
feature map. No quantization is performed on any coordinates involved in the RoI, its bins, or the sampling
points. The details of (RoI) Align operator are described in `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_.
Args:
pooled_height (int): The output features' height.
pooled_width (int): The output features' width.
spatial_scale (float): A scaling factor that maps the raw image coordinates to the input
feature map coordinates. Suppose the height of a RoI is `ori_h` in the raw image and `fea_h` in the
input feature map, the `spatial_scale` should be `fea_h / ori_h`.
sample_num (int): Number of sampling points. Default: 2.
roi_end_mode (int): Number must be 0 or 1. Default: 1.
Inputs:
- **features** (Tensor) - The input features, whose shape should be `(N, C, H, W)`.
- **rois** (Tensor) - The shape is `(rois_n, 5)`. With data type of float16 or float32.
`rois_n` represents the number of RoI. The size of the second dimension should be `5` and the `5` columns
are `(image_index, top_left_x, top_left_y, bottom_right_x, bottom_right_y)`. `image_index` represents the
index of image. `top_left_x` and `top_left_y` represent the `x, y` coordinates of the top left corner
of corresponding RoI, respectively. `bottom_right_x` and `bottom_right_y` represent the `x, y`
coordinates of the bottom right corner of corresponding RoI, respectively.
Outputs:
Tensor, the shape is `(rois_n, C, pooled_height, pooled_width)`.
Examples:
>>> input_tensor = Tensor(np.array([[[[1., 2.], [3., 4.]]]]), mindspore.float32)
>>> rois = Tensor(np.array([[0, 0.2, 0.3, 0.2, 0.3]]), mindspore.float32)
>>> roi_align = P.ROIAlign(2, 2, 0.5, 2)
>>> output_tensor = roi_align(input_tensor, rois)
>>> assert output_tensor == Tensor(np.array([[[[2.15]]]]), mindspore.float32)
"""
@prim_attr_register
def __init__(self, pooled_height, pooled_width, spatial_scale, sample_num=2, roi_end_mode=1):
"""init ROIAlign"""
validator.check_value_type("pooled_height", pooled_height, [int], self.name)
validator.check_value_type("pooled_width", pooled_width, [int], self.name)
validator.check_value_type("spatial_scale", spatial_scale, [float], self.name)
validator.check_value_type("sample_num", sample_num, [int], self.name)
validator.check_value_type("roi_end_mode", roi_end_mode, [int], self.name)
validator.check_int_range("roi_end_mode", roi_end_mode, 0, 1, Rel.INC_BOTH, self.name)
self.pooled_height = pooled_height
self.pooled_width = pooled_width
self.spatial_scale = spatial_scale
self.sample_num = sample_num
self.roi_end_mode = roi_end_mode
def infer_shape(self, inputs_shape, rois_shape):
return [rois_shape[0], inputs_shape[1], self.pooled_height, self.pooled_width]
def infer_dtype(self, inputs_type, rois_type):
valid_types = (mstype.float16, mstype.float32)
validator.check_tensor_type_same({"inputs_type": inputs_type}, valid_types, self.name)
validator.check_tensor_type_same({"rois_type": rois_type}, valid_types, self.name)
return inputs_type
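# --- Hedged reference sketch (illustrative only) ---
# Spells out the output-shape rule from ROIAlign.infer_shape: the leading dimension comes
# from the number of RoIs and the spatial size from the pooled attributes. The helper name
# is hypothetical.
def _roi_align_output_shape_sketch(features_shape, rois_shape, pooled_height, pooled_width):
    """Return [rois_n, C, pooled_height, pooled_width], as ROIAlign.infer_shape does."""
    return [rois_shape[0], features_shape[1], pooled_height, pooled_width]
# Example: 100 RoIs over (1, 256, 38, 38) features with 7x7 pooling -> [100, 256, 7, 7]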
class Adam(PrimitiveWithInfer):
r"""
Updates gradients by Adaptive Moment Estimation (Adam) algorithm.
The Adam algorithm is proposed in `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_.
The updating formulas are as follows,
.. math::
\begin{array}{ll} \\
m = \beta_1 * m + (1 - \beta_1) * g \\
v = \beta_2 * v + (1 - \beta_2) * g * g \\
l = \alpha * \frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t} \\
w = w - l * \frac{m}{\sqrt{v} + \epsilon}
\end{array}
:math:`m` represents the 1st moment vector, :math:`v` represents the 2nd moment vector, :math:`g` represents
`gradient`, :math:`l` represents scaling factor `lr`, :math:`\beta_1, \beta_2` represent `beta1` and `beta2`,
:math:`t` represents updating step while :math:`beta_1^t` and :math:`beta_2^t` represent `beta1_power` and
`beta2_power`, :math:`\alpha` represents `learning_rate`, :math:`w` represents `var`, :math:`\epsilon` represents
`epsilon`.
Args:
use_locking (bool): Whether to enable a lock to protect variable tensors from being updated.
If true, updates of the var, m, and v tensors will be protected by a lock.
If false, the result is unpredictable. Default: False.
use_nesterov (bool): Whether to use Nesterov Accelerated Gradient (NAG) algorithm to update the gradients.
If true, update the gradients using NAG.
If false, update the gradients without using NAG. Default: False.
Inputs:
- **var** (Tensor) - Weights to be updated.
- **m** (Tensor) - The 1st moment vector in the updating formula, has the same type as `var`.
- **v** (Tensor) - The 2nd moment vector in the updating formula.
Mean square gradients with the same type as `var`.
- **beta1_power** (float) - :math:`beta_1^t` in the updating formula.
- **beta2_power** (float) - :math:`beta_2^t` in the updating formula.
- **lr** (float) - :math:`l` in the updating formula.
- **beta1** (float) - The exponential decay rate for the 1st moment estimations.
- **beta2** (float) - The exponential decay rate for the 2nd moment estimations.
- **epsilon** (float) - Term added to the denominator to improve numerical stability.
- **gradient** (Tensor) - Gradients, has the same type as `var`.
Outputs:
Tuple of 3 Tensors, the updated parameters.
- **var** (Tensor) - The same shape and data type as `var`.
- **m** (Tensor) - The same shape and data type as `m`.
- **v** (Tensor) - The same shape and data type as `v`.
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.apply_adam = P.Adam()
>>> self.var = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="var")
>>> self.m = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="m")
>>> self.v = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="v")
>>> def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad):
>>> out = self.apply_adam(self.var, self.m, self.v, beta1_power, beta2_power, lr, beta1, beta2,
>>> epsilon, grad)
>>> return out
>>> net = Net()
>>> gradient = Tensor(np.random.rand(3, 3, 3).astype(np.float32))
>>> result = net(0.9, 0.999, 0.001, 0.9, 0.999, 1e-8, gradient)
"""
@prim_attr_register
def __init__(self, use_locking=False, use_nesterov=False):
validator.check_value_type("use_locking", use_locking, [bool], self.name)
validator.check_value_type("use_nesterov", use_nesterov, [bool], self.name)
def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, beta2_power_shape, lr_shape,
beta1_shape, beta2_shape, epsilon_shape, grad_shape):
validator.check("var_shape", var_shape, "m_shape", m_shape, Rel.EQ, self.name)
validator.check("var_shape", var_shape, "v_shape", v_shape, Rel.EQ, self.name)
validator.check("var_shape", var_shape, "grad_shape", grad_shape, Rel.EQ, self.name)
return var_shape, m_shape, v_shape
def infer_dtype(self, var_dtype, m_dtype, v_dtype, beta1_power_dtype, beta2_power_dtype, lr_dtype,
beta1_dtype, beta2_dtype, epsilon_dtype, grad_dtype):
args = {"var": var_dtype, "m": m_dtype, "v": v_dtype, "grad": grad_dtype}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
args = {"beta1_power": beta1_power_dtype, "beta2_power": beta2_power_dtype, 'lr': lr_dtype,
"beta1": beta1_dtype, "beta2": beta2_dtype, "epsilon": epsilon_dtype}
validator.check_scalar_or_tensor_type_same(args, [mstype.float16, mstype.float32], self.name, True)
return var_dtype, m_dtype, v_dtype
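# --- Hedged reference sketch (illustrative only, not the kernel used by Adam above) ---
# A minimal NumPy transcription of the update formulas quoted in the Adam docstring,
# assuming the module-level `np` import and plain float scalars. The Nesterov branch is an
# assumption about how `use_nesterov` is applied; the helper name is hypothetical.
def _adam_update_sketch(var, m, v, beta1_power, beta2_power, lr, beta1, beta2, eps, grad,
                        use_nesterov=False):
    """Return updated (var, m, v) following the docstring formulas."""
    m = beta1 * m + (1 - beta1) * grad
    v = beta2 * v + (1 - beta2) * grad * grad
    scale = lr * np.sqrt(1 - beta2_power) / (1 - beta1_power)
    update = beta1 * m + (1 - beta1) * grad if use_nesterov else m
    var = var - scale * update / (np.sqrt(v) + eps)
    return var, m, v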
class FusedSparseAdam(PrimitiveWithInfer):
r"""
Merge the duplicate value of the gradient and then update parameters by Adaptive Moment Estimation (Adam)
algorithm. This operator is used when the gradient is sparse.
The Adam algorithm is proposed in `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_.
The updating formulas are as follows,
.. math::
\begin{array}{ll} \\
m = \beta_1 * m + (1 - \beta_1) * g \\
v = \beta_2 * v + (1 - \beta_2) * g * g \\
l = \alpha * \frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t} \\
w = w - l * \frac{m}{\sqrt{v} + \epsilon}
\end{array}
:math:`m` represents the 1st moment vector, :math:`v` represents the 2nd moment vector, :math:`g` represents
`gradient`, :math:`l` represents scaling factor `lr`, :math:`\beta_1, \beta_2` represent `beta1` and `beta2`,
:math:`t` represents updating step while :math:`beta_1^t` and :math:`beta_2^t` represent `beta1_power` and
`beta2_power`, :math:`\alpha` represents `learning_rate`, :math:`w` represents `var`, :math:`\epsilon` represents
`epsilon`.
All inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.
If they have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Args:
use_locking (bool): Whether to enable a lock to protect variable tensors from being updated.
If true, updates of the var, m, and v tensors will be protected by a lock.
If false, the result is unpredictable. Default: False.
use_nesterov (bool): Whether to use Nesterov Accelerated Gradient (NAG) algorithm to update the gradients.
If true, update the gradients using NAG.
If false, update the gradients without using NAG. Default: False.
Inputs:
- **var** (Parameter) - Parameters to be updated with float32 data type.
- **m** (Parameter) - The 1st moment vector in the updating formula, has the same type as `var` with
float32 data type.
- **v** (Parameter) - The 2nd moment vector in the updating formula. Mean square gradients, has the same type as
`var` with float32 data type.
- **beta1_power** (Tensor) - :math:`beta_1^t` in the updating formula with float32 data type.
- **beta2_power** (Tensor) - :math:`beta_2^t` in the updating formula with float32 data type.
- **lr** (Tensor) - :math:`l` in the updating formula. With float32 data type.
- **beta1** (Tensor) - The exponential decay rate for the 1st moment estimations with float32 data type.
- **beta2** (Tensor) - The exponential decay rate for the 2nd moment estimations with float32 data type.
- **epsilon** (Tensor) - Term added to the denominator to improve numerical stability with float32 data type.
- **gradient** (Tensor) - Gradient value with float32 data type.
- **indices** (Tensor) - Gradient indices with int32 data type.
Outputs:
Tuple of 3 Tensors, this operator will update the input parameters directly, the outputs are useless.
- **var** (Tensor) - A Tensor with shape (1,).
- **m** (Tensor) - A Tensor with shape (1,).
- **v** (Tensor) - A Tensor with shape (1,).
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> import mindspore.common.dtype as mstype
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.sparse_apply_adam = P.FusedSparseAdam()
>>> self.var = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="var")
>>> self.m = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="m")
>>> self.v = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="v")
>>> def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, indices):
>>> out = self.sparse_apply_adam(self.var, self.m, self.v, beta1_power, beta2_power, lr, beta1, beta2,
>>> epsilon, grad, indices)
>>> return out
>>> net = Net()
>>> beta1_power = Tensor(0.9, mstype.float32)
>>> beta2_power = Tensor(0.999, mstype.float32)
>>> lr = Tensor(0.001, mstype.float32)
>>> beta1 = Tensor(0.9, mstype.float32)
>>> beta2 = Tensor(0.999, mstype.float32)
>>> epsilon = Tensor(1e-8, mstype.float32)
>>> gradient = Tensor(np.random.rand(2, 1, 2), mstype.float32)
>>> indices = Tensor([0, 1], mstype.int32)
>>> result = net(beta1_power, beta2_power, lr, beta1, beta2, epsilon, gradient, indices)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('beta1_power', dtype=sig.sig_dtype.T),
sig.make_sig('beta2_power', dtype=sig.sig_dtype.T),
sig.make_sig('lr', dtype=sig.sig_dtype.T),
sig.make_sig('beta1', dtype=sig.sig_dtype.T),
sig.make_sig('beta2', dtype=sig.sig_dtype.T),
sig.make_sig('epsilon', dtype=sig.sig_dtype.T),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
sig.make_sig('indices', dtype=sig.sig_dtype.T1),
)
@prim_attr_register
def __init__(self, use_locking=False, use_nesterov=False):
validator.check_value_type("use_locking", use_locking, [bool], self.name)
validator.check_value_type("use_nesterov", use_nesterov, [bool], self.name)
self.init_prim_io_names(inputs=['var', 'm', 'v', 'beta1_power', 'beta2_power', 'lr', 'beta1', 'beta2',
'epsilon', 'grad', 'indices'],
outputs=['var', 'm', 'v'])
def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, beta2_power_shape, lr_shape,
beta1_shape, beta2_shape, epsilon_shape, grad_shape, indices_shape):
validator.check("var_shape", var_shape, "m_shape", m_shape, Rel.EQ, self.name)
validator.check("var_shape", var_shape, "v_shape", v_shape, Rel.EQ, self.name)
validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
if len(var_shape) > 1 and grad_shape != indices_shape + var_shape[1:]:
raise ValueError(f"For '{self.name}', the shape of updates should be [] or "
f"grad_shape = indices_shape + var_shape[1:], but got var_shape: {var_shape}, "
f"indices_shape: {indices_shape}, grad_shape: {grad_shape}.")
return [1], [1], [1]
def infer_dtype(self, var_dtype, m_dtype, v_dtype, beta1_power_dtype, beta2_power_dtype, lr_dtype,
beta1_dtype, beta2_dtype, epsilon_dtype, grad_dtype, indices_dtype):
args = {"var": var_dtype, "m": m_dtype, "v": v_dtype, "grad": grad_dtype}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
args = {"beta1_power": beta1_power_dtype, "beta2_power": beta2_power_dtype, 'lr': lr_dtype,
"beta1": beta1_dtype, "beta2": beta2_dtype, "epsilon": epsilon_dtype}
validator.check_scalar_or_tensor_type_same(args, [mstype.float16, mstype.float32], self.name, True)
validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
return var_dtype, m_dtype, v_dtype
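# --- Hedged reference sketch (illustrative only) ---
# Illustrates the "merge the duplicate value of the gradient" step described in the
# FusedSparseAdam docstring: gradient rows that share an index are summed before the Adam
# update is applied. A NumPy sketch under that assumption; the helper name is hypothetical.
def _merge_duplicate_indices_sketch(grad, indices):
    """Return (merged_grad, unique_indices) with rows of duplicate indices summed."""
    unique_indices, inverse = np.unique(indices, return_inverse=True)
    merged = np.zeros((unique_indices.shape[0],) + grad.shape[1:], dtype=grad.dtype)
    np.add.at(merged, inverse, grad)
    return merged, unique_indices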
class FusedSparseLazyAdam(PrimitiveWithInfer):
r"""
Merge the duplicate value of the gradient and then update parameters by Adaptive Moment Estimation (Adam)
algorithm. This operator is used when the gradient is sparse. The behavior is not equivalent to the
original Adam algorithm, as only the current indices parameters will be updated.
The Adam algorithm is proposed in `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_.
The updating formulas are as follows,
.. math::
\begin{array}{ll} \\
m = \beta_1 * m + (1 - \beta_1) * g \\
v = \beta_2 * v + (1 - \beta_2) * g * g \\
l = \alpha * \frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t} \\
w = w - l * \frac{m}{\sqrt{v} + \epsilon}
\end{array}
:math:`m` represents the 1st moment vector, :math:`v` represents the 2nd moment vector, :math:`g` represents
`gradient`, :math:`l` represents scaling factor `lr`, :math:`\beta_1, \beta_2` represent `beta1` and `beta2`,
:math:`t` represents updating step while :math:`beta_1^t` and :math:`beta_2^t` represent `beta1_power` and
`beta2_power`, :math:`\alpha` represents `learning_rate`, :math:`w` represents `var`, :math:`\epsilon` represents
`epsilon`.
All inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.
If they have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Args:
use_locking (bool): Whether to enable a lock to protect variable tensors from being updated.
If true, updates of the var, m, and v tensors will be protected by a lock.
If false, the result is unpredictable. Default: False.
use_nesterov (bool): Whether to use Nesterov Accelerated Gradient (NAG) algorithm to update the gradients.
If true, update the gradients using NAG.
If false, update the gradients without using NAG. Default: False.
Inputs:
- **var** (Parameter) - Parameters to be updated with float32 data type.
- **m** (Parameter) - The 1st moment vector in the updating formula, has the same type as `var` with
float32 data type.
- **v** (Parameter) - The 2nd moment vector in the updating formula. Mean square gradients, has the same type as
`var` with float32 data type.
- **beta1_power** (Tensor) - :math:`beta_1^t` in the updating formula with float32 data type.
- **beta2_power** (Tensor) - :math:`beta_2^t` in the updating formula with float32 data type.
- **lr** (Tensor) - :math:`l` in the updating formula with float32 data type.
- **beta1** (Tensor) - The exponential decay rate for the 1st moment estimations with float32 data type.
- **beta2** (Tensor) - The exponential decay rate for the 2nd moment estimations with float32 data type.
- **epsilon** (Tensor) - Term added to the denominator to improve numerical stability with float32 data type.
- **gradient** (Tensor) - Gradient value with float32 data type.
- **indices** (Tensor) - Gradient indices with int32 data type.
Outputs:
Tuple of 3 Tensors, this operator will update the input parameters directly, the outputs are useless.
- **var** (Tensor) - A Tensor with shape (1,).
- **m** (Tensor) - A Tensor with shape (1,).
- **v** (Tensor) - A Tensor with shape (1,).
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> import mindspore.common.dtype as mstype
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.sparse_apply_lazyadam = P.FusedSparseLazyAdam()
>>> self.var = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="var")
>>> self.m = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="m")
>>> self.v = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="v")
>>> def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, indices):
>>> out = self.sparse_apply_lazyadam(self.var, self.m, self.v, beta1_power, beta2_power, lr, beta1,
>>> beta2, epsilon, grad, indices)
>>> return out
>>> net = Net()
>>> beta1_power = Tensor(0.9, mstype.float32)
>>> beta2_power = Tensor(0.999, mstype.float32)
>>> lr = Tensor(0.001, mstype.float32)
>>> beta1 = Tensor(0.9, mstype.float32)
>>> beta2 = Tensor(0.999, mstype.float32)
>>> epsilon = Tensor(1e-8, mstype.float32)
>>> gradient = Tensor(np.random.rand(2, 1, 2), mstype.float32)
>>> indices = Tensor([0, 1], mstype.int32)
>>> result = net(beta1_power, beta2_power, lr, beta1, beta2, epsilon, gradient, indices)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('beta1_power', dtype=sig.sig_dtype.T),
sig.make_sig('beta2_power', dtype=sig.sig_dtype.T),
sig.make_sig('lr', dtype=sig.sig_dtype.T),
sig.make_sig('beta1', dtype=sig.sig_dtype.T),
sig.make_sig('beta2', dtype=sig.sig_dtype.T),
sig.make_sig('epsilon', dtype=sig.sig_dtype.T),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
sig.make_sig('indices', dtype=sig.sig_dtype.T1),
)
@prim_attr_register
def __init__(self, use_locking=False, use_nesterov=False):
validator.check_value_type("use_locking", use_locking, [bool], self.name)
validator.check_value_type("use_nesterov", use_nesterov, [bool], self.name)
self.init_prim_io_names(inputs=['var', 'm', 'v', 'beta1_power', 'beta2_power', 'lr', 'beta1', 'beta2',
'epsilon', 'grad', 'indices'],
outputs=['var', 'm', 'v'])
def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, beta2_power_shape, lr_shape,
beta1_shape, beta2_shape, epsilon_shape, grad_shape, indices_shape):
validator.check("var_shape", var_shape, "m_shape", m_shape, Rel.EQ, self.name)
validator.check("var_shape", var_shape, "v_shape", v_shape, Rel.EQ, self.name)
validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
if len(var_shape) > 1 and grad_shape != indices_shape + var_shape[1:]:
raise ValueError(f"For '{self.name}', the shape of updates should be [] or "
f"grad_shape = indices_shape + var_shape[1:], but got var_shape: {var_shape}, "
f"indices_shape: {indices_shape}, grad_shape: {grad_shape}.")
return [1], [1], [1]
def infer_dtype(self, var_dtype, m_dtype, v_dtype, beta1_power_dtype, beta2_power_dtype, lr_dtype,
beta1_dtype, beta2_dtype, epsilon_dtype, grad_dtype, indices_dtype):
args = {"var": var_dtype, "m": m_dtype, "v": v_dtype, "grad": grad_dtype}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
args = {"beta1_power": beta1_power_dtype, "beta2_power": beta2_power_dtype, 'lr': lr_dtype,
"beta1": beta1_dtype, "beta2": beta2_dtype, "epsilon": epsilon_dtype}
validator.check_scalar_or_tensor_type_same(args, [mstype.float16, mstype.float32], self.name, True)
validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
return var_dtype, m_dtype, v_dtype
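# --- Hedged reference sketch (illustrative only) ---
# Sketches the "lazy" behaviour noted in the FusedSparseLazyAdam docstring: only the rows of
# var/m/v selected by `indices` are touched, while all other rows keep their old moments.
# A NumPy sketch under that assumption; the helper name is hypothetical.
def _lazy_adam_rows_sketch(var, m, v, beta1_power, beta2_power, lr, beta1, beta2, eps,
                           grad, indices):
    """Update var[i], m[i], v[i] only for i in `indices`; return the updated arrays."""
    scale = lr * np.sqrt(1 - beta2_power) / (1 - beta1_power)
    for row, g in zip(indices, grad):
        m[row] = beta1 * m[row] + (1 - beta1) * g
        v[row] = beta2 * v[row] + (1 - beta2) * g * g
        var[row] = var[row] - scale * m[row] / (np.sqrt(v[row]) + eps)
    return var, m, v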
class FusedSparseFtrl(PrimitiveWithInfer):
"""
Merge the duplicate value of the gradient and then update relevant entries according to the FTRL-proximal scheme.
All inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.
If they have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Args:
lr (float): The learning rate value, must be positive.
l1 (float): l1 regularization strength, must be greater than or equal to zero.
l2 (float): l2 regularization strength, must be greater than or equal to zero.
lr_power (float): Learning rate power controls how the learning rate decreases during training,
must be less than or equal to zero. Use fixed learning rate if `lr_power` is zero.
use_locking (bool): Use locks for updating operation if True . Default: False.
Inputs:
- **var** (Parameter) - The variable to be updated. The data type must be float32.
- **accum** (Parameter) - The accumulation to be updated, must be same type and shape as `var`.
- **linear** (Parameter) - the linear coefficient to be updated, must be same type and shape as `var`.
- **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.
- **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`. The shape
of `indices` must be the same as `grad` in first dimension. The type must be int32.
Outputs:
Tuple of 3 Tensors, this operator will update the input parameters directly, the outputs are useless.
- **var** (Tensor) - A Tensor with shape (1,).
- **accum** (Tensor) - A Tensor with shape (1,).
- **linear** (Tensor) - A Tensor with shape (1,).
Examples:
>>> import mindspore
>>> import mindspore.nn as nn
>>> import numpy as np
>>> from mindspore import Parameter
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> class SparseApplyFtrlNet(nn.Cell):
>>> def __init__(self):
>>> super(SparseApplyFtrlNet, self).__init__()
>>> self.sparse_apply_ftrl = P.FusedSparseFtrl(lr=0.01, l1=0.0, l2=0.0, lr_power=-0.5)
>>> self.var = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="var")
>>> self.accum = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="accum")
>>> self.linear = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="linear")
>>>
>>> def construct(self, grad, indices):
>>> out = self.sparse_apply_ftrl(self.var, self.accum, self.linear, grad, indices)
>>> return out
>>>
>>> net = SparseApplyFtrlNet()
>>> grad = Tensor(np.random.rand(2, 1, 2).astype(np.float32))
>>> indices = Tensor(np.array([0, 1]).astype(np.int32))
>>> output = net(grad, indices)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('linear', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
sig.make_sig('indices', dtype=sig.sig_dtype.T1),
)
@prim_attr_register
def __init__(self, lr, l1, l2, lr_power, use_locking=False):
self.init_prim_io_names(inputs=['var', 'accum', 'linear', 'grad', 'indices'],
outputs=['output'])
validator.check_value_type("lr", lr, [float], self.name)
validator.check_value_type("l1", l1, [float], self.name)
validator.check_value_type("l2", l2, [float], self.name)
validator.check_value_type("lr_power", lr_power, [float], self.name)
self.lr = validator.check_number_range("lr", lr, 0.0, float("inf"), Rel.INC_NEITHER, self.name)
self.l1 = validator.check_number_range("l1", l1, 0.0, float("inf"), Rel.INC_LEFT, self.name)
self.l2 = validator.check_number_range("l2", l2, 0.0, float("inf"), Rel.INC_LEFT, self.name)
self.lr_power = validator.check_number("lr_power", lr_power, 0, Rel.LE, self.name)
self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
def infer_shape(self, var_shape, accum_shape, linear_shape, grad_shape, indices_shape):
validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
validator.check('var shape', var_shape, 'linear shape', linear_shape, Rel.EQ, self.name)
if len(var_shape) > 1:
validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
return [1], [1], [1]
def infer_dtype(self, var_dtype, accum_dtype, linear_dtype, grad_dtype, indices_dtype):
args = {"var_dtype": var_dtype, "accum_dtype": accum_dtype,
"linear_dtype": linear_dtype, "grad_dtype": grad_dtype}
validator.check_tensor_type_same(args, [mstype.float32], self.name)
validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
return var_dtype, accum_dtype, linear_dtype
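# --- Hedged reference sketch (illustrative only) ---
# Restates the shape contract checked in FusedSparseFtrl.infer_shape: `indices` is rank-1,
# `grad` has one row per index, and each row matches the trailing dimensions of `var`.
# The helper name is hypothetical.
def _sparse_grad_shape_ok_sketch(var_shape, grad_shape, indices_shape):
    """Return True when grad_shape == indices_shape + var_shape[1:] with rank-1 indices."""
    return (len(indices_shape) == 1
            and grad_shape[0] == indices_shape[0]
            and tuple(grad_shape[1:]) == tuple(var_shape[1:]))
# Example: var (3, 1, 2), grad (2, 1, 2), indices (2,) -> True, matching the docstring example.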
class FusedSparseProximalAdagrad(PrimitiveWithInfer):
r"""
Merge the duplicate value of the gradient and then update relevant entries according to the proximal adagrad
algorithm.
.. math::
accum += grad * grad
.. math::
\text{prox_v} = var - lr * grad * \frac{1}{\sqrt{accum}}
.. math::
var = \frac{sign(\text{prox_v})}{1 + lr * l2} * \max(\left| \text{prox_v} \right| - lr * l1, 0)
All inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.
If they have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Args:
use_locking (bool): If true, the variable and accumulation tensors will be protected from being updated.
Default: False.
Inputs:
- **var** (Parameter) - Variable tensor to be updated. The data type must be float32.
- **accum** (Parameter) - Variable tensor to be updated, has the same dtype as `var`.
- **lr** (Tensor) - The learning rate value. The data type must be float32.
- **l1** (Tensor) - l1 regularization strength. The data type must be float32.
- **l2** (Tensor) - l2 regularization strength. The data type must be float32.
- **grad** (Tensor) - A tensor of the same type as `var`, for the gradient. The data type must be float32.
- **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`. The data type
must be int32.
Outputs:
Tuple of 2 Tensors, this operator will update the input parameters directly, the outputs are useless.
- **var** (Tensor) - A Tensor with shape (1,).
- **accum** (Tensor) - A Tensor with shape (1,).
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.sparse_apply_proximal_adagrad = P.FusedSparseProximalAdagrad()
>>> self.var = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="var")
>>> self.accum = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="accum")
>>> self.lr = Tensor(0.01, mstype.float32)
>>> self.l1 = Tensor(0.0, mstype.float32)
>>> self.l2 = Tensor(0.0, mstype.float32)
>>> def construct(self, grad, indices):
>>> out = self.sparse_apply_proximal_adagrad(self.var, self.accum, self.lr, self.l1,
>>> self.l2, grad, indices)
>>> return out
>>> net = Net()
>>> grad = Tensor(np.random.rand(2, 1, 2).astype(np.float32))
>>> indices = Tensor(np.array([0, 1]).astype(np.int32))
>>> output = net(grad, indices)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('lr', dtype=sig.sig_dtype.T),
sig.make_sig('l1', dtype=sig.sig_dtype.T),
sig.make_sig('l2', dtype=sig.sig_dtype.T),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
sig.make_sig('indices', dtype=sig.sig_dtype.T1),
)
@prim_attr_register
def __init__(self, use_locking=False):
self.init_prim_io_names(inputs=['var', 'accum', 'lr', 'l1', 'l2', 'grad', 'indices'],
outputs=['output'])
self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
def infer_shape(self, var_shape, accum_shape, lr_shape, l1_shape, l2_shape, grad_shape, indices_shape):
validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
return [1], [1]
def infer_dtype(self, var_dtype, accum_dtype, lr_dtype, l1_dtype, l2_dtype, grad_dtype, indices_dtype):
args = {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}
validator.check_tensor_type_same(args, [mstype.float32], self.name)
validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, [mstype.float32], self.name)
validator.check_scalar_or_tensor_type_same({"l1": l1_dtype}, [mstype.float32], self.name)
validator.check_scalar_or_tensor_type_same({"l2": l2_dtype}, [mstype.float32], self.name)
valid_types = [mstype.int16, mstype.int32, mstype.int64,
mstype.uint16, mstype.uint32, mstype.uint64]
validator.check_tensor_type_same({'indices': indices_dtype}, valid_types, self.name)
return var_dtype, accum_dtype
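# --- Hedged reference sketch (illustrative only) ---
# A NumPy transcription of the proximal adagrad formulas quoted in the docstring above,
# applied row-by-row to the indexed entries. Assumes duplicate indices were already merged;
# the helper name is hypothetical.
def _sparse_proximal_adagrad_sketch(var, accum, lr, l1, l2, grad, indices):
    """Update var[i] and accum[i] for each i in `indices`; return (var, accum)."""
    for row, g in zip(indices, grad):
        accum[row] = accum[row] + g * g
        prox_v = var[row] - lr * g / np.sqrt(accum[row])
        var[row] = np.sign(prox_v) / (1 + lr * l2) * np.maximum(np.abs(prox_v) - lr * l1, 0)
    return var, accum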
class KLDivLoss(PrimitiveWithInfer):
r"""
Computes the Kullback-Leibler divergence between the target and the output.
Note:
Sets input as :math:`x`, input label as :math:`y`, output as :math:`\ell(x, y)`.
Let,
.. math::
L = \{l_1,\dots,l_N\}^\top, \quad
l_n = y_n \cdot (\log y_n - x_n)
Then,
.. math::
\ell(x, y) = \begin{cases}
L, & \text{if reduction} = \text{`none';}\\
\operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
\operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
\end{cases}
Args:
reduction (str): Specifies the reduction to be applied to the output.
Its value should be one of 'none', 'mean', 'sum'. Default: 'mean'.
Inputs:
- **input_x** (Tensor) - The input Tensor. The data type must be float32.
- **input_y** (Tensor) - The label Tensor which has the same shape as `input_x`. The data type must be float32.
Outputs:
Tensor or Scalar, if `reduction` is 'none', then output is a tensor and has the same shape as `input_x`.
Otherwise it is a scalar.
Examples:
>>> import mindspore
>>> import mindspore.nn as nn
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.kldiv_loss = P.KLDivLoss()
>>> def construct(self, x, y):
>>> result = self.kldiv_loss(x, y)
>>> return result
>>>
>>> net = Net()
>>> input_x = Tensor(np.array([0.2, 0.7, 0.1]), mindspore.float32)
>>> input_y = Tensor(np.array([0., 1., 0.]), mindspore.float32)
>>> result = net(input_x, input_y)
"""
@prim_attr_register
def __init__(self, reduction='mean'):
self.reduction = validator.check_string('reduction', reduction, ['none', 'mean', 'sum'], self.name)
def infer_shape(self, x_shape, y_shape):
validator.check('x_shape', x_shape, 'y_shape', y_shape, Rel.EQ, self.name)
if self.reduction in ('mean', 'sum'):
shape = []
else:
shape = x_shape
return shape
def infer_dtype(self, x_type, y_type):
args = {'x': x_type, 'y': y_type}
valid_types = (mstype.float16, mstype.float32)
validator.check_tensor_type_same(args, valid_types, self.name)
return x_type
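# --- Hedged reference sketch (illustrative only) ---
# A NumPy transcription of the KLDivLoss formula above: l_n = y_n * (log(y_n) - x_n),
# followed by the selected reduction. Zero targets contribute zero, mirroring the usual
# 0 * log(0) = 0 convention; the helper name is hypothetical.
def _kldiv_loss_sketch(x, y, reduction='mean'):
    """Return the element-wise KL terms, or their mean/sum depending on `reduction`."""
    log_y = np.where(y > 0, np.log(np.where(y > 0, y, 1.0)), 0.0)
    loss = y * (log_y - x)
    if reduction == 'mean':
        return loss.mean()
    if reduction == 'sum':
        return loss.sum()
    return loss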
class BinaryCrossEntropy(PrimitiveWithInfer):
r"""
Computes the Binary Cross Entropy between the target and the output.
Note:
Sets input as :math:`x`, input label as :math:`y`, output as :math:`\ell(x, y)`.
Let,
.. math::
L = \{l_1,\dots,l_N\}^\top, \quad
l_n = - w_n \left[ y_n \cdot \log x_n + (1 - y_n) \cdot \log (1 - x_n) \right]
Then,
.. math::
\ell(x, y) = \begin{cases}
L, & \text{if reduction} = \text{`none';}\\
\operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
\operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
\end{cases}
Args:
reduction (str): Specifies the reduction to be applied to the output.
Its value should be one of 'none', 'mean', 'sum'. Default: 'mean'.
Inputs:
- **input_x** (Tensor) - The input Tensor. The data type should be float16 or float32.
- **input_y** (Tensor) - The label Tensor which has the same shape and data type as `input_x`.
- **weight** (Tensor, optional) - A rescaling weight applied to the loss of each batch element.
And it should have the same shape and data type as `input_x`. Default: None.
Outputs:
Tensor or Scalar, if `reduction` is 'none', then output is a tensor and has the same shape as `input_x`.
Otherwise, the output is a scalar.
Examples:
>>> import mindspore
>>> import mindspore.nn as nn
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.binary_cross_entropy = P.BinaryCrossEntropy()
>>> def construct(self, x, y, weight):
>>> result = self.binary_cross_entropy(x, y, weight)
>>> return result
>>>
>>> net = Net()
>>> input_x = Tensor(np.array([0.2, 0.7, 0.1]), mindspore.float32)
>>> input_y = Tensor(np.array([0., 1., 0.]), mindspore.float32)
>>> weight = Tensor(np.array([1, 2, 2]), mindspore.float32)
>>> result = net(input_x, input_y, weight)
0.38240486
"""
@prim_attr_register
def __init__(self, reduction='mean'):
self.reduction = validator.check_string('reduction', reduction, ['none', 'mean', 'sum'], self.name)
def infer_shape(self, x_shape, y_shape, weight_shape):
validator.check('x_shape', x_shape, 'y_shape', y_shape, Rel.EQ, self.name)
if weight_shape:
validator.check('y_shape', y_shape, 'weight_shape', weight_shape, Rel.EQ, self.name)
if self.reduction in ('mean', 'sum'):
shape = []
else:
shape = x_shape
return shape
def infer_dtype(self, x_type, y_type, weight_type):
args = {'x': x_type, 'y': y_type}
valid_types = (mstype.float16, mstype.float32)
validator.check_tensor_type_same(args, valid_types, self.name)
if weight_type:
validator.check_tensor_type_same({'x': x_type, 'weight': weight_type}, valid_types, self.name)
return x_type
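# --- Hedged reference sketch (illustrative only) ---
# A NumPy transcription of the BinaryCrossEntropy formula above:
# l_n = -w_n * (y_n * log(x_n) + (1 - y_n) * log(1 - x_n)), then the chosen reduction.
# The helper name is hypothetical.
def _binary_cross_entropy_sketch(x, y, weight=None, reduction='mean'):
    """Return the (optionally weighted) BCE terms, reduced according to `reduction`."""
    loss = -(y * np.log(x) + (1 - y) * np.log(1 - x))
    if weight is not None:
        loss = weight * loss
    if reduction == 'mean':
        return loss.mean()
    if reduction == 'sum':
        return loss.sum()
    return loss
# With the docstring example (x=[0.2, 0.7, 0.1], y=[0., 1., 0.], weight=[1, 2, 2]),
# the mean reduction gives roughly 0.3824, matching the value shown above.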
class ApplyAdaMax(PrimitiveWithInfer):
r"""
Update relevant entries according to the adamax scheme.
The updating formulas are as follows,
.. math::
\begin{array}{ll} \\
m_{t} = \beta_1 * m_{t-1} + (1 - \beta_1) * g \\
v_{t} = \max(\beta_2 * v_{t-1}, \left| g \right|) \\
var = var - \frac{l}{1 - \beta_1^t} * \frac{m_{t}}{v_{t} + \epsilon}
\end{array}
:math:`t` represents updating step while :math:`m` represents the 1st moment vector, :math:`m_{t-1}`
is the last moment of :math:`m_{t}`, :math:`v` represents the 2nd moment vector, :math:`v_{t-1}`
is the last moment of :math:`v_{t}`, :math:`l` represents scaling factor `lr`,
:math:`g` represents `grad`, :math:`\beta_1, \beta_2` represent `beta1` and `beta2`,
:math:`beta_1^t` represents `beta1_power`, :math:`var` represents the variable to be updated,
:math:`\epsilon` represents `epsilon`.
Inputs of `var`, `m`, `v` and `grad` comply with the implicit type conversion rules
to make the data types consistent.
If they have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Inputs:
- **var** (Parameter) - Variable to be updated. With float32 or float16 data type.
- **m** (Parameter) - The 1st moment vector in the updating formula, has the same shape and type as `var`.
With float32 or float16 data type.
- **v** (Parameter) - The 2nd moment vector in the updating formula. Mean square gradients
with the same shape and type as `var`. With float32 or float16 data type.
- **beta1_power** (Union[Number, Tensor]) - :math:`beta_1^t` in the updating formula, should be scalar.
With float32 or float16 data type.
- **lr** (Union[Number, Tensor]) - Learning rate, :math:`l` in the updating formula, should be scalar.
With float32 or float16 data type.
- **beta1** (Union[Number, Tensor]) - The exponential decay rate for the 1st moment estimations,
should be scalar. With float32 or float16 data type.
- **beta2** (Union[Number, Tensor]) - The exponential decay rate for the 2nd moment estimations,
should be scalar. With float32 or float16 data type.
- **epsilon** (Union[Number, Tensor]) - A small value added for numerical stability, should be scalar.
With float32 or float16 data type.
- **grad** (Tensor) - A tensor for gradient, has the same shape and type as `var`.
With float32 or float16 data type.
Outputs:
Tuple of 3 Tensors, the updated parameters.
- **var** (Tensor) - The same shape and data type as `var`.
- **m** (Tensor) - The same shape and data type as `m`.
- **v** (Tensor) - The same shape and data type as `v`.
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> import mindspore.common.dtype as mstype
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.apply_ada_max = P.ApplyAdaMax()
>>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
>>> self.m = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="m")
>>> self.v = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="v")
>>> def construct(self, beta1_power, lr, beta1, beta2, epsilon, grad):
>>> out = self.apply_ada_max(self.var, self.m, self.v, beta1_power, lr, beta1, beta2, epsilon, grad)
>>> return out
>>> net = Net()
>>> beta1_power =Tensor(0.9, mstype.float32)
>>> lr = Tensor(0.001, mstype.float32)
>>> beta1 = Tensor(0.9, mstype.float32)
>>> beta2 = Tensor(0.99, mstype.float32)
>>> epsilon = Tensor(1e-10, mstype.float32)
>>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
>>> result = net(beta1_power, lr, beta1, beta2, epsilon, grad)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('beta1_power', dtype=sig.sig_dtype.T1),
sig.make_sig('lr', dtype=sig.sig_dtype.T2),
sig.make_sig('beta1', dtype=sig.sig_dtype.T3),
sig.make_sig('beta2', dtype=sig.sig_dtype.T4),
sig.make_sig('epsilon', dtype=sig.sig_dtype.T5),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
)
@prim_attr_register
def __init__(self):
"""init ApplyAdaMax"""
def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, lr_shape,
beta1_shape, beta2_shape, epsilon_shape, grad_shape):
validator.check("m_shape", m_shape, "var_shape", var_shape, Rel.EQ, self.name)
validator.check("v_shape", v_shape, "var_shape", var_shape, Rel.EQ, self.name)
validator.check("grad_shape", grad_shape, "var_shape", var_shape, Rel.EQ, self.name)
beta1_power_shp_len = len(beta1_power_shape)
validator.check_integer("beta1 power's rank", beta1_power_shp_len, 1, Rel.LE, self.name)
if beta1_power_shp_len == 1:
validator.check_integer("beta1_power_shape[0]", beta1_power_shape[0], 1, Rel.EQ, self.name)
lr_shp_len = len(lr_shape)
validator.check_integer("lr's rank", lr_shp_len, 1, Rel.LE, self.name)
if lr_shp_len == 1:
validator.check_integer("lr_shape[0]", lr_shape[0], 1, Rel.EQ, self.name)
beta1_shp_len = len(beta1_shape)
validator.check_integer("beta1's rank", beta1_shp_len, 1, Rel.LE, self.name)
if beta1_shp_len == 1:
validator.check_integer("beta1_shape[0]", beta1_shape[0], 1, Rel.EQ, self.name)
beta2_shp_len = len(beta2_shape)
validator.check_integer("beta2's rank", beta2_shp_len, 1, Rel.LE, self.name)
if beta2_shp_len == 1:
validator.check_integer("beta2_shape[0]", beta2_shape[0], 1, Rel.EQ, self.name)
epsilon_shp_len = len(epsilon_shape)
validator.check_integer("epsilon's rank", epsilon_shp_len, 1, Rel.LE, self.name)
if epsilon_shp_len == 1:
validator.check_integer("epsilon_shape[0]", epsilon_shape[0], 1, Rel.EQ, self.name)
return var_shape, m_shape, v_shape
def infer_dtype(self, var_dtype, m_dtype, v_dtype, beta1_power_dtype, lr_dtype,
beta1_dtype, beta2_dtype, epsilon_dtype, grad_dtype):
valid_types = [mstype.float16, mstype.float32]
args = {"var": var_dtype, "m": m_dtype, "v": v_dtype, "grad": grad_dtype}
validator.check_tensor_type_same(args, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"beta1_power": beta1_power_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"beta1": beta1_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"beta2": beta2_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"epsilon": epsilon_dtype}, valid_types, self.name)
return var_dtype, m_dtype, v_dtype
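# --- Hedged reference sketch (illustrative only) ---
# A NumPy transcription of the AdaMax formulas quoted in the ApplyAdaMax docstring,
# assuming plain float scalars. The helper name is hypothetical.
def _ada_max_update_sketch(var, m, v, beta1_power, lr, beta1, beta2, eps, grad):
    """Return updated (var, m, v) following the docstring formulas."""
    m = beta1 * m + (1 - beta1) * grad
    v = np.maximum(beta2 * v, np.abs(grad))
    var = var - lr / (1 - beta1_power) * m / (v + eps)
    return var, m, v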
class ApplyAdadelta(PrimitiveWithInfer):
r"""
Update relevant entries according to the adadelta scheme.
.. math::
accum = \rho * accum + (1 - \rho) * grad^2
.. math::
\text{update} = \sqrt{\text{accum_update} + \epsilon} * \frac{grad}{\sqrt{accum + \epsilon}}
.. math::
\text{accum_update} = \rho * \text{accum_update} + (1 - \rho) * update^2
.. math::
var -= lr * update
Inputs of `var`, `accum`, `accum_update` and `grad` comply with the implicit type conversion rules
to make the data types consistent.
If they have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Inputs:
- **var** (Parameter) - Weights to be updated. With float32 or float16 data type.
- **accum** (Parameter) - Accumulation to be updated, has the same shape and type as `var`.
With float32 or float16 data type.
- **accum_update** (Parameter) - Accum_update to be updated, has the same shape and type as `var`.
With float32 or float16 data type.
- **lr** (Union[Number, Tensor]) - Learning rate, should be scalar. With float32 or float16 data type.
- **rho** (Union[Number, Tensor]) - Decay rate, should be scalar. With float32 or float16 data type.
- **epsilon** (Union[Number, Tensor]) - A small value added for numerical stability, should be scalar.
With float32 or float16 data type.
- **grad** (Tensor) - Gradients, has the same shape and type as `var`. With float32 or float16 data type.
Outputs:
Tuple of 3 Tensors, the updated parameters.
- **var** (Tensor) - The same shape and data type as `var`.
- **accum** (Tensor) - The same shape and data type as `accum`.
- **accum_update** (Tensor) - The same shape and data type as `accum_update`.
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> import mindspore.common.dtype as mstype
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.apply_adadelta = P.ApplyAdadelta()
>>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
>>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
>>> self.accum_update = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum_update")
>>> def construct(self, lr, rho, epsilon, grad):
>>> out = self.apply_adadelta(self.var, self.accum, self.accum_update, lr, rho, epsilon, grad)
>>> return out
>>> net = Net()
>>> lr = Tensor(0.001, mstype.float32)
>>> rho = Tensor(0.0, mstype.float32)
>>> epsilon = Tensor(1e-6, mstype.float32)
>>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
>>> result = net(lr, rho, epsilon, grad)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('accum_update', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('lr', dtype=sig.sig_dtype.T1),
sig.make_sig('rho', dtype=sig.sig_dtype.T2),
sig.make_sig('epsilon', dtype=sig.sig_dtype.T3),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
)
@prim_attr_register
def __init__(self):
"""init ApplyAdadelta"""
def infer_shape(self, var_shape, accum_shape, accum_update_shape, lr_shape, rho_shape,
epsilon_shape, grad_shape):
validator.check("accum_shape", accum_shape, "var_shape", var_shape, Rel.EQ, self.name)
validator.check("accum_update_shape", accum_update_shape, "var_shape", var_shape, Rel.EQ, self.name)
validator.check("grad_shape", grad_shape, "var_shape", var_shape, Rel.EQ, self.name)
lr_shp_len = len(lr_shape)
validator.check_integer("lr's rank", lr_shp_len, 1, Rel.LE, self.name)
if lr_shp_len == 1:
validator.check_integer("lr_shape[0]", lr_shape[0], 1, Rel.EQ, self.name)
rho_shp_len = len(rho_shape)
validator.check_integer("rho's rank", rho_shp_len, 1, Rel.LE, self.name)
if rho_shp_len == 1:
validator.check_integer("rho_shape[0]", rho_shape[0], 1, Rel.EQ, self.name)
epsilon_shp_len = len(epsilon_shape)
validator.check_integer("lepsilon's rank", epsilon_shp_len, 1, Rel.LE, self.name)
if epsilon_shp_len == 1:
validator.check_integer("epsilon_shape[0]", epsilon_shape[0], 1, Rel.EQ, self.name)
return var_shape, accum_shape, accum_update_shape
def infer_dtype(self, var_dtype, accum_dtype, accum_update_dtype, lr_dtype, rho_dtype,
epsilon_dtype, grad_dtype):
valid_types = [mstype.float16, mstype.float32]
args = {"var": var_dtype, "accum": accum_dtype, "accum_update": accum_update_dtype, "grad": grad_dtype}
validator.check_tensor_type_same(args, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"rho": rho_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"epsilon": epsilon_dtype}, valid_types, self.name)
return var_dtype, accum_dtype, accum_update_dtype
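# --- Hedged reference sketch (illustrative only) ---
# A NumPy transcription of the Adadelta formulas quoted in the ApplyAdadelta docstring.
# The helper name is hypothetical.
def _adadelta_update_sketch(var, accum, accum_update, lr, rho, eps, grad):
    """Return updated (var, accum, accum_update) following the docstring formulas."""
    accum = rho * accum + (1 - rho) * grad * grad
    update = np.sqrt(accum_update + eps) * grad / np.sqrt(accum + eps)
    accum_update = rho * accum_update + (1 - rho) * update * update
    var = var - lr * update
    return var, accum, accum_update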
class ApplyAdagrad(PrimitiveWithInfer):
r"""
Update relevant entries according to the adagrad scheme.
.. math::
accum += grad * grad
.. math::
var -= lr * grad * \frac{1}{\sqrt{accum}}
Inputs of `var`, `accum` and `grad` comply with the implicit type conversion rules
to make the data types consistent.
If they have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Args:
update_slots (bool): If `True`, `accum` will be updated. Default: True.
Inputs:
- **var** (Parameter) - Variable to be updated. With float32 or float16 data type.
- **accum** (Parameter) - Accumulation to be updated. The shape and dtype should be the same as `var`.
With float32 or float16 data type.
- **lr** (Union[Number, Tensor]) - The learning rate value, should be scalar. With float32 or float16 data type.
- **grad** (Tensor) - A tensor for gradient. The shape and dtype should be the same as `var`.
With float32 or float16 data type.
Outputs:
Tuple of 2 Tensors, the updated parameters.
- **var** (Tensor) - The same shape and data type as `var`.
- **accum** (Tensor) - The same shape and data type as `accum`.
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> import mindspore.common.dtype as mstype
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.apply_adagrad = P.ApplyAdagrad()
>>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
>>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
>>> def construct(self, lr, grad):
>>> out = self.apply_adagrad(self.var, self.accum, lr, grad)
>>> return out
>>> net = Net()
>>> lr = Tensor(0.001, mstype.float32)
>>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
>>> result = net(lr, grad)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('lr', dtype=sig.sig_dtype.T1),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
)
@prim_attr_register
def __init__(self, update_slots=True):
validator.check_value_type("update_slots", update_slots, [bool], self.name)
def infer_shape(self, var_shape, accum_shape, lr_shape, grad_shape):
validator.check('accum shape', accum_shape, 'var shape', var_shape, Rel.EQ, self.name)
validator.check('grad shape', grad_shape, 'var shape', var_shape, Rel.EQ, self.name)
lr_shp_len = len(lr_shape)
validator.check_integer("lr's rank", lr_shp_len, 1, Rel.LE, self.name)
if lr_shp_len == 1:
validator.check_integer("lr_shape[0]", lr_shape[0], 1, Rel.EQ, self.name)
return var_shape, accum_shape
def infer_dtype(self, var_dtype, accum_dtype, lr_dtype, grad_dtype):
args = {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}
valid_types = [mstype.float16, mstype.float32]
validator.check_tensor_type_same(args, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({'lr': lr_dtype}, valid_types, self.name)
return var_dtype, accum_dtype
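# --- Hedged reference sketch (illustrative only) ---
# A NumPy transcription of the Adagrad update in the ApplyAdagrad docstring, including the
# `update_slots` switch that controls whether `accum` is refreshed. The helper name is
# hypothetical.
def _adagrad_update_sketch(var, accum, lr, grad, update_slots=True):
    """Return updated (var, accum) following the docstring formulas."""
    if update_slots:
        accum = accum + grad * grad
    var = var - lr * grad / np.sqrt(accum)
    return var, accum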
class ApplyAdagradV2(PrimitiveWithInfer):
r"""
Update relevant entries according to the adagradv2 scheme.
.. math::
accum += grad * grad
.. math::
var -= lr * grad * \frac{1}{\sqrt{accum} + \epsilon}
Inputs of `var`, `accum` and `grad` comply with the implicit type conversion rules
to make the data types consistent.
If they have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Args:
epsilon (float): A small value added for numerical stability.
update_slots (bool): If `True`, `accum` will be updated. Default: True.
Inputs:
- **var** (Parameter) - Variable to be updated. With float16 or float32 data type.
- **accum** (Parameter) - Accumulation to be updated. The shape and dtype should be the same as `var`.
With float16 or float32 data type.
- **lr** (Union[Number, Tensor]) - The learning rate value, should be a float number or
a scalar tensor with float16 or float32 data type.
- **grad** (Tensor) - A tensor for gradient. The shape and dtype should be the same as `var`.
With float16 or float32 data type.
Outputs:
Tuple of 2 Tensors, the updated parameters.
- **var** (Tensor) - The same shape and data type as `var`.
- **accum** (Tensor) - The same shape and data type as `accum`.
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> import mindspore.common.dtype as mstype
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.apply_adagrad_v2 = P.ApplyAdagradV2(epsilon=1e-6)
>>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
>>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
>>> def construct(self, lr, grad):
>>> out = self.apply_adagrad_v2(self.var, self.accum, lr, grad)
>>> return out
>>> net = Net()
>>> lr = Tensor(0.001, mstype.float32)
>>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
>>> result = net(lr, grad)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('lr', dtype=sig.sig_dtype.T1),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
)
@prim_attr_register
def __init__(self, epsilon, update_slots=True):
validator.check_value_type("epsilon", epsilon, [float], self.name)
validator.check_value_type("update_slots", update_slots, [bool], self.name)
def infer_shape(self, var_shape, accum_shape, lr_shape, grad_shape):
validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
validator.check('var shape', var_shape, 'grad shape', grad_shape, Rel.EQ, self.name)
lr_shp_len = len(lr_shape)
validator.check_integer("lr's rank", lr_shp_len, 1, Rel.LE, self.name)
if lr_shp_len == 1:
validator.check_integer("lr_shape[0]", lr_shape[0], 1, Rel.EQ, self.name)
return var_shape, accum_shape
def infer_dtype(self, var_dtype, accum_dtype, lr_dtype, grad_dtype):
args = {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}
validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
validator.check_scalar_or_tensor_type_same({'lr': lr_dtype}, [mstype.float16, mstype.float32], self.name)
return var_dtype, accum_dtype
class SparseApplyAdagrad(PrimitiveWithInfer):
r"""
Update relevant entries according to the adagrad scheme.
.. math::
accum += grad * grad
.. math::
var -= lr * grad * \frac{1}{\sqrt{accum}}
Inputs of `var`, `accum` and `grad` comply with the implicit type conversion rules
to make the data types consistent.
If they have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Args:
lr (float): Learning rate.
update_slots (bool): If `True`, `accum` will be updated. Default: True.
use_locking (bool): If true, the var and accumulation tensors will be protected from being updated.
Default: False.
Inputs:
- **var** (Parameter) - Variable to be updated. The data type must be float16 or float32.
- **accum** (Parameter) - Accumulation to be updated. The shape and dtype should be the same as `var`.
- **grad** (Tensor) - Gradient. The shape must be the same as `var`'s shape except first dimension.
Has the same data type as `var`.
- **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.
The shape of `indices` must be the same as `grad` in first dimension, the type must be int32.
Outputs:
Tuple of 2 Tensors, the updated parameters.
- **var** (Tensor) - The same shape and data type as `var`.
- **accum** (Tensor) - The same shape and data type as `accum`.
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> import mindspore.common.dtype as mstype
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.sparse_apply_adagrad = P.SparseApplyAdagrad(lr=1e-8)
>>> self.var = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="var")
>>> self.accum = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="accum")
>>> def construct(self, grad, indices):
>>> out = self.sparse_apply_adagrad(self.var, self.accum, grad, indices)
>>> return out
>>> net = Net()
>>> grad = Tensor(np.random.rand(3, 3, 3).astype(np.float32))
>>> indices = Tensor([0, 1, 2], mstype.int32)
>>> result = net(grad, indices)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
sig.make_sig('indices', dtype=sig.sig_dtype.T1),
)
@prim_attr_register
def __init__(self, lr, update_slots=True, use_locking=False):
validator.check_value_type("lr", lr, [float], self.name)
validator.check_number_range("lr", lr, float("-inf"), float("inf"), Rel.INC_NEITHER, self.name)
validator.check_value_type("update_slots", update_slots, [bool], self.name)
validator.check_value_type("use_locking", use_locking, [bool], self.name)
def infer_shape(self, var_shape, accum_shape, grad_shape, indices_shape):
validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
validator.check('len of var shape', len(var_shape), 'len of grad shape', len(grad_shape), Rel.EQ, self.name)
if len(var_shape) > 1:
validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
return var_shape, accum_shape
def infer_dtype(self, var_type, accum_type, grad_type, indices_type):
args = {'var': var_type, 'accum': accum_type, 'grad': grad_type}
validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
validator.check_tensor_type_same({'indices': indices_type}, [mstype.int32], self.name)
return var_type, accum_type
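# --- Hedged reference sketch (illustrative only) ---
# Sketches the row-wise variant in SparseApplyAdagrad: only the rows of `var` and `accum`
# named by `indices` are updated, using the same Adagrad formulas as above. The helper name
# is hypothetical.
def _sparse_adagrad_rows_sketch(var, accum, lr, grad, indices, update_slots=True):
    """Update var[i] and accum[i] for each i in `indices`; return (var, accum)."""
    for row, g in zip(indices, grad):
        if update_slots:
            accum[row] = accum[row] + g * g
        var[row] = var[row] - lr * g / np.sqrt(accum[row])
    return var, accum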
class SparseApplyAdagradV2(PrimitiveWithInfer):
r"""
Update relevant entries according to the adagrad scheme.
.. math::
accum += grad * grad
.. math::
var -= lr * grad * \frac{1}{\sqrt{accum} + \epsilon}
Inputs of `var`, `accum` and `grad` comply with the implicit type conversion rules
to make the data types consistent.
If they have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Args:
lr (float): Learning rate.
epsilon (float): A small value added for numerical stability.
use_locking (bool): If `True`, the var and accumulation tensors will be protected from being updated.
Default: False.
update_slots (bool): If `True`, `accum` will be updated. Default: True.
Inputs:
- **var** (Parameter) - Variable to be updated. The data type must be float16 or float32.
- **accum** (Parameter) - Accumulation to be updated. The shape and dtype should be the same as `var`.
- **grad** (Tensor) - Gradient. The shape must be the same as `var`'s shape except first dimension.
Has the same data type as `var`.
- **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.
The shape of `indices` must be the same as `grad` in first dimension, the type must be int32.
Outputs:
Tuple of 2 Tensors, the updated parameters.
- **var** (Tensor) - The same shape and data type as `var`.
- **accum** (Tensor) - The same shape and data type as `accum`.
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> import mindspore.common.dtype as mstype
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.sparse_apply_adagrad_v2 = P.SparseApplyAdagradV2(lr=1e-8, epsilon=1e-6)
>>> self.var = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="var")
>>> self.accum = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="accum")
>>>
>>> def construct(self, grad, indices):
>>> out = self.sparse_apply_adagrad_v2(self.var, self.accum, grad, indices)
>>> return out
>>> net = Net()
>>> grad = Tensor(np.random.rand(3, 3, 3).astype(np.float32))
>>> indices = Tensor([0, 1, 2], mstype.int32)
>>> result = net(grad, indices)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
sig.make_sig('indices', dtype=sig.sig_dtype.T1),
)
@prim_attr_register
def __init__(self, lr, epsilon, use_locking=False, update_slots=True):
self.lr = validator.check_value_type("lr", lr, [float], self.name)
self.epsilon = validator.check_value_type("epsilon", epsilon, [float], self.name)
self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
self.update_slots = validator.check_value_type("update_slots", update_slots, [bool], self.name)
def infer_shape(self, var_shape, accum_shape, grad_shape, indices_shape):
validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
validator.check('len of var shape', len(var_shape), 'len of grad shape', len(grad_shape), Rel.EQ, self.name)
if len(var_shape) > 1:
validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
return var_shape, accum_shape
def infer_dtype(self, var_type, accum_type, grad_type, indices_type):
args = {'var': var_type, 'accum': accum_type, 'grad': grad_type}
validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
validator.check_tensor_type_same({'indices': indices_type}, [mstype.int32], self.name)
return var_type, accum_type
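# Illustrative only: a minimal NumPy sketch of the sparse Adagrad-V2 update described in the
# docstring above, applied to the rows selected by `indices`. The helper name and the dense
# in-place NumPy style are assumptions for clarity; this is not the actual backend kernel.
def _np_sparse_apply_adagrad_v2_sketch(var, accum, grad, indices, lr, epsilon, update_slots=True):
    import numpy as np  # local import keeps the sketch self-contained
    for i, row in enumerate(indices):
        g = grad[i]
        if update_slots:
            accum[row] += g * g  # accum += grad * grad
        var[row] -= lr * g / (np.sqrt(accum[row]) + epsilon)  # var -= lr * grad / (sqrt(accum) + eps)
    return var, accum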
class ApplyProximalAdagrad(PrimitiveWithInfer):
r"""
Update relevant entries according to the proximal adagrad algorithm.
.. math::
accum += grad * grad
.. math::
\text{prox_v} = var - lr * grad * \frac{1}{\sqrt{accum}}
.. math::
var = \frac{sign(\text{prox_v})}{1 + lr * l2} * \max(\left| \text{prox_v} \right| - lr * l1, 0)
Inputs of `var`, `accum` and `grad` comply with the implicit type conversion rules
to make the data types consistent.
If they have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Args:
use_locking (bool): If true, the var and accumulation tensors will be protected from being updated.
Default: False.
Inputs:
- **var** (Parameter) - Variable to be updated. The data type should be float16 or float32.
- **accum** (Parameter) - Accumulation to be updated. Must have the same shape and dtype as `var`.
- **lr** (Union[Number, Tensor]) - The learning rate value, should be scalar. The data type should be
float16 or float32.
- **l1** (Union[Number, Tensor]) - l1 regularization strength, should be scalar. The data type should be
float16 or float32.
- **l2** (Union[Number, Tensor]) - l2 regularization strength, should be scalar. The data type should be
float16 or float32.
- **grad** (Tensor) - Gradient with the same shape and dtype as `var`.
Outputs:
Tuple of 2 Tensor, the updated parameters.
- **var** (Tensor) - The same shape and data type as `var`.
- **accum** (Tensor) - The same shape and data type as `accum`.
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.apply_proximal_adagrad = P.ApplyProximalAdagrad()
>>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
>>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
>>> self.lr = 0.01
>>> self.l1 = 0.0
>>> self.l2 = 0.0
>>> def construct(self, grad):
>>> out = self.apply_proximal_adagrad(self.var, self.accum, self.lr, self.l1, self.l2, grad)
>>> return out
>>> net = Net()
>>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
>>> output = net(grad)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('lr', dtype=sig.sig_dtype.T1),
sig.make_sig('l1', dtype=sig.sig_dtype.T2),
sig.make_sig('l2', dtype=sig.sig_dtype.T3),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
)
@prim_attr_register
def __init__(self, use_locking=False):
self.init_prim_io_names(inputs=['var', 'accum', 'lr', 'l1', 'l2', 'grad'],
outputs=['var', 'accum'])
self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
def infer_shape(self, var_shape, accum_shape, lr_shape, l1_shape, l2_shape, grad_shape):
validator.check('accum shape', accum_shape, 'var shape', var_shape, Rel.EQ, self.name)
validator.check('grad shape', grad_shape, 'var shape', var_shape, Rel.EQ, self.name)
lr_shp_len = len(lr_shape)
validator.check_integer("lr's rank", lr_shp_len, 1, Rel.LE, self.name)
if lr_shp_len == 1:
validator.check_integer("lr_shape[0]", lr_shape[0], 1, Rel.EQ, self.name)
l1_shp_len = len(l1_shape)
validator.check_integer("l1's rank", l1_shp_len, 1, Rel.LE, self.name)
if l1_shp_len == 1:
validator.check_integer("l1_shape[0]", l1_shape[0], 1, Rel.EQ, self.name)
l2_shp_len = len(l2_shape)
validator.check_integer("l2's rank", l2_shp_len, 1, Rel.LE, self.name)
if l2_shp_len == 1:
validator.check_integer("l2_shape[0]", l2_shape[0], 1, Rel.EQ, self.name)
return var_shape, accum_shape
def infer_dtype(self, var_dtype, accum_dtype, lr_dtype, l1_dtype, l2_dtype, grad_dtype):
valid_types = [mstype.float16, mstype.float32]
args = {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}
validator.check_tensor_type_same(args, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"l1": l1_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"l2": l2_dtype}, valid_types, self.name)
return var_dtype, accum_dtype
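# Illustrative only: a minimal NumPy sketch of the dense proximal Adagrad update written out
# from the three formulas in the docstring above; the helper name is an assumption and this is
# not the actual backend kernel.
def _np_apply_proximal_adagrad_sketch(var, accum, lr, l1, l2, grad):
    import numpy as np  # local import keeps the sketch self-contained
    accum += grad * grad
    prox_v = var - lr * grad / np.sqrt(accum)
    var[:] = np.sign(prox_v) / (1 + lr * l2) * np.maximum(np.abs(prox_v) - lr * l1, 0)
    return var, accum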
class SparseApplyProximalAdagrad(PrimitiveWithCheck):
r"""
Update relevant entries according to the proximal adagrad algorithm. Compared with ApplyProximalAdagrad,
an additional index tensor is input.
.. math::
accum += grad * grad
.. math::
\text{prox_v} = var - lr * grad * \frac{1}{\sqrt{accum}}
.. math::
var = \frac{sign(\text{prox_v})}{1 + lr * l2} * \max(\left| \text{prox_v} \right| - lr * l1, 0)
Inputs of `var`, `accum` and `grad` comply with the implicit type conversion rules
to make the data types consistent.
If they have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Args:
use_locking (bool): If true, the var and accumulation tensors will be protected from being updated.
Default: False.
Inputs:
- **var** (Parameter) - Variable tensor to be updated. The data type must be float16 or float32.
- **accum** (Parameter) - Variable tensor to be updated, has the same dtype as `var`.
- **lr** (Union[Number, Tensor]) - The learning rate value. It should be a float number or
a scalar tensor with float16 or float32 data type.
- **l1** (Union[Number, Tensor]) - l1 regularization strength. It should be a float number or
a scalar tensor with float16 or float32 data type.
- **l2** (Union[Number, Tensor]) - l2 regularization strength. It should be a float number or
a scalar tensor with float16 or float32 data type.
- **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.
- **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.
Outputs:
Tuple of 2 Tensor, the updated parameters.
- **var** (Tensor) - The same shape and data type as `var`.
- **accum** (Tensor) - The same shape and data type as `accum`.
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.sparse_apply_proximal_adagrad = P.SparseApplyProximalAdagrad()
>>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
>>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
>>> self.lr = 0.01
>>> self.l1 = 0.0
>>> self.l2 = 0.0
>>> def construct(self, grad, indices):
>>> out = self.sparse_apply_proximal_adagrad(self.var, self.accum, self.lr, self.l1,
self.l2, grad, indices)
>>> return out
>>> net = Net()
>>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
>>> indices = Tensor(np.ones((3,), np.int32))
>>> output = net(grad, indices)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('lr', dtype=sig.sig_dtype.T1),
sig.make_sig('l1', dtype=sig.sig_dtype.T2),
sig.make_sig('l2', dtype=sig.sig_dtype.T3),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
sig.make_sig('indices', dtype=sig.sig_dtype.T4),
)
@prim_attr_register
def __init__(self, use_locking=False):
self.init_prim_io_names(inputs=['var', 'accum', 'lr', 'l1', 'l2', 'grad', 'indices'],
outputs=['var', 'accum'])
self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
def check_shape(self, var_shape, accum_shape, lr_shape, l1_shape, l2_shape, grad_shape, indices_shape):
validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
def check_dtype(self, var_dtype, accum_dtype, lr_dtype, l1_dtype, l2_dtype, grad_dtype, indices_dtype):
args = {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}
validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, [mstype.float16, mstype.float32], self.name)
validator.check_scalar_or_tensor_type_same({"l1": l1_dtype}, [mstype.float16, mstype.float32], self.name)
validator.check_scalar_or_tensor_type_same({"l2": l2_dtype}, [mstype.float16, mstype.float32], self.name)
valid_types = [mstype.int16, mstype.int32, mstype.int64,
mstype.uint16, mstype.uint32, mstype.uint64]
validator.check_tensor_type_same({'indices': indices_dtype}, valid_types, self.name)
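# Illustrative only: the same proximal Adagrad rule as above, but applied row-wise to the rows
# selected by `indices`, matching this sparse variant's inputs. The helper name and NumPy style
# are assumptions, not the actual kernel.
def _np_sparse_apply_proximal_adagrad_sketch(var, accum, lr, l1, l2, grad, indices):
    import numpy as np  # local import keeps the sketch self-contained
    for i, row in enumerate(indices):
        g = grad[i]
        accum[row] += g * g
        prox_v = var[row] - lr * g / np.sqrt(accum[row])
        var[row] = np.sign(prox_v) / (1 + lr * l2) * np.maximum(np.abs(prox_v) - lr * l1, 0)
    return var, accum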
class ApplyAddSign(PrimitiveWithInfer):
r"""
Update relevant entries according to the AddSign algorithm.
.. math::
\begin{array}{ll} \\
m_{t} = \beta * m_{t-1} + (1 - \beta) * g \\
\text{update} = (\alpha + \text{sign_decay} * sign(g) * sign(m)) * g \\
var = var - lr_{t} * \text{update}
\end{array}
:math:`t` represents updating step while :math:`m` represents the 1st moment vector, :math:`m_{t-1}`
is the last moment of :math:`m_{t}`, :math:`lr` represents the scaling factor `lr`, and :math:`g` represents `grad`.
Inputs of `var`, `accum` and `grad` comply with the implicit type conversion rules
to make the data types consistent.
If they have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Inputs:
- **var** (Parameter) - Variable tensor to be updated. With float32 or float16 data type.
- **m** (Parameter) - Variable tensor to be updated, has the same dtype as `var`.
- **lr** (Union[Number, Tensor]) - The learning rate value, should be a scalar.
With float32 or float16 data type.
- **alpha** (Union[Number, Tensor]) - Should be a scalar. With float32 or float16 data type.
- **sign_decay** (Union[Number, Tensor]) - Should be a scalar. With float32 or float16 data type.
- **beta** (Union[Number, Tensor]) - The exponential decay rate, should be a scalar.
With float32 or float16 data type.
- **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.
Outputs:
Tuple of 2 Tensor, the updated parameters.
- **var** (Tensor) - The same shape and data type as `var`.
- **m** (Tensor) - The same shape and data type as `m`.
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.apply_add_sign = P.ApplyAddSign()
>>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
>>> self.m = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="m")
>>> self.lr = 0.001
>>> self.alpha = 1.0
>>> self.sign_decay = 0.99
>>> self.beta = 0.9
>>> def construct(self, grad):
>>> out = self.apply_add_sign(self.var, self.m, self.lr, self.alpha, self.sign_decay, self.beta, grad)
>>> return out
>>> net = Net()
>>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
>>> output = net(grad)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('lr', dtype=sig.sig_dtype.T1),
sig.make_sig('alpha', dtype=sig.sig_dtype.T2),
sig.make_sig('sign_decay', dtype=sig.sig_dtype.T3),
sig.make_sig('beta', dtype=sig.sig_dtype.T3),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
)
@prim_attr_register
def __init__(self):
"init ApplyAddSign"
def infer_shape(self, var_shape, m_shape, lr_shape, alpha_shape, sign_decay_shape, beta_shape, grad_shape):
validator.check('m_shape', m_shape, 'var_shape', var_shape, Rel.EQ, self.name)
validator.check('grad_shape', grad_shape, 'var_shape', var_shape, Rel.EQ, self.name)
lr_shape_len = len(lr_shape)
validator.check_integer("lr's rank", lr_shape_len, 1, Rel.LE, self.name)
if lr_shape_len == 1:
validator.check_integer("lr_shape[0]", lr_shape[0], 1, Rel.EQ, self.name)
alpha_shape_len = len(alpha_shape)
validator.check_integer("alpha's rank", alpha_shape_len, 1, Rel.LE, self.name)
if alpha_shape_len == 1:
validator.check_integer("alpha_shape[0]", alpha_shape[0], 1, Rel.EQ, self.name)
sign_decay_shape_len = len(sign_decay_shape)
validator.check_integer("sign_decay's rank", sign_decay_shape_len, 1, Rel.LE, self.name)
if sign_decay_shape_len == 1:
validator.check_integer("sign_decay_shape[0]", sign_decay_shape[0], 1, Rel.EQ, self.name)
beta_shape_len = len(beta_shape)
validator.check_integer("beta's rank", beta_shape_len, 1, Rel.LE, self.name)
if beta_shape_len == 1:
validator.check_integer("beta_shape[0]", beta_shape[0], 1, Rel.EQ, self.name)
return var_shape, m_shape
def infer_dtype(self, var_dtype, m_dtype, lr_dtype, alpha_dtype, sign_decay_dtype, beta_dtype, grad_dtype):
valid_types = [mstype.float16, mstype.float32]
args = {'var': var_dtype, 'm': m_dtype, 'grad': grad_dtype}
validator.check_tensor_type_same(args, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"alpha": alpha_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"sign_decay": sign_decay_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"beta": beta_dtype}, valid_types, self.name)
return var_dtype, m_dtype
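# Illustrative only: a minimal NumPy sketch of the AddSign update from the docstring above.
# The helper name is an assumption; this is not the actual backend kernel.
def _np_apply_add_sign_sketch(var, m, lr, alpha, sign_decay, beta, grad):
    import numpy as np  # local import keeps the sketch self-contained
    m[:] = beta * m + (1 - beta) * grad
    update = (alpha + sign_decay * np.sign(grad) * np.sign(m)) * grad
    var[:] = var - lr * update
    return var, m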
class ApplyPowerSign(PrimitiveWithInfer):
r"""
Update relevant entries according to the PowerSign algorithm.
.. math::
\begin{array}{ll} \\
m_{t} = \beta * m_{t-1} + (1 - \beta) * g \\
\text{update} = \exp(\text{logbase} * \text{sign_decay} * sign(g) * sign(m)) * g \\
var = var - lr_{t} * \text{update}
\end{array}
:math:`t` represents updating step while :math:`m` represents the 1st moment vector, :math:`m_{t-1}`
is the last moment of :math:`m_{t}`, :math:`lr` represents the scaling factor `lr`, and :math:`g` represents `grad`.
All of inputs comply with the implicit type conversion rules to make the data types consistent.
If `lr`, `logbase`, `sign_decay` or `beta` is a number, the number is automatically converted to Tensor,
and the data type is consistent with the Tensor data type involved in the operation.
If inputs are tensors and have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Inputs:
- **var** (Parameter) - Variable tensor to be updated. With float32 or float16 data type.
If data type of `var` is float16, all inputs must have the same data type as `var`.
- **m** (Parameter) - Variable tensor to be updated, has the same dtype as `var`.
- **lr** (Union[Number, Tensor]) - The learning rate value, should be a scalar.
With float32 or float16 data type.
- **logbase** (Union[Number, Tensor]) - Should be a scalar. With float32 or float16 data type.
- **sign_decay** (Union[Number, Tensor]) - Should be a scalar. With float32 or float16 data type.
- **beta** (Union[Number, Tensor]) - The exponential decay rate, should be a scalar.
With float32 or float16 data type.
- **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.
Outputs:
Tuple of 2 Tensor, the updated parameters.
- **var** (Tensor) - The same shape and data type as `var`.
- **m** (Tensor) - The same shape and data type as `m`.
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.apply_power_sign = P.ApplyPowerSign()
>>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
>>> self.m = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="m")
>>> self.lr = 0.001
>>> self.logbase = np.e
>>> self.sign_decay = 0.99
>>> self.beta = 0.9
>>> def construct(self, grad):
>>> out = self.apply_power_sign(self.var, self.m, self.lr, self.logbase,
self.sign_decay, self.beta, grad)
>>> return out
>>> net = Net()
>>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
>>> output = net(grad)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('lr', dtype=sig.sig_dtype.T),
sig.make_sig('logbase', dtype=sig.sig_dtype.T),
sig.make_sig('sign_decay', dtype=sig.sig_dtype.T),
sig.make_sig('beta', dtype=sig.sig_dtype.T),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
)
@prim_attr_register
def __init__(self):
"init ApplyPowerSign"
def infer_shape(self, var_shape, m_shape, lr_shape, logbase_shape, sign_decay_shape, beta_shape, grad_shape):
validator.check('m_shape', m_shape, 'var_shape', var_shape, Rel.EQ, self.name)
validator.check('grad_shape', grad_shape, 'var_shape', var_shape, Rel.EQ, self.name)
lr_shape_len = len(lr_shape)
validator.check_integer("lr's rank", lr_shape_len, 1, Rel.LE, self.name)
if lr_shape_len == 1:
validator.check_integer("lr_shape[0]", lr_shape[0], 1, Rel.EQ, self.name)
logbase_shape_len = len(logbase_shape)
validator.check_integer("logbase's rank", logbase_shape_len, 1, Rel.LE, self.name)
if logbase_shape_len == 1:
validator.check_integer("logbase_shape[0]", logbase_shape[0], 1, Rel.EQ, self.name)
sign_decay_shape_len = len(sign_decay_shape)
validator.check_integer("sign_decay's rank", sign_decay_shape_len, 1, Rel.LE, self.name)
if sign_decay_shape_len == 1:
validator.check_integer("sign_decay_shape[0]", sign_decay_shape[0], 1, Rel.EQ, self.name)
beta_shape_len = len(beta_shape)
validator.check_integer("beta's rank", beta_shape_len, 1, Rel.LE, self.name)
if beta_shape_len == 1:
validator.check_integer("beta_shape[0]", beta_shape[0], 1, Rel.EQ, self.name)
return var_shape, m_shape
def infer_dtype(self, var_dtype, m_dtype, lr_dtype, logbase_dtype, sign_decay_dtype, beta_dtype, grad_dtype):
valid_types = [mstype.float16, mstype.float32]
args = {'var': var_dtype, 'm': m_dtype, 'grad': grad_dtype}
validator.check_tensor_type_same(args, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"logbase": logbase_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"sign_decay": sign_decay_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"beta": beta_dtype}, valid_types, self.name)
return var_dtype, m_dtype
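# Illustrative only: a minimal NumPy sketch of the PowerSign update from the docstring above,
# which differs from AddSign by scaling the agreement term exponentially. The helper name is an
# assumption; this is not the actual backend kernel.
def _np_apply_power_sign_sketch(var, m, lr, logbase, sign_decay, beta, grad):
    import numpy as np  # local import keeps the sketch self-contained
    m[:] = beta * m + (1 - beta) * grad
    update = np.exp(logbase * sign_decay * np.sign(grad) * np.sign(m)) * grad
    var[:] = var - lr * update
    return var, m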
class ApplyGradientDescent(PrimitiveWithInfer):
r"""
Update relevant entries according to the following formula.
.. math::
var = var - \alpha * \delta
Inputs of `var` and `delta` comply with the implicit type conversion rules to make the data types consistent.
If they have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Inputs:
- **var** (Parameter) - Variable tensor to be updated. With float32 or float16 data type.
- **alpha** (Union[Number, Tensor]) - Scaling factor, should be a scalar. With float32 or float16 data type.
- **delta** (Tensor) - A tensor for the change, has the same type as `var`.
Outputs:
Tensor, represents the updated `var`.
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.apply_gradient_descent = P.ApplyGradientDescent()
>>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
>>> self.alpha = 0.001
>>> def construct(self, delta):
>>> out = self.apply_gradient_descent(self.var, self.alpha, delta)
>>> return out
>>> net = Net()
>>> delta = Tensor(np.random.rand(3, 3).astype(np.float32))
>>> output = net(delta)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('alpha', dtype=sig.sig_dtype.T1),
sig.make_sig('delta', dtype=sig.sig_dtype.T),
)
@prim_attr_register
def __init__(self):
"init ApplyGradientDescent"
def infer_shape(self, var_shape, alpha_shape, delta_shape):
validator.check('delta shape', delta_shape, 'var shape', var_shape, Rel.EQ, self.name)
alpha_shape_len = len(alpha_shape)
validator.check_integer("alpha's rank", alpha_shape_len, 1, Rel.LE, self.name)
if alpha_shape_len == 1:
validator.check_integer("alpha_shape[0]", alpha_shape[0], 1, Rel.EQ, self.name)
return var_shape
def infer_dtype(self, var_dtype, alpha_dtype, delta_dtype):
valid_types = [mstype.float16, mstype.float32]
args = {'var': var_dtype, 'delta': delta_dtype}
validator.check_tensor_type_same(args, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"alpha": alpha_dtype}, valid_types, self.name)
return var_dtype
class ApplyProximalGradientDescent(PrimitiveWithInfer):
r"""
Update relevant entries according to the FOBOS(Forward Backward Splitting) algorithm.
.. math::
\text{prox_v} = var - \alpha * \delta
.. math::
var = \frac{sign(\text{prox_v})}{1 + \alpha * l2} * \max(\left| \text{prox_v} \right| - alpha * l1, 0)
Inputs of `var` and `delta` comply with the implicit type conversion rules to make the data types consistent.
If they have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Inputs:
- **var** (Parameter) - Variable tensor to be updated. With float32 or float16 data type.
- **alpha** (Union[Number, Tensor]) - Scaling factor, should be a scalar. With float32 or float16 data type.
- **l1** (Union[Number, Tensor]) - l1 regularization strength, should be scalar.
With float32 or float16 data type.
- **l2** (Union[Number, Tensor]) - l2 regularization strength, should be scalar.
With float32 or float16 data type.
- **delta** (Tensor) - A tensor for the change, has the same type as `var`.
Outputs:
Tensor, represents the updated `var`.
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.apply_proximal_gradient_descent = P.ApplyProximalGradientDescent()
>>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
>>> self.alpha = 0.001
>>> self.l1 = 0.0
>>> self.l2 = 0.0
>>> def construct(self, delta):
>>> out = self.apply_proximal_gradient_descent(self.var, self.alpha, self.l1, self.l2, delta)
>>> return out
>>> net = Net()
>>> delta = Tensor(np.random.rand(3, 3).astype(np.float32))
>>> output = net(delta)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('alpha', dtype=sig.sig_dtype.T1),
sig.make_sig('l1', dtype=sig.sig_dtype.T2),
sig.make_sig('l2', dtype=sig.sig_dtype.T3),
sig.make_sig('delta', dtype=sig.sig_dtype.T),
)
@prim_attr_register
def __init__(self):
"init ApplyProximalGradientDescent"
def infer_shape(self, var_shape, alpha_shape, l1_shape, l2_shape, delta_shape):
validator.check('delta shape', delta_shape, 'var shape', var_shape, Rel.EQ, self.name)
alpha_shape_len = len(alpha_shape)
validator.check_integer("alpha's rank", alpha_shape_len, 1, Rel.LE, self.name)
if alpha_shape_len == 1:
validator.check_integer("alpha_shape[0]", alpha_shape[0], 1, Rel.EQ, self.name)
l1_shape_len = len(l1_shape)
validator.check_integer("l1's rank", l1_shape_len, 1, Rel.LE, self.name)
if l1_shape_len == 1:
validator.check_integer("l1_shape[0]", l1_shape[0], 1, Rel.EQ, self.name)
l2_shape_len = len(l2_shape)
validator.check_integer("l2's rank", l2_shape_len, 1, Rel.LE, self.name)
if l2_shape_len == 1:
validator.check_integer("l2_shape[0]", l2_shape[0], 1, Rel.EQ, self.name)
return var_shape
def infer_dtype(self, var_dtype, alpha_dtype, l1_dtype, l2_dtype, delta_dtype):
valid_types = [mstype.float16, mstype.float32]
args = {'var': var_dtype, 'delta': delta_dtype}
validator.check_tensor_type_same(args, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"alpha": alpha_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"l1": l1_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"l2": l2_dtype}, valid_types, self.name)
return var_dtype
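# Illustrative only: a minimal NumPy sketch of the FOBOS-style proximal gradient descent step
# from the docstring above. The helper name is an assumption; not the actual backend kernel.
def _np_apply_proximal_gradient_descent_sketch(var, alpha, l1, l2, delta):
    import numpy as np  # local import keeps the sketch self-contained
    prox_v = var - alpha * delta
    var[:] = np.sign(prox_v) / (1 + alpha * l2) * np.maximum(np.abs(prox_v) - alpha * l1, 0)
    return var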
class LARSUpdate(PrimitiveWithInfer):
"""
Conduct lars (layer-wise adaptive rate scaling) update on the square sum of gradient.
Args:
epsilon (float): Term added to the denominator to improve numerical stability. Default: 1e-05.
hyperpara (float): Trust coefficient for calculating the local learning rate. Default: 0.001.
use_clip (bool): Whether to use clip operation for calculating the local learning rate. Default: False.
Inputs:
- **weight** (Tensor) - The weight to be updated.
- **gradient** (Tensor) - The gradient of weight, which has the same shape and dtype with weight.
- **norm_weight** (Tensor) - A scalar tensor, representing the square sum of weight.
- **norm_gradient** (Tensor) - A scalar tensor, representing the square sum of gradient.
- **weight_decay** (Union[Number, Tensor]) - Weight decay. It should be a scalar tensor or number.
- **learning_rate** (Union[Number, Tensor]) - Learning rate. It should be a scalar tensor or number.
Outputs:
Tensor, represents the new gradient.
Examples:
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import functional as F
>>> import mindspore.nn as nn
>>> import numpy as np
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.lars = P.LARSUpdate()
>>> self.reduce = P.ReduceSum()
>>> def construct(self, weight, gradient):
>>> w_square_sum = self.reduce(F.square(weight))
>>> grad_square_sum = self.reduce(F.square(gradient))
>>> grad_t = self.lars(weight, gradient, w_square_sum, grad_square_sum, 0.0, 1.0)
>>> return grad_t
>>> weight = np.random.random(size=(2, 3)).astype(np.float32)
>>> gradient = np.random.random(size=(2, 3)).astype(np.float32)
>>> net = Net()
>>> ms_output = net(Tensor(weight), Tensor(gradient))
"""
@prim_attr_register
def __init__(self, epsilon=1e-05, hyperpara=0.001, use_clip=False):
"""init"""
validator.check_value_type("epsilon", epsilon, [float], self.name)
validator.check_value_type("hyperpara", hyperpara, [float], self.name)
validator.check_value_type("use_clip", use_clip, [bool], self.name)
def infer_shape(self, weight_shape, gradient_shape, norm_weight_shape, norm_gradient_shape, weight_decay_shape,
learning_rate_shape):
validator.check("weight shape", weight_shape, "gradient shape", gradient_shape, Rel.EQ, self.name)
validator.check("norm weight shape", norm_weight_shape, "norm gradient shape", norm_gradient_shape, Rel.EQ,
self.name)
shp_len = len(weight_decay_shape)
validator.check_integer("weight decay's rank", shp_len, 1, Rel.LE, self.name)
if shp_len == 1:
validator.check_integer("weight_decay_shape[0]", weight_decay_shape[0], 1, Rel.EQ, self.name)
shp_len = len(learning_rate_shape)
validator.check_integer("learning rate's rank", shp_len, 1, Rel.LE, self.name)
if shp_len == 1:
validator.check_integer("learning_rate_shape[0]", learning_rate_shape[0], 1, Rel.EQ, self.name)
return weight_shape
def infer_dtype(self, weight_dtype, gradient_dtype, norm_weight_dtype, norm_gradient_dtype,
weight_decay_dtype, learning_rate_dtype):
args = {"Weight dtype": weight_dtype, "gradient dtype": gradient_dtype, "norm weight dtype": norm_weight_dtype,
"norm gradient dtype": norm_gradient_dtype}
validator.check_tensor_type_same(args, [mstype.float16, mstype.float32, mstype.int16, mstype.int32], self.name)
validator.check_scalar_or_tensor_type_same({"weight_decay": weight_decay_dtype},
[mstype.float16, mstype.float32, mstype.float64], self.name)
validator.check_scalar_or_tensor_type_same({"learning_rate": learning_rate_dtype},
[mstype.float16, mstype.float32, mstype.float64], self.name)
return weight_dtype
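# Illustrative only: one common formulation of the LARS trust-ratio rule, expressed with this
# primitive's inputs (square sums of weight and gradient). The epsilon/hyperpara defaults, the
# handling of `use_clip`, and the helper name are assumptions; the actual kernel may differ.
def _np_lars_update_sketch(weight, gradient, norm_weight, norm_gradient,
                           weight_decay, learning_rate, epsilon=1e-05, hyperpara=0.001):
    import numpy as np  # local import keeps the sketch self-contained
    w_norm = np.sqrt(norm_weight)
    g_norm = np.sqrt(norm_gradient)
    trust_ratio = hyperpara * w_norm / (g_norm + weight_decay * w_norm + epsilon)
    return learning_rate * trust_ratio * (gradient + weight_decay * weight)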
class ApplyFtrl(PrimitiveWithInfer):
"""
Update relevant entries according to the FTRL scheme.
Args:
use_locking (bool): Use locks for the updating operation if True. Default: False.
Inputs:
- **var** (Parameter) - The variable to be updated. The data type should be float16 or float32.
- **accum** (Parameter) - The accumulation to be updated, must be same type and shape as `var`.
- **linear** (Parameter) - the linear coefficient to be updated, must be same type and shape as `var`.
- **grad** (Tensor) - Gradient. The data type should be float16 or float32.
- **lr** (Union[Number, Tensor]) - The learning rate value, must be positive. Default: 0.001.
It should be a float number or a scalar tensor with float16 or float32 data type.
- **l1** (Union[Number, Tensor]) - l1 regularization strength, must be greater than or equal to zero.
Default: 0.0. It should be a float number or a scalar tensor with float16 or float32 data type.
- **l2** (Union[Number, Tensor]) - l2 regularization strength, must be greater than or equal to zero.
Default: 0.0. It should be a float number or a scalar tensor with float16 or float32 data type.
- **lr_power** (Union[Number, Tensor]) - Learning rate power controls how the learning rate decreases
during training, must be less than or equal to zero. Use fixed learning rate if lr_power is zero.
Default: -0.5. It should be a float number or a scalar tensor with float16 or float32 data type.
Outputs:
Tensor, represents the updated `var`.
Examples:
>>> import mindspore
>>> import mindspore.nn as nn
>>> import numpy as np
>>> from mindspore import Parameter
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> class ApplyFtrlNet(nn.Cell):
>>> def __init__(self):
>>> super(ApplyFtrlNet, self).__init__()
>>> self.apply_ftrl = P.ApplyFtrl()
>>> self.lr = 0.001
>>> self.l1 = 0.0
>>> self.l2 = 0.0
>>> self.lr_power = -0.5
>>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
>>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
>>> self.linear = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="linear")
>>>
>>> def construct(self, grad):
>>> out = self.apply_ftrl(self.var, self.accum, self.linear, grad, self.lr, self.l1, self.l2,
>>> self.lr_power)
>>> return out
>>>
>>> net = ApplyFtrlNet()
>>> input_x = Tensor(np.random.randint(-4, 4, (3, 3)), mindspore.float32)
>>> result = net(input_x)
[[0.67455846 0.14630564 0.160499 ]
[0.16329421 0.00415689 0.05202988]
[0.18672481 0.17418946 0.36420345]]
"""
@prim_attr_register
def __init__(self, use_locking=False):
self.init_prim_io_names(inputs=['var', 'accum', 'linear', 'grad', 'lr', 'l1', 'l2', 'lr_power'],
outputs=['output'])
self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
self.is_tbe = context.get_context("device_target") == "Ascend"
def infer_shape(self, var_shape, accum_shape, linear_shape, grad_shape, lr_shape, l1_shape, l2_shape,
lr_power_shape):
validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
validator.check('var shape', var_shape, 'linear shape', linear_shape, Rel.EQ, self.name)
if self.is_tbe:
return var_shape, var_shape, var_shape
return var_shape
def infer_dtype(self, var_type, accum_type, linear_type, grad_type, lr_type, l1_type, l2_type, lr_power_type):
valid_types = [mstype.float16, mstype.float32]
args = {'var': var_type, 'accum': accum_type, 'linear': linear_type, 'grad': grad_type}
validator.check_tensor_type_same(args, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"lr": lr_type}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"l1": l1_type}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"l2": l2_type}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"lr_power": lr_power_type}, valid_types, self.name)
if self.is_tbe:
return var_type, var_type, var_type
return var_type
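# Illustrative only: a hedged NumPy sketch of the FTRL-proximal update as commonly documented
# for the analogous TensorFlow kernel, shown to make the roles of lr, l1, l2 and lr_power
# concrete. The explicit `linear`/`quadratic` handling and the helper name are assumptions;
# the actual MindSpore kernel may differ in details.
def _np_apply_ftrl_sketch(var, accum, linear, grad, lr=0.001, l1=0.0, l2=0.0, lr_power=-0.5):
    import numpy as np  # local import keeps the sketch self-contained
    accum_new = accum + grad * grad
    sigma = (accum_new ** (-lr_power) - accum ** (-lr_power)) / lr
    linear[:] = linear + grad - sigma * var
    quadratic = accum_new ** (-lr_power) / lr + 2 * l2
    var[:] = np.where(np.abs(linear) > l1, (np.sign(linear) * l1 - linear) / quadratic, 0.0)
    accum[:] = accum_new
    return var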
class SparseApplyFtrl(PrimitiveWithCheck):
"""
Update relevant entries according to the FTRL-proximal scheme.
All of inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.
If they have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Args:
lr (float): The learning rate value, must be positive.
l1 (float): l1 regularization strength, must be greater than or equal to zero.
l2 (float): l2 regularization strength, must be greater than or equal to zero.
lr_power (float): Learning rate power controls how the learning rate decreases during training,
must be less than or equal to zero. Use fixed learning rate if `lr_power` is zero.
use_locking (bool): Use locks for the updating operation if True. Default: False.
Inputs:
- **var** (Parameter) - The variable to be updated. The data type must be float16 or float32.
- **accum** (Parameter) - The accumulation to be updated, must be same type and shape as `var`.
- **linear** (Parameter) - the linear coefficient to be updated, must be same type and shape as `var`.
- **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.
- **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.
The shape of `indices` must be the same as `grad` in first dimension. The type must be int32.
Outputs:
- **var** (Tensor) - Tensor, has the same shape and type as `var`.
- **accum** (Tensor) - Tensor, has the same shape and type as `accum`.
- **linear** (Tensor) - Tensor, has the same shape and type as `linear`.
Examples:
>>> import mindspore
>>> import mindspore.nn as nn
>>> import numpy as np
>>> from mindspore import Parameter
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> class SparseApplyFtrlNet(nn.Cell):
>>> def __init__(self):
>>> super(SparseApplyFtrlNet, self).__init__()
>>> self.sparse_apply_ftrl = P.SparseApplyFtrl(lr=0.01, l1=0.0, l2=0.0, lr_power=-0.5)
>>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
>>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
>>> self.linear = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="linear")
>>>
>>> def construct(self, grad, indices):
>>> out = self.sparse_apply_ftrl(self.var, self.accum, self.linear, grad, indices)
>>> return out
>>>
>>> net = SparseApplyFtrlNet()
>>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
>>> indices = Tensor(np.ones([3]), mindspore.int32)
>>> output = net(grad, indices)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('linear', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
sig.make_sig('indices', dtype=sig.sig_dtype.T1),
)
@prim_attr_register
def __init__(self, lr, l1, l2, lr_power, use_locking=False):
validator.check_value_type("lr", lr, [float], self.name)
validator.check_value_type("l1", l1, [float], self.name)
validator.check_value_type("l2", l2, [float], self.name)
validator.check_value_type("lr_power", lr_power, [float], self.name)
self.lr = validator.check_number_range("lr", lr, 0.0, float("inf"), Rel.INC_NEITHER, self.name)
self.l1 = validator.check_number_range("l1", l1, 0.0, float("inf"), Rel.INC_LEFT, self.name)
self.l2 = validator.check_number_range("l2", l2, 0.0, float("inf"), Rel.INC_LEFT, self.name)
self.lr_power = validator.check_number("lr_power", lr_power, 0, Rel.LE, self.name)
self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
def check_shape(self, var_shape, accum_shape, linear_shape, grad_shape, indices_shape):
validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
validator.check('var shape', var_shape, 'linear shape', linear_shape, Rel.EQ, self.name)
if len(var_shape) > 1:
validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
def check_dtype(self, var_dtype, accum_dtype, linear_dtype, grad_dtype, indices_dtype):
args = {"var_dtype": var_dtype, "accum_dtype": accum_dtype,
"linear_dtype": linear_dtype, "grad_dtype": grad_dtype}
validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
class SparseApplyFtrlV2(PrimitiveWithInfer):
"""
Update relevant entries according to the FTRL-proximal scheme.
All of inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.
If they have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Args:
lr (float): The learning rate value, must be positive.
l1 (float): l1 regularization strength, must be greater than or equal to zero.
l2 (float): l2 regularization strength, must be greater than or equal to zero.
l2_shrinkage (float): L2 shrinkage regularization.
lr_power (float): Learning rate power controls how the learning rate decreases during training,
must be less than or equal to zero. Use fixed learning rate if `lr_power` is zero.
use_locking (bool): If `True`, the var and accumulation tensors will be protected from being updated.
Default: False.
Inputs:
- **var** (Parameter) - The variable to be updated. The data type must be float16 or float32.
- **accum** (Parameter) - The accumulation to be updated, must be same type and shape as `var`.
- **linear** (Parameter) - the linear coefficient to be updated, must be same type and shape as `var`.
- **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.
- **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.
The shape of `indices` must be the same as `grad` in first dimension. The type must be int32.
Outputs:
Tuple of 3 Tensor, the updated parameters.
- **var** (Tensor) - Tensor, has the same shape and type as `var`.
- **accum** (Tensor) - Tensor, has the same shape and type as `accum`.
- **linear** (Tensor) - Tensor, has the same shape and type as `linear`.
Examples:
>>> import mindspore
>>> import mindspore.nn as nn
>>> import numpy as np
>>> from mindspore import Parameter
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> class SparseApplyFtrlV2Net(nn.Cell):
>>> def __init__(self):
>>> super(SparseApplyFtrlV2Net, self).__init__()
>>> self.sparse_apply_ftrl_v2 = P.SparseApplyFtrlV2(lr=0.01, l1=0.0, l2=0.0,
l2_shrinkage=0.0, lr_power=-0.5)
>>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
>>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
>>> self.linear = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="linear")
>>>
>>> def construct(self, grad, indices):
>>> out = self.sparse_apply_ftrl_v2(self.var, self.accum, self.linear, grad, indices)
>>> return out
>>>
>>> net = SparseApplyFtrlV2Net()
>>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
>>> indices = Tensor(np.ones([3]), mindspore.int32)
>>> output = net(grad, indices)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('linear', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
sig.make_sig('indices', dtype=sig.sig_dtype.T1),
)
@prim_attr_register
def __init__(self, lr, l1, l2, l2_shrinkage, lr_power, use_locking=False):
validator.check_value_type("lr", lr, [float], self.name)
validator.check_value_type("l1", l1, [float], self.name)
validator.check_value_type("l2", l2, [float], self.name)
validator.check_value_type("lr_power", lr_power, [float], self.name)
self.lr = validator.check_number_range("lr", lr, 0.0, float("inf"), Rel.INC_NEITHER, self.name)
self.l1 = validator.check_number_range("l1", l1, 0.0, float("inf"), Rel.INC_LEFT, self.name)
self.l2 = validator.check_number_range("l2", l2, 0.0, float("inf"), Rel.INC_LEFT, self.name)
self.lr_power = validator.check_number("lr_power", lr_power, 0, Rel.LE, self.name)
self.l2_shrinkage = validator.check_value_type("l2_shrinkage", l2_shrinkage, [float], self.name)
self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
def infer_shape(self, var_shape, accum_shape, linear_shape, grad_shape, indices_shape):
validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
validator.check('var shape', var_shape, 'linear shape', linear_shape, Rel.EQ, self.name)
if len(var_shape) > 1:
validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
return var_shape, accum_shape, linear_shape
def infer_dtype(self, var_dtype, accum_dtype, linear_dtype, grad_dtype, indices_dtype):
args = {"var_dtype": var_dtype, "accum_dtype": accum_dtype,
"linear_dtype": linear_dtype, "grad_dtype": grad_dtype}
validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
return var_dtype, accum_dtype, linear_dtype
class ConfusionMulGrad(PrimitiveWithInfer):
"""
`output0` is the dot product result of input0 and input1.
`output1` is the dot product result of input0 and input1, which is then reduced with the ReduceSum operation.
Args:
axis (Union[int, tuple[int], list[int]]): The dimensions to reduce.
Default: (), reduce all dimensions. Only constant value is allowed.
keep_dims (bool):
- If true, keep these reduced dimensions and set their length to 1.
- If false, don't keep these dimensions. Default: False.
Inputs:
- **input_0** (Tensor) - The input Tensor.
- **input_1** (Tensor) - The input Tensor.
- **input_2** (Tensor) - The input Tensor.
Outputs:
- **output_0** (Tensor) - The same shape as `input0`.
- **output_1** (Tensor)
- If axis is (), and keep_dims is false, the output is a 0-D array representing
the sum of all elements in the input array.
- If axis is int, set as 2, and keep_dims is false,
the shape of output is :math:`(x_1,x_3,...,x_R)`.
- If axis is tuple(int), set as (2,3), and keep_dims is false,
the shape of output is :math:`(x_1,x_4,...x_R)`.
Examples:
>>> confusion_mul_grad = P.ConfusionMulGrad()
>>> input_0 = Tensor(np.random.randint(-2, 2, (2, 3)), mindspore.float32)
>>> input_1 = Tensor(np.random.randint(0, 4, (2, 3)), mindspore.float32)
>>> input_2 = Tensor(np.random.randint(-4, 0, (2, 3)), mindspore.float32)
>>> output_0, output_1 = confusion_mul_grad(input_0, input_1, input_2)
output_0:
[[ 3. 1. 0.]
[-6. 2. -2.]]
output_1:
-3.0
"""
@prim_attr_register
def __init__(self, axis=(), keep_dims=False):
self.init_prim_io_names(inputs=["input0", "input1", "input2"], outputs=["output0", "output1"])
self.axis_ = validator.check_value_type("axis", axis, [int, tuple, list], self.name)
self.keep_dims_ = validator.check_value_type("keep_dims", keep_dims, [bool], self.name)
def infer_shape(self, input0_shape, input1_shape, input2_shape):
outshape0 = input0_shape
outshape1 = _infer_shape_reduce(input1_shape, self.axis_, self.keep_dims_, self.name)
return outshape0, outshape1
def infer_dtype(self, input0_dtype, input1_dtype, input2_dtype):
validator.check_subclass("input0_dtype", input0_dtype, mstype.tensor, self.name)
validator.check_subclass("input1_dtype", input1_dtype, mstype.tensor, self.name)
validator.check_subclass("input2_dtype", input2_dtype, mstype.tensor, self.name)
return input0_dtype, input1_dtype
class Dropout(PrimitiveWithInfer):
"""
During training, randomly zeroes some of the elements of the input tensor with probability 1 - `keep_prob`.
Args:
keep_prob (float): The keep rate, between 0 and 1, e.g. keep_prob = 0.9,
means dropping out 10% of input units. Default: 0.5.
Inputs:
- **input_x** (Tensor) - The input tensor. The data type should be float16 or float32.
Outputs:
- **output** (Tensor) - With the same shape and data type as `input_x`.
- **mask** (Tensor) - The generated dropout mask, with the same shape as `input_x`.
Examples:
>>> dropout = P.Dropout(keep_prob=0.5)
>>> input_x = Tensor(np.random.rand(20, 16, 50).astype(np.float32))
>>> output, mask = dropout(input_x)
"""
@prim_attr_register
def __init__(self, keep_prob=0.5):
self.keep_prob = validator.check_number_range("keep_prob", keep_prob, 0, 1, Rel.INC_RIGHT, self.name)
def infer_shape(self, x_shape):
validator.check_integer("x_shape", len(x_shape), 1, Rel.GE, self.name)
mask_shape = x_shape
return x_shape, mask_shape
def infer_dtype(self, x_dtype):
valid_types = (mstype.float16, mstype.float32)
validator.check_subclass("x", x_dtype, mstype.tensor, self.name)
validator.check_tensor_type_same({"x_dtype": x_dtype}, valid_types, self.name)
return x_dtype, x_dtype
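# Illustrative only: a minimal NumPy sketch of inverted dropout matching the keep_prob semantics
# above (keep an element with probability keep_prob, scale kept values by 1 / keep_prob). The
# random mask generation and the helper name are assumptions; not the actual backend kernel.
def _np_dropout_sketch(x, keep_prob=0.5):
    import numpy as np  # local import keeps the sketch self-contained
    mask = (np.random.rand(*x.shape) < keep_prob).astype(x.dtype)
    return x * mask / keep_prob, mask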
class DropoutGrad(PrimitiveWithInfer):
"""
The gradient of Dropout. During training, randomly zeroes some of the elements
of the input tensor with probability 1 - `keep_prob`.
Args:
keep_prob (float): The keep rate, between 0 and 1, e.g. keep_prob = 0.9,
means dropping out 10% of input units. Default: 0.5.
Inputs:
- **dy** (Tensor) - The gradient flowing back from the Dropout output. The data type should be
float16 or float32.
- **mask** (Tensor) - The mask generated by the forward Dropout.
Outputs:
Tensor, the gradient of the Dropout input, with the same shape and data type as `dy`.
Examples:
>>> dropout_grad = P.DropoutGrad(keep_prob=0.5)
>>> dy = Tensor(np.random.rand(20, 16, 50).astype(np.float32))
>>> mask = Tensor(np.ones([20, 16, 50]).astype(np.float32))
>>> output = dropout_grad(dy, mask)
"""
@prim_attr_register
def __init__(self, keep_prob=0.5):
self.keep_prob = validator.check_number_range("keep_prob", keep_prob, 0, 1, Rel.INC_RIGHT, self.name)
def infer_shape(self, dy_shape, mask_shape):
return dy_shape
def infer_dtype(self, dy_dtype, mask_dtype):
valid_types = (mstype.float16, mstype.float32)
validator.check_subclass("dy", dy_dtype, mstype.tensor, self.name)
validator.check_subclass("mask", mask_dtype, mstype.tensor, self.name)
validator.check_tensor_type_same({"dy_dtype": dy_dtype}, valid_types, self.name)
return dy_dtype
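# Illustrative only: the matching backward rule for the inverted-dropout sketch above; the
# incoming gradient is masked and rescaled by 1 / keep_prob. The helper name is an assumption.
def _np_dropout_grad_sketch(dy, mask, keep_prob=0.5):
    return dy * mask / keep_prob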
class CTCLoss(PrimitiveWithInfer):
"""
Calculates the CTC (Connectionist Temporal Classification) loss and the gradient.
Args:
preprocess_collapse_repeated (bool): If true, repeated labels will be collapsed prior to the CTC calculation.
Default: False.
ctc_merge_repeated (bool): If false, during CTC calculation, repeated non-blank labels will not be merged
and these labels will be interpreted as individual ones. This is a simplified
version of CTC. Default: True.
ignore_longer_outputs_than_inputs (bool): If True, sequences with longer outputs than inputs will be ignored.
Default: False.
Inputs:
- **inputs** (Tensor) - The input Tensor should be a `3-D` tensor whose shape is
:math:`(max_time, batch_size, num_classes)`. `num_classes` should be `num_labels + 1` classes, `num_labels`
indicates the number of actual labels. Blank labels are reserved. Default blank label is `num_classes - 1`.
Data type must be float16, float32 or float64.
- **labels_indices** (Tensor) - The indices of labels. `labels_indices[i, :] == [b, t]` means `labels_values[i]`
stores the id for `(batch b, time t)`. The type must be int64 and rank must be 2.
- **labels_values** (Tensor) - A `1-D` input tensor. The values are associated with the given batch and time.
The type must be int32. `labels_values[i]` must be in the range of `[0, num_classes)`.
- **sequence_length** (Tensor) - A tensor containing sequence lengths with the shape of :math:`(batch_size)`.
The type must be int32. Each value in the tensor should not be greater than `max_time`.
Outputs:
- **loss** (Tensor) - A tensor containing log-probabilities, the shape is :math:`(batch_size)`. The tensor has
the same type with `inputs`.
- **gradient** (Tensor) - The gradient of `loss`, has the same type and shape with `inputs`.
Examples:
>>> inputs = Tensor(np.random.random((2, 2, 3)), mindspore.float32)
>>> labels_indices = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int64)
>>> labels_values = Tensor(np.array([2, 2]), mindspore.int32)
>>> sequence_length = Tensor(np.array([2, 2]), mindspore.int32)
>>> ctc_loss = P.CTCLoss()
>>> output = ctc_loss(inputs, labels_indices, labels_values, sequence_length)
"""
@prim_attr_register
def __init__(self, preprocess_collapse_repeated=False, ctc_merge_repeated=True,
ignore_longer_outputs_than_inputs=False):
self.init_prim_io_names(inputs=["inputs", "labels_indices", "labels_values", "sequence_length"],
outputs=["loss", "gradient"])
validator.check_value_type("preprocess_collapse_repeated", preprocess_collapse_repeated, [bool], self.name)
self.preprocess_collapse_repeated_ = preprocess_collapse_repeated
self.ctc_merge_repeated_ = validator.check_value_type("ctc_merge_repeated", ctc_merge_repeated,
[bool], self.name)
validator.check_value_type("ignore_longer_outputs_than_inputs",
ignore_longer_outputs_than_inputs, [bool], self.name)
self.ignore_longer_outputs_than_inputs_ = ignore_longer_outputs_than_inputs
def infer_shape(self, inputs, labels_indices, labels_values, sequence_length):
validator.check_integer("inputs rank", len(inputs), 3, Rel.EQ, self.name)
validator.check_integer("labels_indices rank", len(labels_indices), 2, Rel.EQ, self.name)
validator.check_integer("labels_indices dim one", labels_indices[1], 2, Rel.EQ, self.name)
validator.check_integer("labels_values rank", len(labels_values), 1, Rel.EQ, self.name)
validator.check_integer("sequence_length rank", len(sequence_length), 1, Rel.EQ, self.name)
validator.check('labels_indices size', labels_indices[0], 'labels_values size',
labels_values[0], Rel.EQ, self.name)
validator.check('inputs batch_size', inputs[1], 'sequence_length batch_size',
sequence_length[0], Rel.EQ, self.name)
batch_size = []
batch_size.append(inputs[1])
return batch_size, inputs
def infer_dtype(self, inputs, labels_indices, labels_values, sequence_length):
valid_dtype = [mstype.float16, mstype.float32, mstype.double]
validator.check_tensor_type_same({"inputs_dtype": inputs}, valid_dtype, self.name)
validator.check_tensor_type_same({"labels_indices_dtype": labels_indices}, [mstype.int64], self.name)
validator.check_tensor_type_same({"labels_values_dtype": labels_values}, [mstype.int32], self.name)
validator.check_tensor_type_same({"sequence_length_dtype": sequence_length}, [mstype.int32], self.name)
return inputs, inputs
class CTCGreedyDecoder(PrimitiveWithInfer):
"""
Performs greedy decoding on the logits given in inputs.
Args:
merge_repeated (bool): If True, merge repeated classes in output. Default: True.
Inputs:
- **inputs** (Tensor) - The input Tensor should be a `3-D` tensor whose shape is
:math:`(max_time, batch_size, num_classes)`. `num_classes` should be `num_labels + 1` classes, `num_labels`
indicates the number of actual labels. Blank labels are reserved. Default blank label is `num_classes - 1`.
Data type must be float32 or float64.
- **sequence_length** (Tensor) - A tensor containing sequence lengths with the shape of :math:`(batch_size)`.
The type must be int32. Each value in the tensor should not be greater than `max_time`.
Outputs:
- **decoded_indices** (Tensor) - A tensor with shape of :math:`(total_decoded_outputs, 2)`.
Data type is int64.
- **decoded_values** (Tensor) - A tensor with shape of :math:`(total_decoded_outputs)`,
it stores the decoded classes. Data type is int64.
- **decoded_shape** (Tensor) - The value of tensor is :math:`[batch_size, max_decoded_length]`.
Data type is int64.
- **log_probability** (Tensor) - A tensor with shape of :math:`(batch_size, 1)`,
containing sequence log-probability, has the same type as `inputs`.
Examples:
>>> class CTCGreedyDecoderNet(nn.Cell):
>>> def __init__(self):
>>> super(CTCGreedyDecoderNet, self).__init__()
>>> self.ctc_greedy_decoder = P.CTCGreedyDecoder()
>>> self.assert_op = P.Assert(300)
>>>
>>> def construct(self, inputs, sequence_length):
>>> out = self.ctc_greedy_decoder(inputs,sequence_length)
>>> self.assert_op(True, (out[0], out[1], out[2], out[3]))
>>> return out[2]
>>>
>>> inputs = Tensor(np.random.random((2, 2, 3)), mindspore.float32)
>>> sequence_length = Tensor(np.array([2, 2]), mindspore.int32)
>>> net = CTCGreedyDecoderNet()
>>> output = net(inputs, sequence_length)
"""
@prim_attr_register
def __init__(self, merge_repeated=True):
self.merge_repeated = validator.check_value_type("merge_repeated", merge_repeated, [bool], self.name)
def infer_shape(self, inputs_shape, sequence_length_shape):
validator.check_integer("inputs rank", len(inputs_shape), 3, Rel.EQ, self.name)
validator.check_integer("sequence_length rank", len(sequence_length_shape), 1, Rel.EQ, self.name)
validator.check('inputs batch_size', inputs_shape[1], 'sequence_length batch_size',
sequence_length_shape[0], Rel.EQ, self.name)
total_decoded_outputs = -1
decoded_indices_shape = [total_decoded_outputs, 2]
decoded_values = [total_decoded_outputs]
decoded_shape = [2]
log_probability_shape = [inputs_shape[1], 1]
return decoded_indices_shape, decoded_values, decoded_shape, log_probability_shape
def infer_dtype(self, inputs_dtype, sequence_length_dtype):
validator.check_tensor_type_same({"inputs_dtype": inputs_dtype}, [mstype.float32, mstype.double], self.name)
validator.check_tensor_type_same({"sequence_length_dtype": sequence_length_dtype}, [mstype.int32], self.name)
decoded_type = mstype.tensor_type(mstype.int64)
return decoded_type, decoded_type, decoded_type, inputs_dtype
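# Illustrative only: a simplified NumPy sketch of greedy CTC decoding. Per batch element it takes
# the argmax class at each valid time step, optionally merges repeats, then drops the blank label
# (num_classes - 1). The real operator returns its result in sparse form (decoded_indices /
# decoded_values / decoded_shape) plus log-probabilities; this helper returns plain Python lists
# and its name is an assumption.
def _np_ctc_greedy_decode_sketch(inputs, sequence_length, merge_repeated=True):
    import numpy as np  # local import keeps the sketch self-contained
    _, batch_size, num_classes = inputs.shape
    blank = num_classes - 1
    decoded = []
    for b in range(batch_size):
        best = np.argmax(inputs[:sequence_length[b], b, :], axis=-1)
        if merge_repeated:
            best = [c for i, c in enumerate(best) if i == 0 or c != best[i - 1]]
        decoded.append([int(c) for c in best if c != blank])
    return decoded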
class BasicLSTMCell(PrimitiveWithInfer):
r"""
Applies the long short-term memory (LSTM) to the input.
.. math::
\begin{array}{ll} \\
i_t = \sigma(W_{ix} x_t + b_{ix} + W_{ih} h_{(t-1)} + b_{ih}) \\
f_t = \sigma(W_{fx} x_t + b_{fx} + W_{fh} h_{(t-1)} + b_{fh}) \\
\tilde{c}_t = \tanh(W_{cx} x_t + b_{cx} + W_{ch} h_{(t-1)} + b_{ch}) \\
o_t = \sigma(W_{ox} x_t + b_{ox} + W_{oh} h_{(t-1)} + b_{oh}) \\
c_t = f_t * c_{(t-1)} + i_t * \tilde{c}_t \\
h_t = o_t * \tanh(c_t) \\
\end{array}
Here :math:`\sigma` is the sigmoid function, and :math:`*` is the Hadamard product. :math:`W, b`
are learnable weights between the output and the input in the formula. For instance,
:math:`W_{ix}, b_{ix}` are the weight and bias used to transform from input :math:`x` to :math:`i`.
Details can be found in paper `LONG SHORT-TERM MEMORY
<https://www.bioinf.jku.at/publications/older/2604.pdf>`_ and
`Long Short-Term Memory Recurrent Neural Network Architectures for Large Scale Acoustic Modeling
<https://static.googleusercontent.com/media/research.google.com/zh-CN//pubs/archive/43905.pdf>`_.
Args:
keep_prob (float): If not 1.0, append `Dropout` layer on the outputs of each
LSTM layer except the last layer. Default: 1.0. The range of dropout is [0.0, 1.0].
forget_bias (float): Add forget bias to forget gate biases in order to decrease former scale. Default: 1.0.
state_is_tuple (bool): If true, the state is a tuple of 2 tensors, containing h and c; If false, the state is
a tensor and it needs to be split first. Default: True.
activation (str): Activation. Default: "tanh". Only "tanh" is currently supported.
Inputs:
- **x** (Tensor) - Current words. Tensor of shape (`batch_size`, `input_size`).
The data type must be float16 or float32.
- **h** (Tensor) - Hidden state last moment. Tensor of shape (`batch_size`, `hidden_size`).
The data type must be float16 or float32.
- **c** (Tensor) - Cell state last moment. Tensor of shape (`batch_size`, `hidden_size`).
The data type must be float16 or float32.
- **w** (Tensor) - Weight. Tensor of shape (`input_size + hidden_size`, `4 x hidden_size`).
The data type must be float16 or float32.
- **b** (Tensor) - Bias. Tensor of shape (`4 x hidden_size`).
The data type must be the same as `c`.
Outputs:
- **ct** (Tensor) - Forward :math:`c_t` cache at moment `t`. Tensor of shape (`batch_size`, `hidden_size`).
Has the same type with input `c`.
- **ht** (Tensor) - Cell output. Tensor of shape (`batch_size`, `hidden_size`). With data type of float16.
- **it** (Tensor) - Forward :math:`i_t` cache at moment `t`. Tensor of shape (`batch_size`, `hidden_size`).
Has the same type with input `c`.
- **jt** (Tensor) - Forward :math:`j_t` cache at moment `t`. Tensor of shape (`batch_size`, `hidden_size`).
Has the same type with input `c`.
- **ft** (Tensor) - Forward :math:`f_t` cache at moment `t`. Tensor of shape (`batch_size`, `hidden_size`).
Has the same type with input `c`.
- **ot** (Tensor) - Forward :math:`o_t` cache at moment `t`. Tensor of shape (`batch_size`, `hidden_size`).
Has the same type with input `c`.
- **tanhct** (Tensor) - Forward :math:`\tanh(c_t)` cache at moment `t`.
Tensor of shape (`batch_size`, `hidden_size`), has the same type with input `c`.
Examples:
>>> x = Tensor(np.random.rand(1, 32).astype(np.float16))
>>> h = Tensor(np.random.rand(1, 64).astype(np.float16))
>>> c = Tensor(np.random.rand(1, 64).astype(np.float16))
>>> w = Tensor(np.random.rand(96, 256).astype(np.float16))
>>> b = Tensor(np.random.rand(256, ).astype(np.float16))
>>> lstm = P.BasicLSTMCell(keep_prob=1.0, forget_bias=1.0, state_is_tuple=True, activation='tanh')
>>> lstm(x, h, c, w, b)
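>>> # Expected shapes, per the shape inference below: each of the seven outputs has
>>> # shape (1, 64) (the shape of `c`), and `ht` is returned with float16 data type.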
"""
@prim_attr_register
def __init__(self, keep_prob=1.0, forget_bias=1.0, state_is_tuple=True, activation='tanh'):
self.keep_prob = validator.check_value_type("keep_prob", keep_prob, [float], self.name)
self.keep_prob = validator.check_number_range("keep_prob", keep_prob, 0.0, 1.0, Rel.INC_BOTH, self.name)
self.forget_bias = validator.check_value_type("forget_bias", forget_bias, [float], self.name)
self.state_is_tuple = validator.check_value_type("state_is_tuple", state_is_tuple, [bool], self.name)
self.activation = validator.check_string("activation", activation, ['tanh'], self.name)
self.add_prim_attr("io_format", "ND")
def infer_shape(self, x_shape, h_shape, c_shape, w_shape, b_shape):
validator.check_integer("x rank", len(x_shape), 2, Rel.EQ, self.name)
validator.check_integer("h rank", len(h_shape), 2, Rel.EQ, self.name)
validator.check_integer("c rank", len(c_shape), 2, Rel.EQ, self.name)
validator.check_integer("w rank", len(w_shape), 2, Rel.EQ, self.name)
validator.check_integer("b rank", len(b_shape), 1, Rel.EQ, self.name)
validator.check("x_shape[0]", x_shape[0], "h_shape[0]", h_shape[0], Rel.EQ, self.name)
validator.check("c_shape[0]", c_shape[0], "h_shape[0]", h_shape[0], Rel.EQ, self.name)
validator.check("c_shape[1]", c_shape[1], "h_shape[1]", h_shape[1], Rel.EQ, self.name)
validator.check("w_shape[1]", w_shape[1], "4*h_shape[1]", 4 * h_shape[1], Rel.EQ, self.name)
validator.check("w_shape[0]", w_shape[0], "x_shape[1]+h_shape[1]", x_shape[1] + h_shape[1], Rel.EQ, self.name)
validator.check("b_shape[0]", b_shape[0], "4*h_shape[1]", 4 * h_shape[1], Rel.EQ, self.name)
ct_shape = c_shape
ht_shape = c_shape
it_shape = c_shape
jt_shape = c_shape
ft_shape = c_shape
ot_shape = c_shape
tanhct_shape = c_shape
return (ct_shape, ht_shape, it_shape, jt_shape, ft_shape, ot_shape, tanhct_shape)
def infer_dtype(self, x_dtype, h_dtype, c_dtype, w_dtype, b_dtype):
validator.check_tensor_type_same({"x_dtype": x_dtype}, [mstype.float16, mstype.float32], self.name)
validator.check_tensor_type_same({"h_dtype": h_dtype}, [mstype.float16, mstype.float32], self.name)
validator.check_tensor_type_same({"w_dtype": w_dtype}, [mstype.float16, mstype.float32], self.name)
args = {"c_dtype": c_dtype, "b_dtype": b_dtype}
validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
return (c_dtype, mstype.float16, c_dtype, c_dtype, c_dtype, c_dtype, c_dtype)
class InTopK(PrimitiveWithInfer):
r"""
Whether the targets are in the top `k` predictions.
Args:
k (int): Specify the number of top elements to be used for computing precision.
Inputs:
- **x1** (Tensor) - A 2D Tensor defines the predictions of a batch of samples with float16 or float32 data type.
- **x2** (Tensor) - A 1D Tensor defines the labels of a batch of samples with int32 data type.
Outputs:
A 1-D Tensor of type bool with the same shape as `x2`. For each sample `i` in `x2`,
if its label is among the top `k` predictions of the corresponding row of `x1`, the value is True, otherwise False.
Examples:
>>> x1 = Tensor(np.array([[1, 8, 5, 2, 7], [4, 9, 1, 3, 5]]), mindspore.float32)
>>> x2 = Tensor(np.array([1, 3]), mindspore.int32)
>>> in_top_k = P.InTopK(3)
>>> result = in_top_k(x1, x2)
[True False]
"""
@prim_attr_register
def __init__(self, k):
"""Init InTopK"""
self.init_prim_io_names(inputs=['x1', 'x2', 'k'], outputs=['y'])
validator.check_value_type("k", k, [int], self.name)
def infer_dtype(self, x1_dtype, x2_dtype):
validator.check_tensor_type_same({"x1": x1_dtype}, (mstype.float16, mstype.float32,), self.name)
validator.check_tensor_type_same({"x2": x2_dtype}, (mstype.int32,), self.name)
return mstype.tensor_type(mstype.bool_)
def infer_shape(self, x1_shape, x2_shape):
validator.check("x1", len(x1_shape), "", 2, Rel.EQ, self.name)
validator.check("x2", len(x2_shape), "", 1, Rel.EQ, self.name)
validator.check("size of x2", x2_shape[0], "x1's first dimension", x1_shape[0], Rel.EQ, self.name)
return x2_shape
class LRN(PrimitiveWithInfer):
r"""
Local Response Normalization
Args:
depth_radius (int): Half-width of the 1-D normalization window. Shape of 0-D.
bias (float): An offset (usually positive to avoid dividing by 0).
alpha (float): A scale factor, usually positive.
beta (float): An exponent.
norm_region (str): Specify normalization region. Options: "ACROSS_CHANNELS". Default: "ACROSS_CHANNELS".
Inputs:
- **x** (Tensor) - A 4D Tensor with float16 or float32 data type.
Outputs:
Tensor, with the same shape and data type as the input tensor.
Examples:
>>> x = Tensor(np.random.rand(1, 10, 4, 4), mindspore.float32)
>>> lrn = P.LRN()
>>> lrn(x)
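>>> # The output has the same shape and data type as `x`, i.e. (1, 10, 4, 4) with float32 here.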
"""
@prim_attr_register
def __init__(self, depth_radius=5, bias=1.0, alpha=1.0, beta=0.5, norm_region="ACROSS_CHANNELS"):
"""Init LRN"""
self.init_prim_io_names(inputs=['x'], outputs=['y'])
validator.check_value_type("depth_radius", depth_radius, [int], self.name)
validator.check_value_type("bias", bias, [float], self.name)
validator.check_value_type("alpha", alpha, [float], self.name)
validator.check_value_type("beta", beta, [float], self.name)
validator.check_value_type("norm_region", norm_region, [str], self.name)
validator.check_string('norm_region', norm_region, ['ACROSS_CHANNELS'], self.name)
validator.check_integer("depth_radius", depth_radius, 0, Rel.GE, self.name)
def infer_dtype(self, x_dtype):
validator.check_tensor_type_same({"x": x_dtype}, (mstype.float16, mstype.float32,), self.name)
return x_dtype
def infer_shape(self, x_shape):
validator.check_integer("x_shape", len(x_shape), 4, Rel.EQ, self.name)
return x_shape
class CTCLossV2(PrimitiveWithInfer):
r"""
Calculates the CTC (Connectionist Temporal Classification) loss and the gradient.
Note:
- cuDNN uses label value 0 for the `blank`.
Inputs:
- **inputs** (Tensor) - The input Tensor should be a `3-D` tensor whose shape is
:math:`(max_time, batch_size, num_class)`. `num_class` should be `num_labels + 1` classes, `num_labels`
indicates the number of actual labels. Blank labels are reserved.
- **labels** (Tensor) - The labels Tensor should be a `1-D` tensor whose shape is
:math:`(\sum{label_lengths})`
or `2-D` tensor whose shape is
:math:`(max_time, max{label_lengths})`
The type must be int32.
- **input_lengths** (Tensor) - A `1-D` input tensor whose shape is
:math:`(batch_size,)`. Each value is the length of the corresponding input sequence. The type must be int32.
- **label_lengths** (Tensor) - A tensor containing sequence lengths with the shape of :math:`(batch_size)`.
The type must be int32. Each value in the tensor should not be greater than `max_time`.
Outputs:
- **loss** (Tensor) - A tensor containing log-probabilities, the shape is :math:`(batch_size)`, has the same
type with `inputs`.
- **gradient** (Tensor) - The gradient of `loss`, has the same type and shape with `inputs`.
Examples:
>>> inputs = Tensor(np.random.random((2, 2, 3)), mindspore.float32)
>>> labels = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32)
>>> input_lengths = Tensor(np.array([2, 2]), mindspore.int32)
>>> label_lengths = Tensor(np.array([2, 2]), mindspore.int32)
>>> ctc_loss = P.CTCLossV2()
>>> output = ctc_loss(inputs, labels, input_lengths, label_lengths)
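>>> # Expected shapes, per the shape inference below: `output` is a tuple (loss, gradient);
>>> # loss has shape (batch_size,) = (2,) and gradient has the same shape as `inputs`, i.e. (2, 2, 3).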
"""
@prim_attr_register
def __init__(self):
pass
def infer_dtype(self, input_dtype, labels_dtype, input_lengths_dtype, label_lengths_dtype):
validator.check_tensor_type_same({"input": input_dtype}, (mstype.float32,), self.name)
validator.check_tensor_type_same({"labels": labels_dtype}, (mstype.int32,), self.name)
validator.check_tensor_type_same({"input_lengths": input_lengths_dtype}, (mstype.int32,), self.name)
validator.check_tensor_type_same({"target_lengths": label_lengths_dtype}, (mstype.int32,), self.name)
return mstype.float32, mstype.float32
def infer_shape(self, input_shape, labels_shape, input_lengths_shape, label_lengths_shape):
validator.check_integer("input shape", len(input_shape), 3, Rel.EQ, self.name)
validator.check_number_range("labels shape", len(labels_shape), 1, 2, Rel.INC_BOTH, self.name)
validator.check_integer("input lengths shape", len(input_lengths_shape), 1, Rel.EQ, self.name)
validator.check_integer("label lengths shape", len(label_lengths_shape), 1, Rel.EQ, self.name)
validator.check_integer("input[1]", input_shape[1], input_lengths_shape[0], Rel.EQ, self.name)
validator.check_integer("input[1]", input_shape[1], label_lengths_shape[0], Rel.EQ, self.name)
return (input_shape[1],), input_shape
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Operators for nn."""
import math
import operator
from functools import reduce
import numpy as np
from ... import context
from .. import signature as sig
from ..._checkparam import Validator as validator
from ..._checkparam import Rel
from ...common import dtype as mstype
from ..primitive import Primitive, PrimitiveWithInfer, PrimitiveWithCheck, prim_attr_register
from ..operations.math_ops import _infer_shape_reduce
def _check_positive_int_or_tuple(arg_name, arg_value, prim_name, allow_four=False, ret_four=False):
"""
Checks whether an argument is a positive int or tuple with 2 or 4(when allow_four is True) positive int elements.
"""
def _raise_message():
raise ValueError(f"For '{prim_name}' attr '{arg_name}' should be an positive int number or a tuple of two "
f"{'or four ' if allow_four else ''}positive int numbers, but got {arg_value}")
def _get_return_value():
if isinstance(arg_value, int):
ret = (1, 1, arg_value, arg_value) if ret_four else (arg_value, arg_value)
elif len(arg_value) == 2:
ret = (1, 1, arg_value[0], arg_value[1]) if ret_four else arg_value
elif len(arg_value) == 4:
if not allow_four:
_raise_message()
ret = arg_value if ret_four else (arg_value[2], arg_value[3])
else:
_raise_message()
return ret
validator.check_value_type(arg_name, arg_value, (int, tuple), prim_name)
ret_value = _get_return_value()
for item in ret_value:
if isinstance(item, int) and item > 0:
continue
_raise_message()
return ret_value
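# Usage sketch for the helper above (argument values chosen purely for illustration):
# _check_positive_int_or_tuple('kernel_size', 3, 'Conv2D', ret_four=True) returns (1, 1, 3, 3),
# _check_positive_int_or_tuple('stride', (2, 3), 'Conv2D') returns (2, 3), and
# _check_positive_int_or_tuple('stride', (1, 1, 2, 3), 'Conv2D', allow_four=True) returns (2, 3).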
class Flatten(PrimitiveWithInfer):
r"""
Flattens a tensor without changing its batch size on the 0-th axis.
Inputs:
- **input_x** (Tensor) - Tensor of shape :math:`(N, \ldots)` to be flattened.
Outputs:
Tensor, the shape of the output tensor is :math:`(N, X)`, where :math:`X` is
the product of the remaining dimension.
Examples:
>>> input_tensor = Tensor(np.ones(shape=[1, 2, 3, 4]), mindspore.float32)
>>> flatten = P.Flatten()
>>> output = flatten(input_tensor)
>>> assert output.shape == (1, 24)
"""
@prim_attr_register
def __init__(self):
pass
def infer_shape(self, input_x):
validator.check_integer('input_x rank', len(input_x), 1, Rel.GE, self.name)
prod = 1 if len(input_x) == 1 else reduce(operator.mul, input_x[1:])
return input_x[0], prod
def infer_dtype(self, input_x):
validator.check_subclass("input_x", input_x, mstype.tensor, self.name)
return input_x
class Softmax(PrimitiveWithInfer):
r"""
Softmax operation.
Applies the Softmax operation to the input tensor on the specified axis.
Suppose a slice in the given axis is :math:`x`; then for each element :math:`x_i`,
the Softmax function is shown as follows:
.. math::
\text{output}(x_i) = \frac{exp(x_i)}{\sum_{j = 0}^{N-1}\exp(x_j)},
where :math:`N` is the length of the tensor.
Args:
axis (Union[int, tuple]): The axis to do the Softmax operation. Default: -1.
Inputs:
- **logits** (Tensor) - The input of Softmax, with float16 or float32 data type.
Outputs:
Tensor, with the same type and shape as the logits.
Examples:
>>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
>>> softmax = P.Softmax()
>>> softmax(input_x)
[0.01165623, 0.03168492, 0.08612854, 0.23412167, 0.6364086]
"""
@prim_attr_register
def __init__(self, axis=-1):
self.init_prim_io_names(inputs=['x'], outputs=['output'])
validator.check_value_type("axis", axis, [int, tuple], self.name)
if isinstance(axis, int):
self.add_prim_attr('axis', (axis,))
for item in self.axis:
validator.check_value_type("item of axis", item, [int], self.name)
def infer_shape(self, logits):
validator.check_integer("length of axis", len(self.axis), 1, Rel.GE, self.name)
rank = len(logits)
for axis_v in self.axis:
validator.check_int_range("axis", axis_v, -rank, rank, Rel.INC_LEFT, self.name)
return logits
def infer_dtype(self, logits):
validator.check_subclass("logits", logits, mstype.tensor, self.name)
validator.check_tensor_type_same({"logits": logits}, mstype.float_type, self.name)
return logits
class LogSoftmax(PrimitiveWithInfer):
r"""
Log Softmax activation function.
Applies the Log Softmax function to the input tensor on the specified axis.
Suppose a slice in the given axis is :math:`x`; then for each element :math:`x_i`,
the Log Softmax function is shown as follows:
.. math::
\text{output}(x_i) = \log \left(\frac{exp(x_i)} {\sum_{j = 0}^{N-1}\exp(x_j)}\right),
where :math:`N` is the length of the Tensor.
Args:
axis (int): The axis to do the Log softmax operation. Default: -1.
Inputs:
- **logits** (Tensor) - The input of Log Softmax, with float16 or float32 data type.
Outputs:
Tensor, with the same type and shape as the logits.
Examples:
>>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
>>> log_softmax = P.LogSoftmax()
>>> log_softmax(input_x)
[-4.4519143, -3.4519143, -2.4519143, -1.4519144, -0.4519144]
"""
@prim_attr_register
def __init__(self, axis=-1):
validator.check_value_type("axis", axis, [int], self.name)
def infer_shape(self, logits):
rank = len(logits)
validator.check_int_range('axis', self.axis, -rank, rank, Rel.INC_LEFT, self.name)
return logits
def infer_dtype(self, logits):
validator.check_subclass("logits", logits, mstype.tensor, self.name)
validator.check_tensor_type_same({"logits": logits}, mstype.float_type, self.name)
return logits
class Softplus(PrimitiveWithInfer):
r"""
Softplus activation function.
Softplus is a smooth approximation to the ReLU function.
The function is shown as follows:
.. math::
\text{output} = \log(1 + \exp(\text{input_x})),
Inputs:
- **input_x** (Tensor) - The input tensor whose data type should be float.
Outputs:
Tensor, with the same type and shape as the `input_x`.
Examples:
>>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
>>> softplus = P.Softplus()
>>> softplus(input_x)
[1.3132615, 2.126928, 3.0485873, 4.01815, 5.0067153]
"""
@prim_attr_register
def __init__(self):
"""init Softplus"""
self.init_prim_io_names(inputs=['x'], outputs=['output'])
def infer_shape(self, input_x):
return input_x
def infer_dtype(self, input_x):
validator.check_tensor_type_same({'input_x': input_x}, mstype.float_type, self.name)
return input_x
class Softsign(PrimitiveWithInfer):
r"""
Softsign activation function.
The function is shown as follows:
.. math::
\text{output} = \frac{\text{input_x}}{1 + \left| \text{input_x} \right|},
Inputs:
- **input_x** (Tensor) - The input tensor whose data type should be float16 or float32.
Outputs:
Tensor, with the same type and shape as the `input_x`.
Examples:
>>> input_x = Tensor(np.array([0, -1, 2, 30, -30]), mindspore.float32)
>>> softsign = P.Softsign()
>>> softsign(input_x)
[0. -0.5 0.6666667 0.9677419 -0.9677419]
"""
@prim_attr_register
def __init__(self):
"""init Softsign"""
self.init_prim_io_names(inputs=['x'], outputs=['output'])
def infer_shape(self, input_x):
return input_x
def infer_dtype(self, input_x):
validator.check_tensor_type_same({'input_x': input_x}, [mstype.float16, mstype.float32], self.name)
return input_x
class ReLU(PrimitiveWithInfer):
r"""
Computes ReLU (Rectified Linear Unit) of the input tensor element-wise.
It returns :math:`\max(x,\ 0)` element-wise.
Inputs:
- **input_x** (Tensor) - The input tensor.
Outputs:
Tensor, with the same type and shape as the `input_x`.
Examples:
>>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> relu = P.ReLU()
>>> result = relu(input_x)
[[0, 4.0, 0.0], [2.0, 0.0, 9.0]]
"""
@prim_attr_register
def __init__(self):
"""init ReLU"""
self.init_prim_io_names(inputs=['x'], outputs=['output'])
def infer_shape(self, input_x):
return input_x
def infer_dtype(self, input_x):
validator.check_tensor_type_same({'input_x': input_x}, mstype.number_type, self.name)
return input_x
class ReLU6(PrimitiveWithInfer):
r"""
Computes ReLU (Rectified Linear Unit) of the input tensor element-wise, upper bounded by 6.
It returns :math:`\min(\max(0,x), 6)` element-wise.
Inputs:
- **input_x** (Tensor) - The input tensor. With float16 or float32 data type.
Outputs:
Tensor, with the same type and shape as the `input_x`.
Examples:
>>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> relu6 = P.ReLU6()
>>> result = relu6(input_x)
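>>> # result is [[0.0, 4.0, 0.0], [2.0, 0.0, 6.0]], since every value is clipped to the range [0, 6].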
"""
@prim_attr_register
def __init__(self):
"""init ReLU6"""
self.init_prim_io_names(inputs=['x'], outputs=['output'])
def infer_shape(self, input_x):
return input_x
def infer_dtype(self, input_x):
validator.check_tensor_type_same({'input_x': input_x}, (mstype.float16, mstype.float32), self.name)
return input_x
class ReLUV2(PrimitiveWithInfer):
r"""
Computes ReLU (Rectified Linear Unit) of the input tensor element-wise.
It returns :math:`\max(x,\ 0)` element-wise.
Inputs:
- **input_x** (Tensor) - The input tensor should be a 4-D tensor.
Outputs:
- **output** (Tensor) - Has the same type and shape as the `input_x`.
- **mask** (Tensor) - A tensor whose data type must be uint8.
Examples:
>>> input_x = Tensor(np.array([[[[1, -2], [-3, 4]], [[-5, 6], [7, -8]]]]), mindspore.float32)
>>> relu_v2 = P.ReLUV2()
>>> output = relu_v2(input_x)
([[[[1., 0.], [0., 4.]], [[0., 6.], [7., 0.]]]],
[[[[1, 0], [2, 0]], [[2, 0], [1, 0]]]])
"""
@prim_attr_register
def __init__(self):
"""init ReLUV2"""
self.init_prim_io_names(inputs=['x'], outputs=['output', 'mask'])
def __infer__(self, input_x):
input_shape = list(input_x['shape'])
input_dtype = input_x['dtype']
mask_shape = []
if len(input_shape) != 4:
raise ValueError("The `input_x` should be a 4-D tensor, "
f"but got a {len(input_shape)}-D tensor whose shape is {input_shape}")
for i in enumerate(input_shape):
if i[0] == 1:
if input_dtype in (mstype.uint8, mstype.int8):
mask_shape.append((input_shape[1] + 31) // 32)
else:
mask_shape.append((input_shape[1] + 15) // 16)
else:
mask_shape.append(i[1])
if input_dtype in (mstype.uint8, mstype.int8):
mask_shape.append(4)
else:
mask_shape.append(2)
output_shape = (input_x['shape'], mask_shape)
validator.check_subclass("input_x", input_dtype, mstype.tensor, self.name)
validator.check_tensor_type_same({'input_x': input_dtype}, mstype.number_type, self.name)
mask_dtype = mstype.uint8
output_dtype = (input_dtype, mask_dtype)
return {'shape': output_shape,
'dtype': output_dtype,
'value': None}
class Elu(PrimitiveWithInfer):
r"""
Computes exponential linear: `alpha * (exp(x) - 1)` if x < 0, `x` otherwise.
The data type of input tensor should be float.
Args:
alpha (float): The coefficient of negative factor whose type is float,
only support '1.0' currently. Default: 1.0.
Inputs:
- **input_x** (Tensor) - The input tensor whose data type should be float.
Outputs:
Tensor, has the same shape and data type as `input_x`.
Examples:
>>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> elu = P.Elu()
>>> result = elu(input_x)
Tensor([[-0.632 4.0 -0.999]
[2.0 -0.993 9.0 ]], shape=(2, 3), dtype=mindspore.float32)
"""
@prim_attr_register
def __init__(self, alpha=1.0):
"""Init Elu"""
validator.check_value_type("alpha", alpha, [float], self.name)
validator.check_number("alpha", alpha, 1.0, Rel.EQ, self.name)
def infer_shape(self, input_x):
return input_x
def infer_dtype(self, input_x):
validator.check_tensor_type_same({'input_x': input_x}, mstype.float_type, self.name)
return input_x
class HSwish(PrimitiveWithInfer):
r"""
Hard swish activation function.
Applies hswish-type activation element-wise. The input is a Tensor with any valid shape.
Hard swish is defined as:
.. math::
\text{hswish}(x_{i}) = x_{i} * \frac{ReLU6(x_{i} + 3)}{6},
where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.
Inputs:
- **input_data** (Tensor) - The input of HSwish, data type should be float16 or float32.
Outputs:
Tensor, with the same type and shape as the `input_data`.
Examples:
>>> hswish = P.HSwish()
>>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
>>> result = hswish(input_x)
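>>> # result is approximately [-0.3333, -0.3333, 0, 1.667, 0.6665], i.e. x * ReLU6(x + 3) / 6 in float16.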
"""
@prim_attr_register
def __init__(self):
self.init_prim_io_names(inputs=['x'], outputs=['output'])
def infer_shape(self, xshape):
return xshape
def infer_dtype(self, x_dtype):
validator.check_tensor_type_same({"x": x_dtype}, (mstype.float16, mstype.float32), self.name)
return x_dtype
class Sigmoid(PrimitiveWithInfer):
r"""
Sigmoid activation function.
Computes Sigmoid of input element-wise. The Sigmoid function is defined as:
.. math::
\text{sigmoid}(x_i) = \frac{1}{1 + exp(-x_i)},
where :math:`x_i` is the element of the input.
Inputs:
- **input_x** (Tensor) - The input of Sigmoid, data type should be float16 or float32.
Outputs:
Tensor, with the same type and shape as the input_x.
Examples:
>>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
>>> sigmoid = P.Sigmoid()
>>> sigmoid(input_x)
[0.73105866, 0.880797, 0.9525742, 0.98201376, 0.9933071]
"""
@prim_attr_register
def __init__(self):
self.init_prim_io_names(inputs=['x'], outputs=['output'])
def infer_shape(self, input_x):
return input_x
def infer_dtype(self, input_x):
validator.check_tensor_type_same({"input_x": input_x}, (mstype.float16, mstype.float32), self.name)
return input_x
class HSigmoid(PrimitiveWithInfer):
r"""
Hard sigmoid activation function.
Applies hard sigmoid activation element-wise. The input is a Tensor with any valid shape.
Hard sigmoid is defined as:
.. math::
\text{hsigmoid}(x_{i}) = max(0, min(1, \frac{x_{i} + 3}{6})),
where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.
Inputs:
- **input_data** (Tensor) - The input of HSigmoid, data type should be float16 or float32.
Outputs:
Tensor, with the same type and shape as the `input_data`.
Examples:
>>> hsigmoid = P.HSigmoid()
>>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
>>> result = hsigmoid(input_x)
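>>> # result is approximately [0.3333, 0.1666, 0.5, 0.8335, 0.6665], i.e. max(0, min(1, (x + 3) / 6)) in float16.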
"""
@prim_attr_register
def __init__(self):
self.init_prim_io_names(inputs=['x'], outputs=['output'])
def infer_shape(self, x_shape):
return x_shape
def infer_dtype(self, x_dtype):
validator.check_tensor_type_same({"x": x_dtype}, (mstype.float16, mstype.float32), self.name)
return x_dtype
class Tanh(PrimitiveWithInfer):
r"""
Tanh activation function.
Computes hyperbolic tangent of input element-wise. The Tanh function is defined as:
.. math::
tanh(x_i) = \frac{\exp(x_i) - \exp(-x_i)}{\exp(x_i) + \exp(-x_i)} = \frac{\exp(2x_i) - 1}{\exp(2x_i) + 1},
where :math:`x_i` is an element of the input Tensor.
Inputs:
- **input_x** (Tensor) - The input of Tanh.
Outputs:
Tensor, with the same type and shape as the input_x.
Examples:
>>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
>>> tanh = P.Tanh()
>>> tanh(input_x)
[0.7615941, 0.9640276, 0.9950548, 0.9993293, 0.99990916]
"""
@prim_attr_register
def __init__(self):
pass
def infer_shape(self, input_x):
return input_x
def infer_dtype(self, input_x):
validator.check_subclass("input_x", input_x, mstype.tensor, self.name)
return input_x
class FusedBatchNorm(Primitive):
r"""
FusedBatchNorm is a BatchNorm whose moving mean and moving variance are computed during training instead of being loaded.
Batch Normalization is widely used in convolutional networks. This operation applies
Batch Normalization over input to avoid internal covariate shift as described in the
paper `Batch Normalization: Accelerating Deep Network Training by Reducing Internal
Covariate Shift <https://arxiv.org/abs/1502.03167>`_. It rescales and recenters the
feature using a mini-batch of data and the learned parameters which can be described
in the following formula.
.. math::
y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta
where :math:`\gamma` is scale, :math:`\beta` is bias, :math:`\epsilon` is epsilon.
Args:
mode (int): Mode of batch normalization, value is 0 or 1. Default: 0.
epsilon (float): A small value added for numerical stability. Default: 1e-5.
momentum (float): The hyper parameter to compute moving average for running_mean and running_var
(e.g. :math:`new\_running\_mean = momentum * running\_mean + (1 - momentum) * current\_mean`).
Momentum value should be in [0, 1]. Default: 0.1.
Inputs:
- **input_x** (Tensor) - Tensor of shape :math:`(N, C)`.
- **scale** (Tensor) - Tensor of shape :math:`(C,)`.
- **bias** (Tensor) - Tensor of shape :math:`(C,)`.
- **mean** (Tensor) - Tensor of shape :math:`(C,)`.
- **variance** (Tensor) - Tensor of shape :math:`(C,)`.
Outputs:
Tuple of 5 Tensor, the normalized input and the updated parameters.
- **output_x** (Tensor) - The same type and shape as the `input_x`.
- **updated_scale** (Tensor) - Tensor of shape :math:`(C,)`.
- **updated_bias** (Tensor) - Tensor of shape :math:`(C,)`.
- **updated_moving_mean** (Tensor) - Tensor of shape :math:`(C,)`.
- **updated_moving_variance** (Tensor) - Tensor of shape :math:`(C,)`.
Examples:
>>> input_x = Tensor(np.ones([128, 64, 32, 64]), mindspore.float32)
>>> scale = Tensor(np.ones([64]), mindspore.float32)
>>> bias = Tensor(np.ones([64]), mindspore.float32)
>>> mean = Tensor(np.ones([64]), mindspore.float32)
>>> variance = Tensor(np.ones([64]), mindspore.float32)
>>> op = P.FusedBatchNorm()
>>> output = op(input_x, scale, bias, mean, variance)
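>>> # `output` is a tuple of five tensors; output[0] has the same shape as `input_x`,
>>> # (128, 64, 32, 64), and the remaining four have shape (64,), one value per channel.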
"""
@prim_attr_register
def __init__(self, mode=0, epsilon=1e-5, momentum=0.1):
self.init_prim_io_names(inputs=['x', 'scale', 'b', 'mean', 'variance'],
outputs=['y', 'running_mean', 'running_variance', 'save_mean', 'save_inv_variance'])
self.mode = validator.check_integer('mode', mode, [0, 1], Rel.IN, self.name)
self.epsilon = validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, self.name)
self.momentum = validator.check_number_range('momentum', momentum, 0, 1, Rel.INC_BOTH, self.name)
self._update_parameter = True
class FusedBatchNormEx(PrimitiveWithInfer):
r"""
FusedBatchNormEx is an extension of FusedBatchNorm. It has one more output (the reserve output)
than FusedBatchNorm; the reserve is used in the backpropagation phase. Like FusedBatchNorm, it is a
BatchNorm whose moving mean and moving variance are computed instead of being loaded.
Batch Normalization is widely used in convolutional networks. This operation applies
Batch Normalization over input to avoid internal covariate shift as described in the
paper `Batch Normalization: Accelerating Deep Network Training by Reducing Internal
Covariate Shift <https://arxiv.org/abs/1502.03167>`_. It rescales and recenters the
feature using a mini-batch of data and the learned parameters which can be described
in the following formula.
.. math::
y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta
where :math:`\gamma` is scale, :math:`\beta` is bias, :math:`\epsilon` is epsilon.
Args:
mode (int): Mode of batch normalization, value is 0 or 1. Default: 0.
epsilon (float): A small value added for numerical stability. Default: 1e-5.
momentum (float): The hyper parameter to compute moving average for running_mean and running_var
(e.g. :math:`new\_running\_mean = momentum * running\_mean + (1 - momentum) * current\_mean`).
Momentum value should be in [0, 1]. Default: 0.1.
Inputs:
- **input_x** (Tensor) - The input of FusedBatchNormEx, Tensor of shape :math:`(N, C)`,
data type: float16 or float32.
- **scale** (Tensor) - Parameter scale, same with gamma above-mentioned, Tensor of shape :math:`(C,)`,
data type: float32.
- **bias** (Tensor) - Parameter bias, same with beta above-mentioned, Tensor of shape :math:`(C,)`,
data type: float32.
- **mean** (Tensor) - mean value, Tensor of shape :math:`(C,)`, data type: float32.
- **variance** (Tensor) - variance value, Tensor of shape :math:`(C,)`, data type: float32.
Outputs:
Tuple of 6 Tensors, the normalized input, the updated parameters and reserve.
- **output_x** (Tensor) - The input of FusedBatchNormEx, same type and shape as the `input_x`.
- **updated_scale** (Tensor) - Updated parameter scale, Tensor of shape :math:`(C,)`, data type: float32.
- **updated_bias** (Tensor) - Updated parameter bias, Tensor of shape :math:`(C,)`, data type: float32.
- **updated_moving_mean** (Tensor) - Updated mean value, Tensor of shape :math:`(C,)`, data type: float32.
- **updated_moving_variance** (Tensor) - Updated variance value, Tensor of shape :math:`(C,)`,
data type: float32.
- **reserve** (Tensor) - reserve space, Tensor of shape :math:`(C,)`, data type: float32.
Examples:
>>> input_x = Tensor(np.ones([128, 64, 32, 64]), mindspore.float32)
>>> scale = Tensor(np.ones([64]), mindspore.float32)
>>> bias = Tensor(np.ones([64]), mindspore.float32)
>>> mean = Tensor(np.ones([64]), mindspore.float32)
>>> variance = Tensor(np.ones([64]), mindspore.float32)
>>> op = P.FusedBatchNormEx()
>>> output = op(input_x, scale, bias, mean, variance)
"""
__mindspore_signature__ = (
sig.make_sig('input_x', dtype=sig.sig_dtype.T2),
sig.make_sig('scale', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('bias', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('mean', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('variance', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
)
@prim_attr_register
def __init__(self, mode=0, epsilon=1e-5, momentum=0.1):
self.init_prim_io_names(inputs=['x', 'scale', 'b', 'mean', 'variance'],
outputs=['y', 'save_scale', 'save_bias', 'save_mean', 'save_inv_variance', 'reserve'])
self.mode = validator.check_integer('mode', mode, [0, 1], Rel.IN, self.name)
self.epsilon = validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, self.name)
self.momentum = validator.check_number_range('momentum', momentum, 0, 1, Rel.INC_BOTH, self.name)
self._update_parameter = True
self.add_prim_attr('data_format', "NCHW")
def infer_shape(self, input_x, scale, bias, mean, variance):
validator.check_integer("scale rank", len(scale), 1, Rel.EQ, self.name)
validator.check("scale shape", scale, "bias shape", bias, Rel.EQ, self.name)
validator.check("scale shape[0]", scale[0], "input_x shape[1]", input_x[1], Rel.EQ, self.name)
validator.check_integer("mean rank", len(mean), 1, Rel.EQ, self.name)
validator.check("mean shape", mean, "variance shape", variance, Rel.EQ, self.name)
validator.check("mean shape", mean, "scale shape", scale, Rel.EQ, self.name)
return (input_x, scale, scale, scale, scale, scale)
def infer_dtype(self, input_x, scale, bias, mean, variance):
validator.check_tensor_type_same({"input_x": input_x}, [mstype.float16, mstype.float32], self.name)
args = {"scale": scale, "bias": bias}
validator.check_tensor_type_same(args, [mstype.float32], self.name)
args_moving = {"mean": mean, "variance": variance}
valid_types = [mstype.tensor_type(mstype.float32)]
validator.check_type_same(args_moving, valid_types, self.name)
return (input_x, scale, scale, scale, scale, scale)
class BNTrainingReduce(PrimitiveWithInfer):
"""
Reduces the input by summing over axes [0, 2, 3], producing the per-channel sum and sum of squares.
Inputs:
- **x** (Tensor) - Tensor of shape :math:`(N, C)`.
Outputs:
- **sum** (Tensor) - Tensor of shape :math:`(C,)`.
- **square_sum** (Tensor) - Tensor of shape :math:`(C,)`.
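Examples:
>>> # A minimal usage sketch; the input shape below is chosen only for illustration.
>>> x = Tensor(np.ones([128, 64, 32, 64]), mindspore.float32)
>>> bn_training_reduce = P.BNTrainingReduce()
>>> sum_x, square_sum_x = bn_training_reduce(x)
>>> # Both outputs have shape (64,), i.e. one value per channel.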
"""
@prim_attr_register
def __init__(self):
self.init_prim_io_names(inputs=['x'], outputs=['sum', 'square_sum'])
def infer_shape(self, x_shape):
validator.check_integer("x rank", len(x_shape), 4, Rel.EQ, self.name)
return ([x_shape[1]], [x_shape[1]])
def infer_dtype(self, x_type):
return (x_type, x_type)
class BNTrainingUpdate(PrimitiveWithInfer):
"""
The primitive operator of the register and info descriptor in bn_training_update.
"""
@prim_attr_register
def __init__(self, isRef=True, epsilon=1e-5, factor=0.1):
self.init_prim_io_names(inputs=['x', 'sum', 'square_sum', 'scale', 'b', 'mean', 'variance'],
outputs=['y', 'running_mean', 'running_variance', 'save_mean', 'save_inv_variance'])
#self.isRef = validator.check_integer('isRef', isRef, [0, 1], Rel.IN)
self.epsilon = validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, 'BNTrainingUpdate')
self.factor = validator.check_number_range('factor', factor, 0, 1, Rel.INC_BOTH, 'BNTrainingUpdate')
def infer_shape(self, x, sum, square_sum, scale, b, mean, variance):
return (x, variance, variance, variance, variance)
def infer_dtype(self, x, sum, square_sum, scale, b, mean, variance):
return (x, variance, variance, variance, variance)
class BatchNorm(PrimitiveWithInfer):
r"""
Batch Normalization for input data and updated parameters.
Batch Normalization is widely used in convolutional neural networks. This operation
applies Batch Normalization over input to avoid internal covariate shift as described
in the paper `Batch Normalization: Accelerating Deep Network Training by Reducing Internal
Covariate Shift <https://arxiv.org/abs/1502.03167>`_. It rescales and recenters the
features using a mini-batch of data and the learned parameters which can be described
in the following formula,
.. math::
y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta
where :math:`\gamma` is scale, :math:`\beta` is bias, :math:`\epsilon` is epsilon.
Args:
is_training (bool): If `is_training` is True, `mean` and `variance` are computed during training.
If `is_training` is False, they're loaded from checkpoint during inference. Default: False.
epsilon (float): A small value added for numerical stability. Default: 1e-5.
Inputs:
- **input_x** (Tensor) - Tensor of shape :math:`(N, C)`, with float16 or float32 data type.
- **scale** (Tensor) - Tensor of shape :math:`(C,)`, with float16 or float32 data type.
- **bias** (Tensor) - Tensor of shape :math:`(C,)`, has the same data type with `scale`.
- **mean** (Tensor) - Tensor of shape :math:`(C,)`, with float16 or float32 data type.
- **variance** (Tensor) - Tensor of shape :math:`(C,)`, has the same data type with `mean`.
Outputs:
Tuple of 5 Tensor, the normalized inputs and the updated parameters.
- **output_x** (Tensor) - The same type and shape as the input_x. The shape is :math:`(N, C)`.
- **updated_scale** (Tensor) - Tensor of shape :math:`(C,)`.
- **updated_bias** (Tensor) - Tensor of shape :math:`(C,)`.
- **reserve_space_1** (Tensor) - Tensor of shape :math:`(C,)`.
- **reserve_space_2** (Tensor) - Tensor of shape :math:`(C,)`.
Examples:
>>> input_x = Tensor(np.ones([128, 64, 32, 64]), mindspore.float32)
>>> scale = Tensor(np.ones([64]), mindspore.float32)
>>> bias = Tensor(np.ones([64]), mindspore.float32)
>>> mean = Tensor(np.ones([64]), mindspore.float32)
>>> variance = Tensor(np.ones([64]), mindspore.float32)
>>> batch_norm = P.BatchNorm()
>>> output = batch_norm(input_x, scale, bias, mean, variance)
"""
@prim_attr_register
def __init__(self, is_training=False, epsilon=1e-5):
validator.check_value_type('is_training', is_training, (bool,), self.name)
validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, self.name)
self.add_prim_attr('data_format', "NCHW")
self.init_prim_io_names(inputs=['x', 'scale', 'offset', 'mean', 'variance'],
outputs=['y', 'batch_mean', 'batch_variance', 'reserve_space_1', 'reserve_space_2'])
def infer_shape(self, input_x, scale, bias, mean, variance):
validator.check_integer("scale rank", len(scale), 1, Rel.EQ, self.name)
validator.check("scale shape", scale, "bias shape", bias, Rel.EQ, self.name)
validator.check("scale shape[0]", scale[0], "input_x shape[1]", input_x[1], Rel.EQ, self.name)
if not self.is_training:
validator.check_integer("mean rank", len(mean), 1, Rel.EQ, self.name)
validator.check("mean shape", mean, "variance shape", variance, Rel.EQ, self.name)
validator.check("mean shape", mean, "scale shape", scale, Rel.EQ, self.name)
return (input_x, scale, scale, scale, scale)
def infer_dtype(self, input_x, scale, bias, mean, variance):
validator.check_tensor_type_same({"input_x": input_x}, [mstype.float16, mstype.float32], self.name)
args = {"scale": scale, "bias": bias}
validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
args_moving = {"mean": mean, "variance": variance}
if self.is_training:
valid_types = [mstype.tensor_type(mstype.float16), mstype.tensor_type(mstype.float32), None]
validator.check_type_same(args_moving, valid_types, self.name)
else:
args_moving = {"mean": mean, "variance": variance}
validator.check_tensor_type_same(args_moving, [mstype.float16, mstype.float32], self.name)
return (input_x, scale, bias, input_x, input_x)
class Conv2D(PrimitiveWithInfer):
r"""
2D convolution layer.
Applies a 2D convolution over an input tensor which is typically of shape :math:`(N, C_{in}, H_{in}, W_{in})`,
where :math:`N` is batch size and :math:`C_{in}` is channel number. For each batch of shape
:math:`(C_{in}, H_{in}, W_{in})`, the formula is defined as:
.. math::
out_j = \sum_{i=0}^{C_{in} - 1} ccor(W_{ij}, X_i) + b_j,
where :math:`ccor` is the cross correlation operator, :math:`C_{in}` is the input channel number, :math:`j` ranges
from :math:`0` to :math:`C_{out} - 1`, :math:`W_{ij}` corresponds to the :math:`i`-th channel of the :math:`j`-th
filter and :math:`out_{j}` corresponds to the :math:`j`-th channel of the output. :math:`W_{ij}` is a slice
of kernel and it has shape :math:`(\text{ks_h}, \text{ks_w})`, where :math:`\text{ks_h}` and
:math:`\text{ks_w}` are the height and width of the convolution kernel. The full kernel has shape
:math:`(C_{out}, C_{in} // \text{group}, \text{ks_h}, \text{ks_w})`, where group is the group number
to split the input in the channel dimension.
If the 'pad_mode' is set to be "valid", the output height and width will be
:math:`\left \lfloor{1 + \frac{H_{in} + 2 \times \text{padding} - \text{ks_h} -
(\text{ks_h} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` and
:math:`\left \lfloor{1 + \frac{W_{in} + 2 \times \text{padding} - \text{ks_w} -
(\text{ks_w} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` respectively.
The first introduction can be found in paper `Gradient Based Learning Applied to Document Recognition
<http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_. More detailed introduction can be found here:
http://cs231n.github.io/convolutional-networks/.
Args:
out_channel (int): The dimension of the output.
kernel_size (Union[int, tuple[int]]): The kernel size of the 2D convolution.
mode (int): Modes for different convolutions. 0 Math convolution, 1 cross-correlation convolution,
2 deconvolution, 3 depthwise convolution. Default: 1.
pad_mode (str): Modes to fill padding. It could be "valid", "same", or "pad". Default: "valid".
pad (Union(int, tuple[int])): The pad value to be filled. Default: 0. If `pad` is an integer, the paddings of
top, bottom, left and right are the same, equal to pad. If `pad` is a tuple of four integers, the
padding of top, bottom, left and right equal to pad[0], pad[1], pad[2], and pad[3] correspondingly.
stride (Union(int, tuple[int])): The stride to be applied to the convolution filter. Default: 1.
dilation (Union(int, tuple[int])): Specify the space to use between kernel elements. Default: 1.
group (int): Split input into groups. Default: 1.
Returns:
Tensor, the value that applied 2D convolution.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
- **weight** (Tensor) - Set size of kernel is :math:`(K_1, K_2)`, then the shape is
:math:`(C_{out}, C_{in}, K_1, K_2)`.
Outputs:
Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.
Examples:
>>> input = Tensor(np.ones([10, 32, 32, 32]), mindspore.float32)
>>> weight = Tensor(np.ones([32, 32, 3, 3]), mindspore.float32)
>>> conv2d = P.Conv2D(out_channel=32, kernel_size=3)
>>> conv2d(input, weight)
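>>> # With pad_mode "valid", kernel 3, stride 1 and dilation 1, each spatial dimension
>>> # shrinks from 32 to 30, so the output shape is (10, 32, 30, 30).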
"""
@prim_attr_register
def __init__(self,
out_channel,
kernel_size,
mode=1,
pad_mode="valid",
pad=0,
stride=1,
dilation=1,
group=1):
"""init Conv2D"""
self.init_prim_io_names(inputs=['x', 'w'], outputs=['output'])
self.kernel_size = _check_positive_int_or_tuple('kernel_size', kernel_size, self.name)
self.stride = _check_positive_int_or_tuple('stride', stride, self.name, allow_four=True, ret_four=True)
self.add_prim_attr('stride', self.stride)
self.dilation = _check_positive_int_or_tuple('dilation', dilation, self.name, allow_four=True, ret_four=True)
self.add_prim_attr('dilation', self.dilation)
validator.check_value_type('pad', pad, (int, tuple), self.name)
if isinstance(pad, int):
pad = (pad,) * 4
else:
validator.check_integer('pad size', len(pad), 4, Rel.EQ, self.name)
self.padding = pad
self.pad_mode = validator.check_string('pad_mode', pad_mode, ['valid', 'same', 'pad'], self.name)
if pad_mode != 'pad' and pad != (0, 0, 0, 0):
raise ValueError(f"For '{self.name}', padding must be zero when pad_mode is '{pad_mode}'.")
if self.pad_mode == 'pad':
for item in pad:
validator.check_integer('pad item', item, 0, Rel.GE, self.name)
self.mode = validator.check_integer('mode', mode, 1, Rel.EQ, self.name)
self.add_prim_attr('data_format', "NCHW")
self.out_channel = validator.check_integer('out_channel', out_channel, 0, Rel.GT, self.name)
self.group = validator.check_integer('group', group, 0, Rel.GT, self.name)
self.add_prim_attr('offset_a', 0)
def infer_shape(self, x_shape, w_shape, b_shape=None):
validator.check_integer("weight rank", len(w_shape), 4, Rel.EQ, self.name)
validator.check_integer("x rank", len(x_shape), 4, Rel.EQ, self.name)
validator.check(f"x_shape[1] / group", x_shape[1] // self.group, "w_shape[1]", w_shape[1], Rel.EQ, self.name)
validator.check('out_channel', self.out_channel, 'w_shape[0]', w_shape[0], Rel.EQ, self.name)
validator.check('kernel_size', self.kernel_size, 'w_shape[2:4]', tuple(w_shape[2:4]), Rel.EQ, self.name)
kernel_size_h = w_shape[2]
kernel_size_w = w_shape[3]
stride_h = self.stride[2]
stride_w = self.stride[3]
dilation_h = self.dilation[2]
dilation_w = self.dilation[3]
if self.pad_mode == "valid":
h_out = math.ceil((x_shape[2] - dilation_h * (kernel_size_h - 1)) / stride_h)
w_out = math.ceil((x_shape[3] - dilation_w * (kernel_size_w - 1)) / stride_w)
pad_top, pad_bottom, pad_left, pad_right = 0, 0, 0, 0
elif self.pad_mode == "same":
h_out = math.ceil(x_shape[2] / stride_h)
w_out = math.ceil(x_shape[3] / stride_w)
pad_needed_h = max(0, (h_out - 1) * stride_h + dilation_h * (kernel_size_h - 1) + 1 - x_shape[2])
pad_top = math.floor(pad_needed_h / 2)
pad_bottom = pad_needed_h - pad_top
pad_needed_w = max(0, (w_out - 1) * stride_w + dilation_w * (kernel_size_w - 1) + 1 - x_shape[3])
pad_left = math.floor(pad_needed_w / 2)
pad_right = pad_needed_w - pad_left
elif self.pad_mode == 'pad':
pad_top, pad_bottom, pad_left, pad_right = self.padding
h_out = 1 + (x_shape[2] + pad_top + pad_bottom - kernel_size_h - (kernel_size_h - 1) * (dilation_h - 1)) \
/ stride_h
w_out = 1 + (x_shape[3] + pad_left + pad_right - kernel_size_w - (kernel_size_w - 1) * (dilation_w - 1)) \
/ stride_w
h_out = math.floor(h_out)
w_out = math.floor(w_out)
self.pad_list = [pad_top, pad_bottom, pad_left, pad_right]
self.add_prim_attr('pad_list', (pad_top, pad_bottom, pad_left, pad_right))
out_channel = self.out_channel
out_shape = [x_shape[0], out_channel, h_out, w_out]
return out_shape
def infer_dtype(self, x_dtype, w_dtype, b_dtype=None):
args = {'x': x_dtype, 'w': w_dtype}
valid_types = [mstype.int8, mstype.int32, mstype.float16, mstype.float32]
validator.check_tensor_type_same(args, valid_types, self.name)
if x_dtype.element_type() == mstype.int8:
return mstype.tensor_type(mstype.int32)
return x_dtype
class DepthwiseConv2dNative(PrimitiveWithInfer):
r"""
Returns the depth-wise convolution value for the input.
Applies depthwise conv2d for the input, which will generate more channels with channel_multiplier.
Given an input tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})` where :math:`N` is the batch size and a
filter tensor with kernel size :math:`(ks_{h}, ks_{w})`, containing :math:`C_{in} * \text{channel_multiplier}`
convolutional filters of depth 1; it applies a different filter to each input channel (`channel_multiplier`
channels for each input channel), then concatenates the results together. The output has
:math:`\text{in_channels} * \text{channel_multiplier}` channels.
Args:
channel_multiplier (int): The multiplier for the original output convolution. Its value must be greater than 0.
kernel_size (Union[int, tuple[int]]): The size of the convolution kernel.
mode (int): Modes for different convolutions. 0 Math convolution, 1 cross-correlation convolution,
2 deconvolution, 3 depthwise convolution. Default: 3.
pad_mode (str): Modes to fill padding. It could be "valid", "same", or "pad". Default: "valid".
pad (Union[int, tuple[int]]): The pad value to be filled. If `pad` is an integer, the paddings of
top, bottom, left and right are the same, equal to pad. If `pad` is a tuple of four integers, the padding
of top, bottom, left and right equal to pad[0], pad[1], pad[2], and pad[3] correspondingly. Default: 0.
stride (Union[int, tuple[int]]): The stride to be applied to the convolution filter. Default: 1.
dilation (Union[int, tuple[int]]): Specifies the dilation rate to be used for the dilated convolution.
Default: 1.
group (int): Splits input into groups. Default: 1.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
- **weight** (Tensor) - Set the size of kernel as :math:`(K_1, K_2)`, then the shape is
:math:`(K, C_{in}, K_1, K_2)`, `K` must be 1.
Outputs:
Tensor of shape :math:`(N, C_{in} * \text{channel_multiplier}, H_{out}, W_{out})`.
Examples:
>>> input = Tensor(np.ones([10, 32, 32, 32]), mindspore.float32)
>>> weight = Tensor(np.ones([1, 32, 3, 3]), mindspore.float32)
>>> depthwise_conv2d = P.DepthwiseConv2dNative(channel_multiplier = 3, kernel_size = (3, 3))
>>> output = depthwise_conv2d(input, weight)
>>> output.shape == (10, 96, 30, 30)
"""
@prim_attr_register
def __init__(self,
channel_multiplier,
kernel_size,
mode=3,
pad_mode="valid",
pad=0,
stride=1,
dilation=1,
group=1):
"""init DepthwiseConv2dNative"""
self.init_prim_io_names(inputs=['x', 'w'], outputs=['output'])
self.kernel_size = _check_positive_int_or_tuple('kernel_size', kernel_size, self.name)
self.stride = _check_positive_int_or_tuple('stride', stride, self.name)
if self.stride[0] != self.stride[1]:
raise ValueError("The height and width of stride should be equal,"
f"but got height:{self.stride[0]}, width:{self.stride[1]}")
self.add_prim_attr('stride', (1, 1, self.stride[0], self.stride[1]))
self.dilation = _check_positive_int_or_tuple('dilation', dilation, self.name)
if self.dilation[0] != self.dilation[1]:
raise ValueError("The height and width of dilation should be equal,"
f"but got height:{self.dilation[0]}, width:{self.dilation[1]}")
self.add_prim_attr('dilation', (1, 1, self.dilation[0], self.dilation[1]))
validator.check_value_type('pad', pad, (int, tuple), self.name)
if isinstance(pad, int):
pad = (pad,) * 4
else:
validator.check_integer('pad size', len(pad), 4, Rel.EQ, self.name)
self.padding = pad
self.pad_mode = validator.check_string('pad_mode', pad_mode, ['valid', 'same', 'pad'], self.name)
if pad_mode != 'pad' and pad != (0, 0, 0, 0):
raise ValueError(f"For '{self.name}', padding must be zero when pad_mode is '{pad_mode}'.")
if self.pad_mode == 'pad':
for item in pad:
validator.check_integer('pad item', item, 0, Rel.GE, self.name)
self.mode = validator.check_integer("mode", mode, 3, Rel.EQ, self.name)
self.add_prim_attr('data_format', "NCHW")
self.channel_multiplier = validator.check_integer("channel_multiplier", channel_multiplier, 0, Rel.GT,
self.name)
self.group = validator.check_integer("group", group, 0, Rel.GT, self.name)
self.add_prim_attr('offset_a', 0)
def infer_shape(self, x_shape, w_shape, b_shape=None):
validator.check_integer("weight rank", len(w_shape), 4, Rel.EQ, self.name)
validator.check_integer("x rank", len(x_shape), 4, Rel.EQ, self.name)
validator.check("x_shape[1]", x_shape[1], "w_shape[1]", w_shape[1], Rel.EQ, self.name)
validator.check('kernel_size', self.kernel_size, 'w_shape[2:4]', tuple(w_shape[2:4]), Rel.EQ, self.name)
kernel_size_n, _, kernel_size_h, kernel_size_w = w_shape
_, _, stride_h, stride_w = self.stride
_, _, dilation_h, dilation_w = self.dilation
if kernel_size_n != 1:
raise ValueError(f"The batch of input weight should be 1, but got {kernel_size_n}")
if self.pad_mode == "valid":
h_out = math.ceil((x_shape[2] - dilation_h * (kernel_size_h - 1)) / stride_h)
w_out = math.ceil((x_shape[3] - dilation_w * (kernel_size_w - 1)) / stride_w)
pad_top, pad_bottom, pad_left, pad_right = 0, 0, 0, 0
elif self.pad_mode == "same":
h_out = math.ceil(x_shape[2] / stride_h)
w_out = math.ceil(x_shape[3] / stride_w)
pad_needed_h = max(0, (h_out - 1) * stride_h + dilation_h * (kernel_size_h - 1) + 1 - x_shape[2])
pad_top = math.floor(pad_needed_h / 2)
pad_bottom = pad_needed_h - pad_top
pad_needed_w = max(0, (w_out - 1) * stride_w + dilation_w * (kernel_size_w - 1) + 1 - x_shape[3])
pad_left = math.floor(pad_needed_w / 2)
pad_right = pad_needed_w - pad_left
elif self.pad_mode == 'pad':
pad_top, pad_bottom, pad_left, pad_right = self.padding
h_out = 1 + (x_shape[2] + pad_top + pad_bottom - kernel_size_h - (kernel_size_h - 1) * (dilation_h - 1)) \
/ stride_h
w_out = 1 + (x_shape[3] + pad_left + pad_right - kernel_size_w - (kernel_size_w - 1) * (dilation_w - 1)) \
/ stride_w
h_out = math.floor(h_out)
w_out = math.floor(w_out)
self.pad_list = (pad_top, pad_bottom, pad_left, pad_right)
self.add_prim_attr('pads', self.pad_list)
out_channel = self.channel_multiplier * x_shape[1]
out_shape = [x_shape[0], out_channel, h_out, w_out]
return out_shape
def infer_dtype(self, x_dtype, w_dtype, b_dtype=None):
args = {'x': x_dtype, 'w': w_dtype}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
if x_dtype.element_type() == mstype.int8:
return mstype.tensor_type(mstype.int32)
return x_dtype
class _Pool(PrimitiveWithInfer):
r"""
Performs max/avg pooling operation.
Args:
ksize (Union[int, tuple[int]]): The size of the kernel, that should be a tuple
of two `int` for height and width. Default: 1.
strides (Union[int, tuple[int]]): The stride of the window, that should be
a tuple of two `int` for height and width. Default: 1.
padding (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
Default: "valid".
"""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="valid"):
self.init_prim_io_names(inputs=['x'], outputs=['output'])
validator.check_value_type('ksize', ksize, [int, tuple], self.name)
validator.check_value_type('strides', strides, [int, tuple], self.name)
self.padding = validator.check_string('padding', padding.upper(), ['VALID', 'SAME'], self.name)
self.add_prim_attr("padding", self.padding)
self.is_maxpoolwithargmax = (self.name == "MaxPoolWithArgmax")
if not self.is_maxpoolwithargmax:
self.add_prim_attr('data_format', "NCHW")
self.ksize = _check_positive_int_or_tuple("ksize", ksize, self.name, allow_four=False, ret_four=True)
if self.is_maxpoolwithargmax:
self.ksize = (1, self.ksize[-2], self.ksize[-1], 1)
self.add_prim_attr("ksize", self.ksize)
self.strides = _check_positive_int_or_tuple("strides", strides, self.name, allow_four=False, ret_four=True)
if self.is_maxpoolwithargmax:
self.strides = (1, self.strides[-2], self.strides[-1], 1)
self.add_prim_attr("strides", self.strides)
def infer_shape(self, x_shape):
validator.check_integer("x rank", len(x_shape), 4, Rel.EQ, self.name)
batch, channel, input_h, input_w = x_shape
if self.is_maxpoolwithargmax:
_, kernel_h, kernel_w, _ = self.ksize
_, stride_h, stride_w, _ = self.strides
else:
_, _, kernel_h, kernel_w = self.ksize
_, _, stride_h, stride_w = self.strides
if self.padding == "VALID":
out_h = math.ceil((input_h - (kernel_h - 1)) / stride_h)
out_w = math.ceil((input_w - (kernel_w - 1)) / stride_w)
elif self.padding == "SAME":
out_h = math.ceil(input_h / stride_h)
out_w = math.ceil(input_w / stride_w)
out_shape = [batch, channel, out_h, out_w]
for shape_value in out_shape:
if shape_value <= 0:
raise ValueError(f"For '{self.name}' The kernel size is not valid, "
f"please check it if is larger than data's shape size.")
return out_shape
def infer_dtype(self, x_dtype):
validator.check_subclass("input", x_dtype, mstype.tensor, self.name)
return x_dtype
class MaxPool(_Pool):
r"""
Max pooling operation.
Applies a 2D max pooling over an input Tensor which can be regarded as a composition of 2D planes.
Typically the input is of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, MaxPool outputs
regional maximum in the :math:`(H_{in}, W_{in})`-dimension. Given kernel size
:math:`ks = (h_{ker}, w_{ker})` and stride :math:`s = (s_0, s_1)`, the operation is as follows.
.. math::
\text{output}(N_i, C_j, h, w) = \max_{m=0, \ldots, h_{ker}-1} \max_{n=0, \ldots, w_{ker}-1}
\text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)
Args:
ksize (Union[int, tuple[int]]): The size of kernel used to take the maximum value,
is an int number that represents height and width are both ksize, or a tuple
of two int numbers that represent height and width respectively. Default: 1.
strides (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Default: 1.
padding (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
Default: "valid".
- same: Adopts the way of completion. The height and width of the output will be the same as
the input. The total number of padding will be calculated in horizontal and vertical
directions and evenly distributed to top and bottom, left and right if possible.
Otherwise, the last extra padding will be done from the bottom and the right side.
- valid: Adopts the way of discarding. The possible largest height and width of output
will be returned without padding. Extra pixels will be discarded.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
Outputs:
Tensor, with shape :math:`(N, C_{out}, H_{out}, W_{out})`.
Examples:
>>> input_tensor = Tensor(np.arange(1 * 3 * 3 * 4).reshape((1, 3, 3, 4)), mindspore.float32)
>>> maxpool_op = P.MaxPool(padding="VALID", ksize=2, strides=1)
>>> output_tensor = maxpool_op(input_tensor)
"""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="valid"):
super(MaxPool, self).__init__(ksize, strides, padding)
class MaxPoolWithArgmax(_Pool):
r"""
Performs max pooling on the input Tensor and returns both max values and indices.
Typically the input is of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`; MaxPoolWithArgmax outputs
regional maximum in the :math:`(H_{in}, W_{in})`-dimension. Given kernel size
:math:`ks = (h_{ker}, w_{ker})` and stride :math:`s = (s_0, s_1)`, the operation is as follows.
.. math::
\text{output}(N_i, C_j, h, w) = \max_{m=0, \ldots, h_{ker}-1} \max_{n=0, \ldots, w_{ker}-1}
\text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)
Args:
ksize (Union[int, tuple[int]]): The size of kernel used to take the maximum value and arg value,
is an int number that represents height and width are both ksize, or a tuple of
two int numbers that represent height and width respectively. Default: 1.
strides (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Default: 1.
padding (str): The optional value for pad mode, which can be "same" or "valid". Not case sensitive.
Default: "valid".
- same: Adopts the way of completion. The height and width of the output will be the same as
the input. The total number of padding will be calculated in horizontal and vertical
directions and evenly distributed to top and bottom, left and right if possible.
Otherwise, the last extra padding will be done from the bottom and the right side.
- valid: Adopts the way of discarding. The possible largest height and width of output
will be returned without padding. Extra pixels will be discarded.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
Data type should be float16 or float32.
Outputs:
Tuple of 2 Tensors: the maxpool result and the mask of where the max values come from.
- **output** (Tensor) - Maxpooling result, with shape :math:`(N, C_{out}, H_{out}, W_{out})`.
- **mask** (Tensor) - The positions of the max values, represented by the mask.
Examples:
>>> input_tensor = Tensor(np.arange(1 * 3 * 3 * 4).reshape((1, 3, 3, 4)), mindspore.float32)
>>> maxpool_arg_op = P.MaxPoolWithArgmax(padding="VALID", ksize=2, strides=1)
>>> output_tensor, argmax = maxpool_arg_op(input_tensor)
"""
def __init__(self, ksize=1, strides=1, padding="valid"):
super(MaxPoolWithArgmax, self).__init__(ksize, strides, padding)
self.is_tbe = context.get_context("device_target") == "Ascend"
self.is_gpu = context.get_context("device_target") == "GPU"
def infer_shape(self, x_shape):
out_shape = _Pool.infer_shape(self, x_shape)
_, _, out_h, out_w = out_shape
_, kernel_h, kernel_w, _ = self.ksize
argmax_shape = []
if self.is_tbe:
for i in range(4):
if i == 2:
dim = kernel_h * kernel_w
argmax_shape.append(dim)
elif i == 3:
dim = math.ceil(out_h * out_w / 16) + 1
argmax_shape.append(dim)
else:
argmax_shape.append(x_shape[i])
else:
argmax_shape = out_shape
return out_shape, argmax_shape
def infer_dtype(self, x_dtype):
out_dtype = x_dtype
validator.check_tensor_type_same({"x": x_dtype}, (mstype.float16, mstype.float32), self.name)
argmax_dtype = mstype.uint16
if self.is_gpu:
argmax_dtype = mstype.int32
return out_dtype, argmax_dtype
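# Illustrative sketch (assumption-labelled helper, not MindSpore API): the argmax/mask shape that
# MaxPoolWithArgmax.infer_shape builds above for the Ascend (TBE) backend.
def _example_tbe_argmax_shape(x_shape, kernel_h, kernel_w, out_h, out_w):
    """Mirror the TBE mask shape: [N, C, kernel_h * kernel_w, ceil(out_h * out_w / 16) + 1]."""
    import math
    return [x_shape[0], x_shape[1], kernel_h * kernel_w, math.ceil(out_h * out_w / 16) + 1]
# On other backends the mask simply shares the pooled output shape, as the code above shows.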
class AvgPool(_Pool):
r"""
Average pooling operation.
Applies a 2D average pooling over an input Tensor which can be regarded as a composition of 2D input planes.
Typically the input is of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`; AvgPool outputs
regional average in the :math:`(H_{in}, W_{in})`-dimension. Given kernel size
:math:`ks = (h_{ker}, w_{ker})` and stride :math:`s = (s_0, s_1)`, the operation is as follows.
.. math::
\text{output}(N_i, C_j, h, w) = \frac{1}{h_{ker} * w_{ker}} \sum_{m=0}^{h_{ker}-1} \sum_{n=0}^{w_{ker}-1}
\text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)
Args:
ksize (Union[int, tuple[int]]): The size of kernel used to take the average value,
is an int number that represents height and width are both ksize, or a tuple
of two int numbers that represent height and width respectively. Default: 1.
strides (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Default: 1.
padding (str): The optional value for pad mode, which can be "same" or "valid". Not case sensitive.
Default: "valid".
- same: Adopts the way of completion. The height and width of the output will be the same as
the input. The total number of padding will be calculated in horizontal and vertical
directions and evenly distributed to top and bottom, left and right if possible.
Otherwise, the last extra padding will be done from the bottom and the right side.
- valid: Adopts the way of discarding. The possible largest height and width of output
will be returned without padding. Extra pixels will be discarded.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
Outputs:
Tensor, with shape :math:`(N, C_{out}, H_{out}, W_{out})`.
Examples:
>>> import mindspore
>>> import mindspore.nn as nn
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.avgpool_op = P.AvgPool(padding="VALID", ksize=2, strides=1)
>>>
>>> def construct(self, x):
>>> result = self.avgpool_op(x)
>>> return result
>>>
>>> input_x = Tensor(np.arange(1 * 3 * 3 * 4).reshape(1, 3, 3, 4), mindspore.float32)
>>> net = Net()
>>> result = net(input_x)
[[[[ 2.5 3.5 4.5]
[ 6.5 7.5 8.5]]
[[ 14.5 15.5 16.5]
[ 18.5 19.5 20.5]]
[[ 26.5 27.5 28.5]
[ 30.5 31.5 32.5]]]]
"""
@prim_attr_register
def __init__(self, ksize=1, strides=1, padding="valid"):
if context.get_context("device_target") == "GPU":
self.target = "GPU"
elif context.get_context("enable_ge"):
self.target = "GE"
else:
self.target = "OTHER"
super(AvgPool, self).__init__(ksize, strides, padding)
class Conv2DBackpropInput(PrimitiveWithInfer):
"""
Computes the gradients of convolution with respect to the input.
Args:
out_channel (int): The dimensionality of the output space.
kernel_size (Union[int, tuple[int]]): The size of the convolution window.
pad_mode (str): Modes to fill padding. It could be "valid", "same", or "pad". Default: "valid".
pad (Union[int, tuple[int]]): The pad value to be filled. Default: 0. If `pad` is an integer, the paddings of
top, bottom, left and right are the same, equal to pad. If `pad` is a tuple of four integers, the
padding of top, bottom, left and right equal to pad[0], pad[1], pad[2], and pad[3] correspondingly.
mode (int): Modes for different convolutions. 0: math convolution, 1: cross-correlation convolution,
2: deconvolution, 3: depthwise convolution. Default: 1.
stride (Union[int, tuple[int]]): The stride to be applied to the convolution filter. Default: 1.
dilation (Union[int, tuple[int]]): Specifies the dilation rate to be used for the dilated convolution.
Default: 1.
group (int): Splits input into groups. Default: 1.
Returns:
Tensor, the gradients of convolution.
Examples:
>>> dout = Tensor(np.ones([10, 32, 30, 30]), mindspore.float32)
>>> weight = Tensor(np.ones([32, 32, 3, 3]), mindspore.float32)
>>> x = Tensor(np.ones([10, 32, 32, 32]))
>>> conv2d_backprop_input = P.Conv2DBackpropInput(out_channel=32, kernel_size=3)
>>> conv2d_backprop_input(dout, weight, F.shape(x))
"""
@prim_attr_register
def __init__(self,
out_channel,
kernel_size,
pad_mode="valid",
pad=0,
pad_list=None,
mode=1,
stride=1,
dilation=1,
group=1):
"""init Conv2DBackpropInput"""
self.init_prim_io_names(inputs=['out_backprop', 'filter', 'input_sizes'], outputs=['output'])
self.out_channel = validator.check_integer('out_channel', out_channel, 0, Rel.GT, self.name)
self.kernel_size = _check_positive_int_or_tuple('kernel_size', kernel_size, self.name)
self.stride = _check_positive_int_or_tuple('stride', stride, self.name, allow_four=True, ret_four=False)
self.add_prim_attr('stride', self.stride)
self.dilation = _check_positive_int_or_tuple('dilation', dilation, self.name, allow_four=True, ret_four=True)
self.add_prim_attr('dilation', self.dilation)
validator.check_value_type('pad', pad, (int, tuple), self.name)
if isinstance(pad, int):
pad = (pad,) * 4
else:
validator.check_integer('pad size', len(pad), 4, Rel.EQ, self.name)
self.padding = pad
self.pad_mode = validator.check_string('pad_mode', pad_mode, ['valid', 'same', 'pad'], self.name)
if pad_mode != 'pad' and pad != (0, 0, 0, 0):
raise ValueError(f"For '{self.name}', padding must be zero when pad_mode is '{pad_mode}'.")
if self.pad_mode == 'pad':
for item in pad:
validator.check_integer('pad item', item, 0, Rel.GE, self.name)
pad_mode = pad_mode.upper()
self.add_prim_attr('pad_mode', pad_mode)
self.mode = validator.check_integer('mode', mode, 1, Rel.EQ, self.name)
self.group = validator.check_integer('group', group, 0, Rel.GT, self.name)
self.add_prim_attr('data_format', "NCHW")
if pad_list:
for x in pad_list:
validator.check_integer('element of pad_list', x, 0, Rel.GE, self.name)
self.pad_list = pad_list
def __infer__(self, doutput, w, x_size):
x_size_v = x_size['value']
validator.check_value_type('x_size', x_size_v, [tuple], self.name)
for i, dim_len in enumerate(x_size_v):
validator.check_value_type("x_size[%d]" % i, dim_len, [int], self.name)
args = {'doutput': doutput['dtype'], 'w': w['dtype']}
valid_types = [mstype.int8, mstype.int32, mstype.float16, mstype.float32]
validator.check_tensor_type_same(args, valid_types, self.name)
# infer shape
dout_shape = doutput['shape']
kernel_h = self.kernel_size[0]
kernel_w = self.kernel_size[1]
stride_h = self.stride[0]
stride_w = self.stride[1]
dilation_h = self.dilation[2]
dilation_w = self.dilation[3]
# default pad mode is valid
pad_list = (0, 0, 0, 0)
if self.pad_list:
pad_list = tuple(self.pad_list)
elif self.pad_mode == "SAME":
pad_needed_h = max(0, (dout_shape[2] - 1) * stride_h + dilation_h * (kernel_h - 1) + 1 - x_size_v[2])
pad_top = math.floor(pad_needed_h / 2)
pad_bottom = pad_needed_h - pad_top
pad_needed_w = max(0, (dout_shape[3] - 1) * stride_w + dilation_w * (kernel_w - 1) + 1 - x_size_v[3])
pad_left = math.floor(pad_needed_w / 2)
pad_right = pad_needed_w - pad_left
pad_list = (pad_top, pad_bottom, pad_left, pad_right)
elif self.pad_mode == 'PAD':
pad_list = self.padding
self.add_prim_attr('pad_list', pad_list)
out = {
'value': None,
'shape': x_size_v,
'dtype': doutput['dtype'],
}
return out
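# Illustrative sketch (helper name and scalar arguments are assumptions): the SAME-mode padding
# arithmetic used per spatial dimension in Conv2DBackpropInput.__infer__ above.
def _example_same_pad(dout_len, x_len, kernel, stride, dilation=1):
    """Return (pad_before, pad_after) so the backprop-to-input output covers x_len exactly."""
    import math
    needed = max(0, (dout_len - 1) * stride + dilation * (kernel - 1) + 1 - x_len)
    before = math.floor(needed / 2)
    return before, needed - before
# Applying it to H and W and concatenating gives the (top, bottom, left, right) pad_list above.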
class BiasAdd(PrimitiveWithInfer):
r"""
Returns sum of input and bias tensor.
Adds the 1-D bias tensor to the input tensor, and broadcasts the shape on all axes
except for the channel axis.
Inputs:
- **input_x** (Tensor) - The input tensor. The shape can be 2-4 dimensions.
- **bias** (Tensor) - The bias tensor, with shape :math:`(C)`.
The shape of `bias` must be the same as `input_x` in the second dimension.
Outputs:
Tensor, with the same shape and type as `input_x`.
Examples:
>>> input_x = Tensor(np.arange(6).reshape((2, 3)), mindspore.float32)
>>> bias = Tensor(np.random.random(3).reshape((3,)), mindspore.float32)
>>> bias_add = P.BiasAdd()
>>> bias_add(input_x, bias)
"""
@prim_attr_register
def __init__(self):
self.init_prim_io_names(inputs=['x', 'b'], outputs=['output'])
self.add_prim_attr('data_format', 'NCHW')
def infer_shape(self, x_shape, b_shape):
validator.check_integer("x rank", len(x_shape), 2, Rel.GE, self.name)
validator.check_integer("bias rank", len(b_shape), 1, Rel.EQ, self.name)
validator.check("b_shape[0]", b_shape[0], "x_shape[1]", x_shape[1], Rel.EQ, self.name)
return x_shape
def infer_dtype(self, x_type, b_type):
args = {"input_x": x_type, "bias": b_type}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
return x_type
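# Illustrative numpy sketch (not the kernel): the NCHW broadcast that BiasAdd performs above,
# adding the 1-D bias of length C along the channel axis (axis 1).
def _example_bias_add_nchw(x, b):
    import numpy as np
    x = np.asarray(x, dtype=np.float32)
    b = np.asarray(b, dtype=np.float32)
    # Reshape bias to (1, C, 1, ..., 1) so it broadcasts over every axis except the channel axis.
    return x + b.reshape((1, -1) + (1,) * (x.ndim - 2))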
class TopK(PrimitiveWithInfer):
"""
Finds values and indices of the `k` largest entries along the last dimension.
Args:
sorted (bool): If true, the resulting elements will
be sorted by the values in descending order. Default: False.
Inputs:
- **input_x** (Tensor) - Input to be computed, data type should be float16, float32 or int32.
- **k** (int) - Number of top elements to be computed along the last dimension, constant input is needed.
Outputs:
Tuple of 2 Tensors: the values and the indices.
- **values** (Tensor) - The `k` largest elements along each last dimensional slice.
- **indices** (Tensor) - The indices of values within the last dimension of input.
Examples:
>>> topk = P.TopK(sorted=True)
>>> input_x = Tensor([1, 2, 3, 4, 5], mindspore.float16)
>>> k = 3
>>> values, indices = topk(input_x, k)
>>> assert values == Tensor(np.array([5, 4, 3]), mstype.float16)
>>> assert indices == Tensor(np.array([4, 3, 2]), mstype.int32)
"""
@prim_attr_register
def __init__(self, sorted=False):
validator.check_value_type("sorted", sorted, [bool], self.name)
self.init_prim_io_names(inputs=['input', 'k'],
outputs=['values', 'indices'])
def __infer__(self, input_x, k):
x_dtype = input_x['dtype']
valid_types = (mstype.int32, mstype.float16, mstype.float32)
validator.check_tensor_type_same({'x': x_dtype}, valid_types, self.name)
k_v = k['value']
validator.check_value_type('k', k_v, (int,), self.name)
x_shape = list(input_x['shape'])
ndim = len(x_shape) - 1
x_shape[ndim] = k_v
return {'shape': (x_shape, x_shape),
'dtype': (x_dtype, mstype.int32),
'value': None}
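# Illustrative numpy sketch (reference semantics only) of TopK along the last dimension,
# for the sorted=True case documented above.
def _example_topk(x, k):
    import numpy as np
    x = np.asarray(x)
    indices = np.argsort(-x, axis=-1)[..., :k].astype(np.int32)
    values = np.take_along_axis(x, indices, axis=-1)
    return values, indices
# _example_topk([1, 2, 3, 4, 5], 3) -> ([5, 4, 3], [4, 3, 2]), matching the docstring example.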
class SoftmaxCrossEntropyWithLogits(PrimitiveWithInfer):
r"""
Gets the softmax cross-entropy value between logits and labels, which should be one-hot encoded.
Note:
Sets input logits as `X`, input label as `Y`, output as `loss`. Then,
.. math::
p_{ij} = softmax(X_{ij}) = \frac{exp(x_i)}{\sum_{j = 0}^{N-1}\exp(x_j)}
.. math::
loss_{ij} = -\sum_j{Y_{ij} * ln(p_{ij})}
Inputs:
- **logits** (Tensor) - Input logits, with shape :math:`(N, C)`. Data type should be float16 or float32.
- **labels** (Tensor) - Ground truth labels, with shape :math:`(N, C)`, has the same data type with `logits`.
Outputs:
Tuple of 2 Tensors: the loss with shape `(N,)`, and the dlogits with the same shape as `logits`.
Examples:
>>> logits = Tensor([[2, 4, 1, 4, 5], [2, 1, 2, 4, 3]], mindspore.float32)
>>> labels = Tensor([[0, 0, 0, 0, 1], [0, 0, 0, 1, 0]], mindspore.float32)
>>> softmax_cross = P.SoftmaxCrossEntropyWithLogits()
>>> loss, backprop = softmax_cross(logits, labels)
([0.5899297, 0.52374405], [[0.02760027, 0.20393994, 0.01015357, 0.20393994, -0.44563377],
[0.08015892, 0.02948882, 0.08015892, -0.4077012, 0.21789455]])
"""
@prim_attr_register
def __init__(self):
pass
def infer_shape(self, logits_shape, labels_shape):
validator.check("logits_shape", logits_shape, "labels_shape", labels_shape, Rel.EQ, self.name)
loss_shape = [logits_shape[0]]
dlogits_shape = logits_shape
return (loss_shape, dlogits_shape)
def infer_dtype(self, logits_type, labels_type):
args = {"logits": logits_type, "labels": labels_type}
validator.check_tensor_type_same(args, (mstype.float16, mstype.float32), self.name)
return (logits_type, logits_type)
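# Illustrative numpy reference (a sketch, not the fused kernel) for the per-sample loss and the
# second output returned above, assuming one-hot `labels`; dlogits = softmax(logits) - labels.
def _example_softmax_cross_entropy(logits, labels):
    import numpy as np
    logits = np.asarray(logits, dtype=np.float32)
    labels = np.asarray(labels, dtype=np.float32)
    shifted = logits - logits.max(axis=1, keepdims=True)  # subtract row max for stability
    probs = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
    loss = -(labels * np.log(probs)).sum(axis=1)
    return loss, probs - labels
# For the docstring example above this gives loss ~ [0.58993, 0.52374], as listed.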
class SparseSoftmaxCrossEntropyWithLogits(PrimitiveWithInfer):
r"""
Computes the softmax cross-entropy value between logits and sparse encoding labels.
Note:
Sets input logits as `X`, input label as `Y`, output as `loss`. Then,
.. math::
p_{ij} = softmax(X_{ij}) = \frac{exp(x_i)}{\sum_{j = 0}^{N-1}\exp(x_j)}
.. math::
loss_{ij} = \begin{cases} -ln(p_{ij}), &j = y_i \cr -ln(1 - p_{ij}), & j \neq y_i \end{cases}
.. math::
loss = \sum_{ij} loss_{ij}
Args:
is_grad (bool): If it's true, this operation returns the computed gradient. Default: False.
Inputs:
- **logits** (Tensor) - Input logits, with shape :math:`(N, C)`. Data type should be float16 or float32.
- **labels** (Tensor) - Ground truth labels, with shape :math:`(N)`.
Data type should be int32 or int64.
Outputs:
Tensor, if `is_grad` is False, the output tensor is the value of loss which is a scalar tensor;
if `is_grad` is True, the output tensor is the gradient of input with the same shape as `logits`.
Examples:
Please refer to the usage in nn.SoftmaxCrossEntropyWithLogits source code.
"""
@prim_attr_register
def __init__(self, is_grad=False):
self.init_prim_io_names(inputs=['features', 'labels'], outputs=['output'])
self.is_grad = is_grad
self.add_prim_attr('sens', 1.0)
def infer_shape(self, logits_shape, labels_shape):
validator.check("logits_shape[0]", logits_shape[0], "labels_shape[0]", labels_shape[0], Rel.EQ, self.name)
loss_shape = []
if self.is_grad:
return logits_shape
return loss_shape
def infer_dtype(self, logits_type, labels_type):
validator.check_tensor_type_same({"logits": logits_type}, (mstype.float16, mstype.float32), self.name)
validator.check_tensor_type_same({"labels": labels_type}, (mstype.int32, mstype.int64), self.name)
return logits_type
class ApplyMomentum(PrimitiveWithInfer):
"""
Optimizer that implements the Momentum algorithm.
Refer to the paper `On the importance of initialization and momentum in deep
learning <https://dl.acm.org/doi/10.5555/3042817.3043064>`_ for more details.
Inputs of `variable`, `accumulation` and `gradient` comply with the implicit type conversion rules
to make the data types consistent.
If they have different data types, lower priority data type will be converted to
relatively highest priority data type.
Data type conversion of Parameter is not supported; a RuntimeError exception will be raised.
Args:
use_locking (bool): Enable a lock to protect the update of variable and accumulation tensors. Default: False.
use_nesterov (bool): Enable Nesterov momentum. Default: False.
gradient_scale (float): The scale of the gradient. Default: 1.0.
Inputs:
- **variable** (Parameter) - Weights to be updated. Data type should be float.
- **accumulation** (Parameter) - Accumulated gradient value by moment weight.
Has the same data type with `variable`.
- **learning_rate** (Union[Number, Tensor]) - The learning rate value, should be a float number or
a scalar tensor with float data type.
- **gradient** (Tensor) - Gradients, has the same data type as `variable`.
- **momentum** (Union[Number, Tensor]) - Momentum, should be a float number or
a scalar tensor with float data type.
Outputs:
Tensor, parameters to be updated.
Examples:
Please refer to the usage in nn.ApplyMomentum.
"""
__mindspore_signature__ = (
sig.make_sig('variable', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('accumulation', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('learning_rate', dtype=sig.sig_dtype.T1),
sig.make_sig('gradient', dtype=sig.sig_dtype.T),
sig.make_sig('momentum', dtype=sig.sig_dtype.T2),
)
@prim_attr_register
def __init__(self, use_nesterov=False, use_locking=False, gradient_scale=1.0):
self.init_prim_io_names(inputs=['variable', 'accumulation', 'learning_rate', 'gradient', 'momentum'],
outputs=['output'])
self.is_tbe = context.get_context("device_target") == "Ascend"
self.is_ge = context.get_context("enable_ge")
def infer_shape(self, v_shape, a_shape, l_shape, g_shape, m_shape):
if not self.is_ge and self.is_tbe:
return v_shape, v_shape
return v_shape
def infer_dtype(self, v_dtype, a_dtype, l_dtype, g_dtype, m_dtype):
valid_types = [mstype.float16, mstype.float32, mstype.float64]
if v_dtype != mstype.type_refkey and a_dtype != mstype.type_refkey:
validator.check_tensor_type_same({"v": v_dtype}, valid_types, self.name)
validator.check_tensor_type_same({"a": a_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"l_dtype": l_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"g_dtype": g_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"m_dtype": m_dtype}, valid_types, self.name)
if not self.is_ge and self.is_tbe:
return g_dtype, g_dtype
return g_dtype
class SmoothL1Loss(PrimitiveWithInfer):
r"""
Computes smooth L1 loss, a robust L1 loss.
SmoothL1Loss is a Loss similar to MSELoss but less sensitive to outliers as described in the
`Fast R-CNN <https://arxiv.org/abs/1504.08083>`_ by Ross Girshick.
Note:
Sets input prediction as `X`, input target as `Y`, output as `loss`, and :math:`x = X - Y`. Then,
.. math::
\text{SmoothL1Loss} = \begin{cases} \frac{0.5 x^{2}}{\text{beta}}, &if \left |x \right | < \text{beta} \cr
\left |x \right|-0.5 \text{beta}, &\text{otherwise}\end{cases}
Args:
beta (float): A parameter used to control the point where the function will change from
quadratic to linear. Default: 1.0.
Inputs:
- **prediction** (Tensor) - Predict data. Data type should be float16 or float32.
- **target** (Tensor) - Ground truth data, with the same type and shape as `prediction`.
Outputs:
Tensor, with the same type and shape as `prediction`.
Examples:
>>> loss = P.SmoothL1Loss()
>>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)
>>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)
>>> loss(input_data, target_data)
[0, 0, 0.5]
"""
@prim_attr_register
def __init__(self, beta=1.0):
validator.check_value_type('beta', beta, [float], self.name)
validator.check('beta', beta, '', 0, Rel.GT, self.name)
self.init_prim_io_names(inputs=['prediction', 'target'], outputs=['output'])
def infer_shape(self, prediction, target):
validator.check('prediction shape', prediction, 'target shape', target, Rel.EQ, self.name)
return prediction
def infer_dtype(self, prediction, target):
args = {"prediction": prediction, "target": target}
validator.check_tensor_type_same(args, (mstype.float16, mstype.float32), self.name)
return prediction
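# Illustrative numpy sketch of the elementwise formula above, with x = prediction - target.
def _example_smooth_l1(prediction, target, beta=1.0):
    import numpy as np
    x = np.asarray(prediction, dtype=np.float32) - np.asarray(target, dtype=np.float32)
    return np.where(np.abs(x) < beta, 0.5 * x * x / beta, np.abs(x) - 0.5 * beta)
# _example_smooth_l1([1, 2, 3], [1, 2, 2]) -> [0., 0., 0.5], matching the docstring example.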
class L2Loss(PrimitiveWithInfer):
"""
Calculates half of the L2 norm of a tensor without using the `sqrt`.
Set `input_x` as x and output as loss.
.. math::
loss = sum(x ** 2) / 2
Inputs:
- **input_x** (Tensor) - A input Tensor. Data type should be float16 or float32.
Outputs:
Tensor, has the same dtype as `input_x`. The output tensor is the value of loss which is a scalar tensor.
Examples:
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float16)
>>> l2_loss = P.L2Loss()
>>> l2_loss(input_x)
7.0
"""
@prim_attr_register
def __init__(self):
"""init L2Loss"""
def infer_shape(self, input_x):
loss_shape = []
return loss_shape
def infer_dtype(self, x_type):
validator.check_subclass("x_type", x_type, mstype.tensor, self.name)
valid_types = [mstype.float16, mstype.float32]
validator.check_tensor_type_same({'x_type': x_type}, valid_types, self.name)
return x_type
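# Illustrative one-liner (sketch only) for L2Loss above: half of the sum of squares, no sqrt.
def _example_l2_loss(x):
    import numpy as np
    return float(np.sum(np.square(np.asarray(x, dtype=np.float32))) / 2)
# _example_l2_loss([1, 2, 3]) == 7.0, matching the docstring example.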
class DataFormatDimMap(PrimitiveWithInfer):
"""
Returns the dimension index in the destination data format given the one in the source data format.
Args:
src_format (string): An optional value for source data format. Default: 'NHWC'.
dst_format (string): An optional value for destination data format. Default: 'NCHW'.
Inputs:
- **input_x** (Tensor) - A Tensor with each element as a dimension index in source data format.
The suggested values are in the range [-4, 4). Its data type is int32.
Outputs:
Tensor, has the same type as the `input_x`.
Examples:
>>> x = Tensor([0, 1, 2, 3], mindspore.int32)
>>> dfdm = P.DataFormatDimMap()
>>> dfdm(x)
[0 3 1 2]
"""
@prim_attr_register
def __init__(self, src_format='NHWC', dst_format='NCHW'):
valid_values = ['NHWC', 'NCHW']
self.src_format = validator.check_string("src_format", src_format, valid_values, self.name)
self.dst_format = validator.check_string("dst_format", dst_format, valid_values, self.name)
self.init_prim_io_names(inputs=['input_x'], outputs=['output'])
def infer_shape(self, x_shape):
return x_shape
def infer_dtype(self, x_type):
validator.check_subclass("x", x_type, mstype.tensor, self.name)
valid_types = [mstype.int32]
validator.check_tensor_type_same({"x": x_type}, valid_types, self.name)
return x_type
class RNNTLoss(PrimitiveWithInfer):
"""
Computes the RNNTLoss and its gradient with respect to the softmax outputs.
Args:
blank_label (int): blank label. Default: 0.
Inputs:
- **acts** (Tensor) - Tensor of shape :math:`(B, T, U, V)`. Data type should be float16 or float32.
- **labels** (Tensor[int32]) - Tensor of shape :math:`(B, U-1)`.
- **input_lengths** (Tensor[int32]) - Tensor of shape :math:`(B,)`.
- **label_lengths** (Tensor[int32]) - Tensor of shape :math:`(B,)`.
Outputs:
- **costs** (Tensor) - Tensor of shape :math:`(B,)`, with the same data type as `acts`.
- **grads** (Tensor) - Has the same shape and data type as `acts`.
Examples:
>>> B, T, U, V = 1, 2, 3, 5
>>> acts = np.random.random((B, T, U, V)).astype(np.float32)
>>> labels = np.array([[1, 2]]).astype(np.int32)
>>> input_length = np.array([T] * B).astype(np.int32)
>>> label_length = np.array([len(l) for l in labels]).astype(np.int32)
>>> rnnt_loss = P.RNNTLoss(blank_label=0)
>>> costs, grads = rnnt_loss(Tensor(acts), Tensor(labels), Tensor(input_length), Tensor(label_length))
"""
@prim_attr_register
def __init__(self, blank_label=0):
validator.check_value_type('blank_label', blank_label, [int], self.name)
self.init_prim_io_names(inputs=['acts', 'labels', 'input_length', 'label_length'],
outputs=['costs', 'grads'])
def infer_shape(self, acts_shape, labels_shape, input_length_shape, label_length_shape):
validator.check_integer('acts_rank', len(acts_shape), 4, Rel.EQ, self.name)
validator.check_integer('labels_rank', len(labels_shape), 2, Rel.EQ, self.name)
validator.check_integer('input_length_rank', len(input_length_shape), 1, Rel.EQ, self.name)
validator.check_integer('label_length_rank', len(label_length_shape), 1, Rel.EQ, self.name)
validator.check('labels shape[0]', labels_shape[0], 'acts shape[0]', acts_shape[0], Rel.EQ, self.name)
validator.check('labels shape[1]', labels_shape[1], 'acts shape[2]-1', acts_shape[2]-1, Rel.EQ, self.name)
validator.check('input_length size', input_length_shape[0], 'acts shape[0]', acts_shape[0], Rel.EQ, self.name)
validator.check('label_length size', label_length_shape[0], 'acts shape[0]', acts_shape[0], Rel.EQ, self.name)
costs_shape = (acts_shape[0],)
return (costs_shape, acts_shape)
def infer_dtype(self, acts_type, labels_type, input_length_type, label_length_type):
validator.check_subclass("acts_type", acts_type, mstype.tensor, self.name)
validator.check_subclass("labels_type", labels_type, mstype.tensor, self.name)
validator.check_subclass("input_length_type", input_length_type, mstype.tensor, self.name)
validator.check_subclass("label_length_type", label_length_type, mstype.tensor, self.name)
validator.check_tensor_type_same({"acts_type": acts_type}, [mstype.float32, mstype.float16], self.name)
validator.check_tensor_type_same({"labels_type": labels_type}, [mstype.int32], self.name)
validator.check_tensor_type_same({"input_length_type": input_length_type}, [mstype.int32], self.name)
validator.check_tensor_type_same({"label_length_type": label_length_type}, [mstype.int32], self.name)
return (acts_type, acts_type)
class SGD(PrimitiveWithInfer):
"""
Computes stochastic gradient descent (optionally with momentum).
Nesterov momentum is based on the formula from On the importance of
initialization and momentum in deep learning.
Note:
For details, please refer to `nn.SGD` source code.
Args:
dampening (float): The dampening for momentum. Default: 0.0.
weight_decay (float): Weight decay (L2 penalty). Default: 0.0.
nesterov (bool): Enable Nesterov momentum. Default: False.
Inputs:
- **parameters** (Tensor) - Parameters to be updated. With float16 or float32 data type.
- **gradient** (Tensor) - Gradients. With float16 or float32 data type.
- **learning_rate** (Tensor) - Learning rate, a scalar tensor with float16 or float32 data type.
e.g. Tensor(0.1, mindspore.float32)
- **accum** (Tensor) - Accum(velocity) to be updated. With float16 or float32 data type.
- **momentum** (Tensor) - Momentum, a scalar tensor with float16 or float32 data type.
e.g. Tensor(0.1, mindspore.float32).
- **stat** (Tensor) - States to be updated with the same shape as gradient. With float16 or float32 data type.
Outputs:
Tensor, parameters to be updated.
Examples:
>>> sgd = P.SGD()
>>> parameters = Tensor(np.array([2, -0.5, 1.7, 4]), mindspore.float32)
>>> gradient = Tensor(np.array([1, -1, 0.5, 2]), mindspore.float32)
>>> learning_rate = Tensor(0.01, mindspore.float32)
>>> accum = Tensor(np.array([0.1, 0.3, -0.2, -0.1]), mindspore.float32)
>>> momentum = Tensor(0.1, mindspore.float32)
>>> stat = Tensor(np.array([1.5, -0.3, 0.2, -0.7]), mindspore.float32)
>>> result = sgd(parameters, gradient, learning_rate, accum, momentum, stat)
"""
@prim_attr_register
def __init__(self, dampening=0.0, weight_decay=0.0, nesterov=False):
validator.check_value_type("nesterov", nesterov, [bool], self.name)
if nesterov and dampening != 0:
raise ValueError(f"For '{self.name}', Nesterov momentum requires zero dampening.")
self.init_prim_io_names(inputs=['parameters', 'gradient', 'learning_rate', 'accum', 'momentum', 'stat'],
outputs=['output'])
def infer_shape(self, parameters_shape, gradient_shape, learning_rate_shape,
accum_shape, momentum_shape, stat_shape):
validator.check_integer('parameters rank', len(parameters_shape), 0, Rel.GT, self.name)
validator.check_integer('gradient rank', len(gradient_shape), 0, Rel.GE, self.name)
validator.check_integer('learning rate rank', len(learning_rate_shape), 0, Rel.GE, self.name)
validator.check_integer('accumulation rank', len(accum_shape), 0, Rel.GT, self.name)
validator.check_integer('momentum rank', len(momentum_shape), 0, Rel.GE, self.name)
validator.check_integer('stat rank', len(stat_shape), 0, Rel.GE, self.name)
validator.check("gradient shape", gradient_shape, "stat shape", stat_shape, Rel.EQ, self.name)
return parameters_shape
def infer_dtype(self, parameters_dtype, gradient_dtype, learning_rate_dtype,
accum_dtype, momentum_dtype, stat_dtype):
valid_types = [mstype.float16, mstype.float32]
validator.check_tensor_type_same({"parameters": parameters_dtype}, valid_types, self.name)
validator.check_tensor_type_same({"gradient": gradient_dtype}, valid_types, self.name)
validator.check_tensor_type_same({"learning_rate": learning_rate_dtype}, valid_types, self.name)
validator.check_tensor_type_same({"accum": accum_dtype}, valid_types, self.name)
validator.check_tensor_type_same({"momentum": momentum_dtype}, valid_types, self.name)
validator.check_tensor_type_same({"stat": stat_dtype}, valid_types, self.name)
return parameters_dtype
class ApplyRMSProp(PrimitiveWithInfer):
"""
Optimizer that implements the Root Mean Square Propagation (RMSProp) algorithm.
Please refer to the usage in source code of `nn.RMSProp`.
Note:
Update `var` according to the RMSProp algorithm.
.. math::
s_{t} = \\rho s_{t-1} + (1 - \\rho)(\\nabla Q_{i}(w))^2
.. math::
m_{t} = \\beta m_{t-1} + \\frac{\\eta} {\\sqrt{s_{t} + \\epsilon}} \\nabla Q_{i}(w)
.. math::
w = w - m_{t}
where :math:`w` represents `var`, which will be updated.
:math:`s_{t}` represents `mean_square`, :math:`s_{t-1}` is the last moment of :math:`s_{t}`,
:math:`m_{t}` represents `moment`, :math:`m_{t-1}` is the last moment of :math:`m_{t}`.
:math:`\\rho` represents `decay`. :math:`\\beta` is the momentum term, represents `momentum`.
:math:`\\epsilon` is a smoothing term to avoid division by zero, represents `epsilon`.
:math:`\\eta` represents `learning_rate`. :math:`\\nabla Q_{i}(w)` represents `grad`.
Args:
use_locking (bool): Enable a lock to protect the update of variable tensors. Default: False.
Inputs:
- **var** (Tensor) - Weights to be updated.
- **mean_square** (Tensor) - Mean square gradients, must have the same type as `var`.
- **moment** (Tensor) - Delta of `var`, must have the same type as `var`.
- **learning_rate** (Union[Number, Tensor]) - Learning rate. Should be a float number or
a scalar tensor with float16 or float32 data type.
- **grad** (Tensor) - Gradients, must have the same type as `var`.
- **decay** (float) - Decay rate. Only constant value is allowed.
- **momentum** (float) - Momentum. Only constant value is allowed.
- **epsilon** (float) - Ridge term. Only constant value is allowed.
Outputs:
Tensor, parameters to be updated.
Examples:
>>> apply_rms = P.ApplyRMSProp()
>>> input_x = Tensor(1., mindspore.float32)
>>> mean_square = Tensor(2., mindspore.float32)
>>> moment = Tensor(1., mindspore.float32)
>>> grad = Tensor(2., mindspore.float32 )
>>> learning_rate = Tensor(0.9, mindspore.float32)
>>> decay = 0.0
>>> momentum = 1e-10
>>> epsilon = 0.001
>>> result = apply_rms(input_x, mean_square, moment, learning_rate, grad, decay, momentum, epsilon)
(-2.9977674, 0.80999994, 1.9987665)
"""
@prim_attr_register
def __init__(self, use_locking=False):
self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
self.init_prim_io_names(inputs=['var', 'mean_square', 'moment', 'learning_rate', 'grad',
'rho', 'momentum', 'epsilon'], outputs=['output'])
self.is_ge = context.get_context("enable_ge")
self.is_d = context.get_context("device_target") == "Ascend"
def infer_shape(self, var_shape, mean_square_shape, moment_shape, learning_rate_shape, grad_shape, decay_shape,
momentum_shape, epsilon_shape):
validator.check("var_shape", var_shape, "mean_square_shape", mean_square_shape, Rel.EQ, self.name)
validator.check("var_shape", var_shape, "moment_shape", moment_shape, Rel.EQ, self.name)
validator.check("var_shape", var_shape, "grad_shape", grad_shape, Rel.EQ, self.name)
if not self.is_ge and self.is_d:
return var_shape, var_shape, var_shape
return var_shape
def infer_dtype(self, var_dtype, mean_square_dtype, moment_dtype, learning_rate_dtype, grad_dtype, decay_dtype,
momentum_dtype, epsilon_dtype):
args = {"var": var_dtype, "mean_square": mean_square_dtype, "moment": moment_dtype, "grad": grad_dtype}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
valid_types = [mstype.float16, mstype.float32]
args_decay = {"decay": decay_dtype, 'momentum': momentum_dtype, "epsilon": epsilon_dtype}
validator.check_type_same(args_decay, valid_types, self.name)
args_lr = {"learning_rate": learning_rate_dtype, "decay": decay_dtype}
validator.check_scalar_or_tensor_type_same(args_lr, valid_types, self.name, allow_mix=True)
if not self.is_ge and self.is_d:
return var_dtype, var_dtype, var_dtype
return var_dtype
def infer_value(self, var, mean_square, moment, learning_rate, grad, decay, momentum, epsilon):
if decay is None or momentum is None or epsilon is None:
raise ValueError(f"For {self.name}, decay, momentum, epsilon must be const.")
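# Illustrative numpy sketch (a reference for the equations in the note above, not the kernel):
#   s_t = rho * s_{t-1} + (1 - rho) * grad^2
#   m_t = momentum * m_{t-1} + lr / sqrt(s_t + eps) * grad
#   w   = w - m_t
def _example_rmsprop_step(var, mean_square, moment, learning_rate, grad, decay, momentum, epsilon):
    import numpy as np
    var, mean_square, moment, grad = (np.asarray(t, dtype=np.float32)
                                      for t in (var, mean_square, moment, grad))
    mean_square = decay * mean_square + (1.0 - decay) * grad * grad
    moment = momentum * moment + learning_rate / np.sqrt(mean_square + epsilon) * grad
    return var - moment, mean_square, moment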
class ApplyCenteredRMSProp(PrimitiveWithInfer):
"""
Optimizer that implements the centered RMSProp algorithm.
Please refer to the usage in source code of `nn.RMSProp`.
Note:
Update `var` according to the centered RMSProp algorithm.
.. math::
g_{t} = \\rho g_{t-1} + (1 - \\rho)\\nabla Q_{i}(w)
.. math::
s_{t} = \\rho s_{t-1} + (1 - \\rho)(\\nabla Q_{i}(w))^2
.. math::
m_{t} = \\beta m_{t-1} + \\frac{\\eta} {\\sqrt{s_{t} - g_{t}^2 + \\epsilon}} \\nabla Q_{i}(w)
.. math::
w = w - m_{t}
where :math:`w` represents `var`, which will be updated.
:math:`g_{t}` represents `mean_gradient`, :math:`g_{t-1}` is the last moment of :math:`g_{t}`.
:math:`s_{t}` represents `mean_square`, :math:`s_{t-1}` is the last moment of :math:`s_{t}`,
:math:`m_{t}` represents `moment`, :math:`m_{t-1}` is the last moment of :math:`m_{t}`.
:math:`\\rho` represents `decay`. :math:`\\beta` is the momentum term, represents `momentum`.
:math:`\\epsilon` is a smoothing term to avoid division by zero, represents `epsilon`.
:math:`\\eta` represents `learning_rate`. :math:`\\nabla Q_{i}(w)` represents `grad`.
Args:
use_locking (bool): Enable a lock to protect the update of variable tensors. Default: False.
Inputs:
- **var** (Tensor) - Weights to be updated.
- **mean_gradient** (Tensor) - Mean gradients, must have the same type as `var`.
- **mean_square** (Tensor) - Mean square gradients, must have the same type as `var`.
- **moment** (Tensor) - Delta of `var`, must have the same type as `var`.
- **grad** (Tensor) - Gradients, must have the same type as `var`.
- **learning_rate** (Union[Number, Tensor]) - Learning rate. Should be a float number or
a scalar tensor with float16 or float32 data type.
- **decay** (float) - Decay rate.
- **momentum** (float) - Momentum.
- **epsilon** (float) - Ridge term.
Outputs:
Tensor, parameters to be updated.
Examples:
>>> centered_rms_prop = P.ApplyCenteredRMSProp()
>>> input_x = Tensor(np.arange(-6, 6).astype(np.float32).reshape(2, 3, 2), mindspore.float32)
>>> mean_grad = Tensor(np.arange(12).astype(np.float32).reshape(2, 3, 2), mindspore.float32)
>>> mean_square = Tensor(np.arange(-8, 4).astype(np.float32).reshape(2, 3, 2), mindspore.float32)
>>> moment = Tensor(np.arange(12).astype(np.float32).reshape(2, 3, 2), mindspore.float32)
>>> grad = Tensor(np.arange(12).astype(np.float32).reshape(2, 3, 2), mindspore.float32)
>>> learning_rate = Tensor(0.9, mindspore.float32)
>>> decay = 0.0
>>> momentum = 1e-10
>>> epsilon = 0.05
>>> result = centered_rms_prop(input_x, mean_grad, mean_square, moment, grad,
>>> learning_rate, decay, momentum, epsilon)
[[[ -6. -9.024922]
[-12.049845 -15.074766]
[-18.09969 -21.124613]]
[[-24.149532 -27.174456]
[-30.199379 -33.2243 ]
[-36.249226 -39.274143]]]
"""
@prim_attr_register
def __init__(self, use_locking=False):
self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
self.is_ascend = context.get_context("device_target") == "Ascend"
def infer_shape(self, var_shape, mean_gradient_shape, mean_square_shape, moment_shape, grad_shape,
learning_rate_shape, decay_shape, momentum_shape, epsilon_shape):
validator.check("var_shape", var_shape, "mean_gradient_shape", mean_gradient_shape, Rel.EQ, self.name)
validator.check("var_shape", var_shape, "mean_square_shape", mean_square_shape, Rel.EQ, self.name)
validator.check("var_shape", var_shape, "moment_shape", moment_shape, Rel.EQ, self.name)
validator.check("var_shape", var_shape, "grad_shape", grad_shape, Rel.EQ, self.name)
if self.is_ascend:
return var_shape, mean_gradient_shape, mean_square_shape, moment_shape
return var_shape
def infer_dtype(self, var_dtype, mean_gradient_dtype, mean_square_dtype, moment_dtype, grad_dtype,
learning_rate_dtype, rho_dtype, momentum_dtype, epsilon_dtype):
args = {"var": var_dtype, "mean_gradient": mean_gradient_dtype,
"mean_square": mean_square_dtype, "moment": moment_dtype, "grad": grad_dtype}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
valid_types = [mstype.float16, mstype.float32]
args_rho = {"rho": rho_dtype, 'momentum': momentum_dtype, "epsilon": epsilon_dtype}
validator.check_type_same(args_rho, valid_types, self.name)
args_lr = {"learning_rate": learning_rate_dtype, "rho": rho_dtype}
validator.check_scalar_or_tensor_type_same(args_lr, valid_types, self.name, allow_mix=True)
if self.is_ascend:
return var_dtype, mean_gradient_dtype, mean_square_dtype, moment_dtype
return var_dtype
class LayerNorm(Primitive):
r"""
Applies the Layer Normalization to the input tensor.
This operator normalizes the input tensor over the given axis. LayerNorm is described in the paper
`Layer Normalization <https://arxiv.org/abs/1607.06450>`_.
.. math::
y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta
where :math:`\gamma` is scale, :math:`\beta` is bias, :math:`\epsilon` is epsilon.
Args:
begin_norm_axis (int): The begin axis of the `input_x` to apply LayerNorm,
the value should be in [-1, rank(input)). Default: 1.
begin_params_axis (int): The begin axis of the parameter input (`gamma`, `beta`) to
apply LayerNorm, the value should be in [-1, rank(input)). Default: 1.
epsilon (float): A value added to the denominator for numerical stability. Default: 1e-7.
Inputs:
- **input_x** (Tensor) - Tensor of shape :math:`(N, \ldots)`.
The input of LayerNorm.
- **gamma** (Tensor) - Tensor of shape :math:`(P_0, \ldots, P_\text{begin_params_axis})`.
The learnable parameter `gamma` as the scale on norm.
- **beta** (Tensor) - Tensor of shape :math:`(P_0, \ldots, P_\text{begin_params_axis})`.
The learnable parameter `beta` as the offset on norm.
Outputs:
tuple[Tensor], tuple of 3 tensors: the normalized input, the mean and the variance.
- **output_x** (Tensor) - The normalized input, has the same type and shape as the `input_x`.
The shape is :math:`(N, C)`.
- **mean** (Tensor) - Tensor of shape :math:`(C,)`.
- **variance** (Tensor) - Tensor of shape :math:`(C,)`.
Examples:
>>> input_x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]), mindspore.float32)
>>> gamma = Tensor(np.ones([3]), mindspore.float32)
>>> beta = Tensor(np.ones([3]), mindspore.float32)
>>> layer_norm = P.LayerNorm()
>>> output = layer_norm(input_x, gamma, beta)
([[-0.22474492, 1., 2.2247488], [-0.22474492, 1., 2.2247488]],
[[2.], [2.]], [[0.6666667], [0.6666667]])
"""
@prim_attr_register
def __init__(self, begin_norm_axis=1, begin_params_axis=1, epsilon=1e-7):
validator.check_value_type('begin_norm_axis', begin_norm_axis, [int], self.name)
validator.check_value_type('begin_params_axis', begin_params_axis, [int], self.name)
validator.check_value_type('epsilon', epsilon, [float], self.name)
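# Illustrative numpy reference (a sketch, not the fused kernel) for the formula above,
# normalizing over the axes from `begin_norm_axis` to the last axis.
def _example_layer_norm(x, gamma, beta, begin_norm_axis=1, epsilon=1e-7):
    import numpy as np
    x = np.asarray(x, dtype=np.float32)
    start = begin_norm_axis if begin_norm_axis >= 0 else x.ndim + begin_norm_axis
    axes = tuple(range(start, x.ndim))
    mean = x.mean(axis=axes, keepdims=True)
    variance = x.var(axis=axes, keepdims=True)
    output = (x - mean) / np.sqrt(variance + epsilon) * np.asarray(gamma, np.float32) + np.asarray(beta, np.float32)
    return output, mean, variance
# For the docstring example above this reproduces output ~ [[-0.2247, 1., 2.2247], ...],
# mean [[2.], [2.]] and variance [[0.6667], [0.6667]].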
class L2Normalize(PrimitiveWithInfer):
r"""
L2 normalization Operator.
This operator normalizes the input along the given axis. The function is shown as follows:
.. math::
\text{output} = \frac{x}{\sqrt{\text{max}(\text{sum} (\text{input_x}^2), \epsilon)}},
where :math:`\epsilon` is epsilon.
Args:
axis (int): The begin axis for the input to apply L2 normalize. Default: 0.
epsilon (float): A small value added for numerical stability. Default: 1e-4.
Inputs:
- **input_x** (Tensor) - Input to compute the normalization. Data type should be float16 or float32.
Outputs:
Tensor, with the same type and shape as the input.
Examples:
>>> l2_normalize = P.L2Normalize()
>>> input_x = Tensor(np.random.randint(-256, 256, (2, 3, 4)), mindspore.float32)
>>> result = l2_normalize(input_x)
[[[-0.47247353 -0.30934513 -0.4991462 0.8185567 ]
[-0.08070751 -0.9961299 -0.5741758 0.09262337]
[-0.9916556 -0.3049123 0.5730487 -0.40579924]]
[[-0.88134485 0.9509498 -0.86651784 0.57442576]
[ 0.99673784 0.08789381 -0.8187321 0.9957012 ]
[ 0.12891524 -0.9523804 -0.81952125 0.91396334]]]
"""
@prim_attr_register
def __init__(self, axis=0, epsilon=1e-4):
validator.check_value_type('axis', axis, [int], self.name)
validator.check_value_type('epsilon', epsilon, [int, float], self.name)
def infer_shape(self, input_x):
dim = len(input_x)
validator.check_int_range('axis value', self.axis, -dim, dim, Rel.INC_LEFT, self.name)
return input_x
def infer_dtype(self, input_x):
validator.check_subclass("x", input_x, mstype.tensor, self.name)
validator.check_tensor_type_same({"input_x": input_x}, [mstype.float16, mstype.float32], self.name)
return input_x
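# Illustrative numpy sketch of the formula documented above:
# output = x / sqrt(max(sum(x^2 over axis), epsilon)).
def _example_l2_normalize(x, axis=0, epsilon=1e-4):
    import numpy as np
    x = np.asarray(x, dtype=np.float32)
    denom = np.sqrt(np.maximum(np.sum(np.square(x), axis=axis, keepdims=True), epsilon))
    return x / denom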
class DropoutGenMask(Primitive):
"""
Generates the mask value for the input shape.
Args:
Seed0 (int): Seed0 value for random number generation. Default: 0.
Seed1 (int): Seed1 value for random number generation. Default: 0.
Inputs:
- **shape** (tuple[int]) - The shape of target mask.
- **keep_prob** (Tensor) - The keep rate, between 0 and 1, e.g. keep_prob = 0.9,
means dropping out 10% of input units.
Outputs:
Tensor, the value of generated mask for input shape.
Examples:
>>> dropout_gen_mask = P.DropoutGenMask()
>>> shape = (20, 16, 50)
>>> keep_prob = Tensor(0.5, mindspore.float32)
>>> mask = dropout_gen_mask(shape, keep_prob)
"""
@prim_attr_register
def __init__(self, Seed0=0, Seed1=0):
self.init_prim_io_names(inputs=['shape', 'keep_prob'], outputs=['output'])
validator.check_value_type("Seed0", Seed0, [int], self.name)
validator.check_value_type("Seed1", Seed1, [int], self.name)
self.add_prim_attr("_random_effect", True)
class DropoutDoMask(PrimitiveWithInfer):
"""
Applies dropout mask on the input tensor.
Takes the mask output of DropoutGenMask as input, and applies dropout to the input.
Inputs:
- **input_x** (Tensor) - The input tensor.
- **mask** (Tensor) - The mask to be applied to `input_x`, which is the output of `DropoutGenMask`. The
shape of `input_x` must be the same as the value of `DropoutGenMask`'s input `shape`. If a wrong `mask`
is given, the output of `DropoutDoMask` is unpredictable.
- **keep_prob** (Tensor) - The keep rate, between 0 and 1, e.g. keep_prob = 0.9,
means dropping out 10% of input units. The value of `keep_prob` is the same as the input `keep_prob` of
`DropoutGenMask`.
Outputs:
Tensor, the result after applying dropout.
Examples:
>>> x = Tensor(np.ones([20, 16, 50]), mindspore.float32)
>>> shape = (20, 16, 50)
>>> keep_prob = Tensor(0.5, mindspore.float32)
>>> dropout_gen_mask = P.DropoutGenMask()
>>> dropout_do_mask = P.DropoutDoMask()
>>> mask = dropout_gen_mask(shape, keep_prob)
>>> output = dropout_do_mask(x, mask, keep_prob)
>>> assert output.shape == (20, 16, 50)
"""
@prim_attr_register
def __init__(self):
pass
def __infer__(self, input_x, mask, keep_prob):
input_x_shape = input_x['shape']
mask_shape = mask['shape']
keep_prob_shape = keep_prob['shape']
validator.check("keep_prob's dim", len(keep_prob_shape), '0(scalar)', 0, Rel.EQ, self.name)
size_x = reduce(lambda x, y: x * y, input_x_shape)
if len(mask_shape) != 1:
raise ValueError("DropoutDoMask mask shape should be 1-dimensional.")
size_y = mask_shape[0] * 8
if size_x > size_y:
raise ValueError(f"DropoutDoMask mask does not match the input_x shape: "
f"{input_x_shape}, mask shape: {mask_shape}.")
validator.check_tensor_type_same({"input_x": input_x['dtype']}, [mstype.float32, mstype.float16, mstype.int32],
self.name)
validator.check_tensor_type_same({"input_mask": mask['dtype']}, [mstype.uint8], self.name)
keep_prob_v = keep_prob['value']
if keep_prob_v is not None:
validator.check_number_range('keep_prob', keep_prob_v.asnumpy(), 0, 1, Rel.INC_BOTH, self.name)
out = {'shape': input_x_shape,
'dtype': input_x['dtype'],
'value': None}
return out
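# Illustrative sketch of the size check in DropoutDoMask.__infer__ above: the uint8 mask from
# DropoutGenMask apparently packs one keep/drop bit per element, so a mask of M bytes must cover
# at least the number of elements of `input_x` (M * 8 >= prod(input_x_shape)).
def _example_mask_covers_input(input_x_shape, mask_len_bytes):
    from functools import reduce
    num_elements = reduce(lambda a, b: a * b, input_x_shape)
    return num_elements <= mask_len_bytes * 8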
class ResizeBilinear(PrimitiveWithInfer):
r"""
Resizes the image to a certain size using bilinear interpolation.
The resizing only affects the lower two dimensions which represent the height and width. The input images
can be represented by different data types, but the data types of output images are always float32.
Args:
size (tuple[int]): A tuple of 2 int elements `(new_height, new_width)`, the new size for the images.
align_corners (bool): If it's true, rescale input by `(new_height - 1) / (height - 1)`,
which exactly aligns the 4 corners of images and resized images. If it's false,
rescale by `new_height / height`. Default: False.
Inputs:
- **input** (Tensor) - Image to be resized. Tensor of shape `(N_i, ..., N_n, height, width)`,
with data type of float32 or float16.
Outputs:
Tensor, resized image. Tensor of shape `(N_i, ..., N_n, new_height, new_width)` in `float32`.
Examples:
>>> tensor = Tensor([[[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]]], mindspore.float32)
>>> resize_bilinear = P.ResizeBilinear((5, 5))
>>> result = resize_bilinear(tensor)
>>> assert result.shape == (1, 1, 5, 5)
"""
@prim_attr_register
def __init__(self, size, align_corners=False):
pass
def infer_shape(self, input_shape):
input_shape = list(input_shape)
batch, channel, _, _ = input_shape
out_shape = [batch, channel]
for i in self.size:
out_shape.append(int(i))
return out_shape
def infer_dtype(self, input_dtype):
validator.check_tensor_type_same({'input_dtype': input_dtype}, [mstype.float16, mstype.float32], self.name)
return mstype.tensor_type(mstype.float32)
class OneHot(PrimitiveWithInfer):
r"""
Computes a one-hot tensor.
Makes a new tensor, whose locations represented by indices in `indices` take value `on_value`, while all
other locations take value `off_value`.
Note:
If the input indices is rank `N`, the output will have rank `N+1`. The new axis is created at dimension `axis`.
Args:
axis (int): Position to insert the value. For example, if `indices` shape is [n, c] and `axis` is `-1`, the output
shape will be [n, c, depth]; if `axis` is `0`, the output shape will be [depth, n, c]. Default: -1.
Inputs:
- **indices** (Tensor) - A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`.
Data type must be int32.
- **depth** (int) - A scalar defining the depth of the one hot dimension.
- **on_value** (Tensor) - A value to fill in output when `indices[j] = i`. With data type of float16 or float32.
- **off_value** (Tensor) - A value to fill in output when `indices[j] != i`.
Has the same data type as `on_value`.
Outputs:
Tensor, one_hot tensor. Tensor of shape :math:`(X_0, \ldots, X_{axis}, \text{depth} ,X_{axis+1}, \ldots, X_n)`.
Examples:
>>> indices = Tensor(np.array([0, 1, 2]), mindspore.int32)
>>> depth, on_value, off_value = 3, Tensor(1.0, mindspore.float32), Tensor(0.0, mindspore.float32)
>>> onehot = P.OneHot()
>>> result = onehot(indices, depth, on_value, off_value)
[[1, 0, 0], [0, 1, 0], [0, 0, 1]]
"""
@prim_attr_register
def __init__(self, axis=-1):
self.init_prim_io_names(inputs=['indices', 'depth', 'on_value', 'off_value'], outputs=['output'])
validator.check_value_type("axis", axis, [int], self.name)
def __infer__(self, indices, depth, on_value, off_value):
# check type
validator.check_tensor_type_same({"indices": indices['dtype']}, (mstype.int32,), self.name)
validator.check_type_name("depth", depth['dtype'], mstype.int_type, self.name)
args = {"on_value": on_value['dtype'], "off_value": off_value['dtype']}
validator.check_tensor_type_same(args, (mstype.float16, mstype.float32), self.name)
# check shape
indices_shp = indices['shape']
validator.check_int_range("axis", self.axis, -1, len(indices_shp), Rel.INC_BOTH, self.name)
depth_val = depth['value']
validator.check_integer("depth", depth_val, 0, Rel.GE, self.name)
# create new dimension at end if self.axis is -1
_ = indices_shp.insert(self.axis, depth_val) if self.axis >= 0 else indices_shp.append(depth_val)
return {'shape': indices_shp,
'dtype': on_value['dtype'],
'value': None}
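# Illustrative numpy sketch (axis=-1 case only) of the OneHot semantics documented above.
def _example_one_hot(indices, depth, on_value=1.0, off_value=0.0):
    import numpy as np
    indices = np.asarray(indices)
    out = np.full(indices.shape + (depth,), off_value, dtype=np.float32)
    np.put_along_axis(out, indices[..., None], on_value, axis=-1)
    return out
# _example_one_hot([0, 1, 2], 3) -> [[1, 0, 0], [0, 1, 0], [0, 0, 1]], as in the docstring example.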
class Gelu(PrimitiveWithInfer):
r"""
Gaussian Error Linear Units activation function.
GeLU is described in the paper `Gaussian Error Linear Units (GELUs) <https://arxiv.org/abs/1606.08415>`_.
And also please refer to `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding
<https://arxiv.org/abs/1810.04805>`_.
Gelu is defined as follows:
.. math::
\text{output} = 0.5 * x * (1 + erf(x / \sqrt{2})),
where :math:`erf` is the "Gauss error function" .
Inputs:
- **input_x** (Tensor) - Input to compute the Gelu with data type of float16 or float32.
Outputs:
Tensor, with the same type and shape as input.
Examples:
>>> tensor = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> gelu = P.Gelu()
>>> result = gelu(tensor)
"""
@prim_attr_register
def __init__(self):
"""init GeLU"""
self.init_prim_io_names(inputs=['x'], outputs=['output'])
def infer_shape(self, input_x):
return input_x
def infer_dtype(self, input_x):
validator.check_tensor_type_same({"input_x": input_x}, (mstype.float16, mstype.float32), self.name)
return input_x
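# Illustrative numpy sketch (reference semantics, not the kernel) of the exact-erf GELU above.
def _example_gelu(x):
    import math
    import numpy as np
    x = np.asarray(x, dtype=np.float32)
    erf = np.vectorize(math.erf)
    return 0.5 * x * (1.0 + erf(x / math.sqrt(2.0)))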
class GetNext(PrimitiveWithInfer):
"""
Returns the next element in the dataset queue.
Note:
The GetNext operation needs to be associated with a network and also depends on the init_dataset interface;
it can't be used directly as a single operation.
For details, please refer to `nn.DataWrapper` source code.
Args:
types (list[:class:`mindspore.dtype`]): The type of the outputs.
shapes (list[tuple[int]]): The dimensionality of the outputs.
output_num (int): The output number, length of `types` and `shapes`.
shared_name (str): The queue name of `init_dataset` interface.
Inputs:
No inputs.
Outputs:
tuple[Tensor], the output of Dataset. The shape is described in `shapes`
and the type is described in `types`.
Examples:
>>> get_next = P.GetNext([mindspore.float32, mindspore.int32], [[32, 1, 28, 28], [10]], 2, 'shared_name')
>>> feature, label = get_next()
"""
@prim_attr_register
def __init__(self, types, shapes, output_num, shared_name):
validator.check_value_type("types", types, [list, tuple], self.name)
validator.check_value_type("shapes", shapes, [list, tuple], self.name)
validator.check("types length", len(types), "shapes length", len(shapes), Rel.EQ, self.name)
validator.check_value_type("output_num", output_num, [int], self.name)
def infer_shape(self):
return tuple(self.shapes)
def infer_dtype(self):
return tuple(self.types)
class PReLU(PrimitiveWithInfer):
r"""
Parametric Rectified Linear Unit activation function.
PReLU is described in the paper `Delving Deep into Rectifiers: Surpassing Human-Level Performance on
ImageNet Classification <https://arxiv.org/abs/1502.01852>`_. Defined as follows:
.. math::
prelu(x_i)= \max(0, x_i) + \min(0, w * x_i),
where :math:`x_i` is an element of a channel of the input.
Note:
1-dimensional input_x is not supported.
Inputs:
- **input_x** (Tensor) - Float tensor, representing the output of the previous layer.
With data type of float16 or float32.
- **weight** (Tensor) - Float Tensor, w > 0; only two shapes are legitimate:
1 or the number of channels of the input. With data type of float16 or float32.
Outputs:
Tensor, with the same type as `input_x`.
For detailed information, please refer to `nn.PReLU`.
Examples:
>>> import mindspore
>>> import mindspore.nn as nn
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.prelu = P.PReLU()
>>> def construct(self, input_x, weight):
>>> result = self.prelu(input_x, weight)
>>> return result
>>>
>>> input_x = Tensor(np.random.randint(-3, 3, (2, 3, 2)), mindspore.float32)
>>> weight = Tensor(np.array([0.1, 0.6, -0.3]), mindspore.float32)
>>> net = Net()
>>> result = net(input_x, weight)
[[[-0.1 1. ]
[ 0. 2. ]
[0. 0. ]]
[[-0.2 -0.1 ]
[2. -1.8000001]
[0.6 0.6 ]]]
"""
@prim_attr_register
def __init__(self):
pass
def infer_shape(self, input_x_shape, weight_shape):
input_x_dim = len(input_x_shape)
weight_dim = len(weight_shape)
if input_x_dim == 1:
raise ValueError(f'For \'{self.name}\' input_x rank 1 is not supported.')
if weight_dim != 1:
raise ValueError(f'For \'{self.name}\' weight_dim must be 1, while weight_dim is {weight_dim}.')
if weight_shape[0] != input_x_shape[1] and weight_shape[0] != 1:
raise ValueError(f'For \'{self.name}\' channel of input_x and weight must be matched,'
f' while channel of input_x is {input_x_shape[1]},'
f' weight_shape[0] is {weight_shape[0]}.')
return input_x_shape
def infer_dtype(self, input_x_dtype, weight_dtype):
valid_types = (mstype.float16, mstype.float32)
validator.check_tensor_type_same({"input_x": input_x_dtype}, valid_types, self.name)
validator.check_tensor_type_same({"weight": weight_dtype}, valid_types, self.name)
return input_x_dtype
class LSTM(PrimitiveWithInfer):
"""
Performs the Long Short-Term Memory (LSTM) computation on the input.
For detailed information, please refer to `nn.LSTM`.
"""
@prim_attr_register
def __init__(self, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout):
self.input_size = validator.check_integer("input_size", input_size, 0, Rel.GT, self.name)
self.hidden_size = validator.check_integer("hidden_size", hidden_size, 0, Rel.GT, self.name)
self.num_layers = validator.check_integer("num_layers", num_layers, 0, Rel.GT, self.name)
self.has_bias = validator.check_value_type("has_bias", has_bias, (bool,), self.name)
self.bidirectional = validator.check_value_type("bidirectional", bidirectional, (bool,), self.name)
self.dropout = validator.check_value_type("dropout", dropout, [float], self.name)
self.dropout = validator.check_number_range('dropout', dropout, 0, 1, Rel.INC_BOTH, self.name)
if bidirectional:
self.num_directions = 2
else:
self.num_directions = 1
def infer_shape(self, x_shape, h_shape, c_shape, w_shape):
# (seq, batch_size, feature)
validator.check_integer("x rank", len(x_shape), 3, Rel.EQ, self.name)
validator.check_integer("x[2]", x_shape[2], self.input_size, Rel.EQ, self.name)
# h and c should be same shape
validator.check_integer("h rank", len(h_shape), 3, Rel.EQ, self.name)
validator.check("h_shape", h_shape, "c_shape", c_shape, Rel.EQ, self.name)
# (num_layers * num_directions, batch, hidden_size)
validator.check_integer("h[0]", h_shape[0], self.num_layers * self.num_directions, Rel.EQ, self.name)
validator.check_integer("h[1]", h_shape[1], x_shape[1], Rel.EQ, self.name)
validator.check_integer("h[2]", h_shape[2], self.hidden_size, Rel.EQ, self.name)
y_shape = (x_shape[0], x_shape[1], self.hidden_size * self.num_directions)
# set arbitrary shape for reserved space
type_size = 4
gates_ws_ld = self.get_good_ld(self.hidden_size * 4, type_size)
states_ws_ld = self.get_good_ld(max(self.hidden_size, self.input_size), type_size)
self.ws_gates_size = self.num_layers * self.num_directions * x_shape[0] * x_shape[1] * gates_ws_ld * type_size
self.ws_states_size = (self.num_layers + 1) * self.num_directions * (x_shape[0] + 1) * x_shape[
1] * states_ws_ld * type_size
self.ws_c_states_size = (self.num_layers + 1) * self.num_directions * (x_shape[0] + 1) * x_shape[
1] * states_ws_ld * type_size
self.ws_diff_states_size = (self.num_layers + 1) * self.num_directions * (x_shape[0] + 1) * (2 + 1) * x_shape[
1] * states_ws_ld * type_size
self.ws_grid_comp_size = 0
self.page_size = 4096
current_offset = 0
current_offset += self.ws_gates_size
current_offset = self.rnd_up(current_offset, self.page_size)
current_offset += self.ws_states_size
current_offset = self.rnd_up(current_offset, self.page_size)
current_offset += self.ws_c_states_size
current_offset = self.rnd_up(current_offset, self.page_size)
current_offset += self.ws_diff_states_size
current_offset = self.rnd_up(current_offset, self.page_size)
current_offset += self.ws_grid_comp_size
reserved_shape = (current_offset, 1)
state_shape = (1, 1)
return (y_shape, h_shape, c_shape, reserved_shape, state_shape)
def infer_dtype(self, x_dtype, h_dtype, c_dtype, w_dtype):
args = {'x': x_dtype, 'h': h_dtype, 'c': c_dtype, 'w': w_dtype}
validator.check_tensor_type_same(args, (mstype.float32, mstype.float16), self.name)
return (x_dtype, x_dtype, x_dtype, x_dtype, x_dtype)
def rnd_up(self, current_offset, page_size):
return ((current_offset + page_size - 1) // page_size) * page_size
def get_good_ld(self, dim, type_size):
ld = self.rnd_up(dim, 64 // type_size)
if ld % 256 == 0:  # bump leading dimensions that are exact multiples of 256
return ld + 64 // type_size
return ld
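# Illustrative note on the workspace arithmetic in LSTM.infer_shape above (an
# interpretation of the code, not an authoritative statement of the kernel layout):
# `rnd_up(offset, page_size)` rounds an offset up to the next multiple of
# `page_size`, and `get_good_ld(dim, type_size)` rounds a leading dimension up
# to a multiple of 64 // type_size elements, i.e. 64 bytes. For example, with
# float32 (type_size = 4) and hidden_size = 10:
#     gates_ws_ld = get_good_ld(10 * 4, 4) = rnd_up(40, 16) = 48
# The per-buffer sizes (ws_gates_size, ws_states_size, ...) are then summed,
# with page alignment between them, to produce `reserved_shape`.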
class SigmoidCrossEntropyWithLogits(PrimitiveWithInfer):
r"""
Uses the given logits to compute sigmoid cross entropy.
Note:
Sets input logits as `X`, input label as `Y`, output as `loss`. Then,
.. math::
p_{ij} = sigmoid(X_{ij}) = \frac{1}{1 + e^{-X_{ij}}}
.. math::
loss_{ij} = -[Y_{ij} * ln(p_{ij}) + (1 - Y_{ij})ln(1 - p_{ij})]
Inputs:
- **logits** (Tensor) - Input logits.
- **label** (Tensor) - Ground truth label.
Outputs:
Tensor, with the same shape and type as input `logits`.
Examples:
>>> logits = Tensor(np.random.randn(2, 3).astype(np.float16))
>>> labels = Tensor(np.random.randn(2, 3).astype(np.float16))
>>> sigmoid = P.SigmoidCrossEntropyWithLogits()
>>> sigmoid(logits, labels)
"""
@prim_attr_register
def __init__(self):
"""Init SigmoidCrossEntropyWithLogits"""
self.init_prim_io_names(inputs=['predict', 'target'], outputs=['loss'])
def infer_shape(self, x_shape, y_shape):
validator.check("x_shape", x_shape, "y_shape", y_shape, Rel.EQ, self.name)
return x_shape
def infer_dtype(self, x_dtype, y_dtype):
args = {"x_dtype": x_dtype, "y_dtype": y_dtype}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
return x_dtype
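# A minimal NumPy sketch of the per-element loss documented above. The helper
# name is hypothetical and the function is illustrative only (not part of the
# operator API); it reuses this module's existing numpy import.
def _sigmoid_cross_entropy_reference(logits, labels):
    """p = sigmoid(logits); loss = -[y * ln(p) + (1 - y) * ln(1 - p)], element-wise."""
    p = 1.0 / (1.0 + np.exp(-logits))
    return -(labels * np.log(p) + (1.0 - labels) * np.log(1.0 - p))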
class Pad(PrimitiveWithInfer):
"""
Pads input tensor according to the paddings.
Args:
paddings (tuple): The shape of parameter `paddings` is (N, 2). N is the rank of input data. All elements of
paddings are int type. For the input in the `D`-th dimension, paddings[D, 0] indicates how many elements to
pad before the input tensor in the `D`-th dimension, and paddings[D, 1] indicates how many elements to
pad after the input tensor in the `D`-th dimension.
Inputs:
- **input_x** (Tensor) - The input tensor.
Outputs:
Tensor, the tensor after padding.
Examples:
>>> input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
>>> pad_op = P.Pad(((1, 2), (2, 1)))
>>> output_tensor = pad_op(input_tensor)
>>> assert output_tensor == Tensor(np.array([[ 0. , 0. , 0. , 0. , 0. , 0. ],
>>> [ 0. , 0. , -0.1, 0.3, 3.6, 0. ],
>>> [ 0. , 0. , 0.4, 0.5, -3.2, 0. ],
>>> [ 0. , 0. , 0. , 0. , 0. , 0. ],
>>> [ 0. , 0. , 0. , 0. , 0. , 0. ]]), mindspore.float32)
"""
@prim_attr_register
def __init__(self, paddings):
"""Init Pad"""
self.init_prim_io_names(inputs=['x'], outputs=['y'])
if not isinstance(paddings, tuple):
raise TypeError('Paddings must be tuple type.')
for item in paddings:
if len(item) != 2:
raise ValueError('The shape of paddings must be (n, 2).')
self.paddings = paddings
def infer_shape(self, x):
paddings = np.array(self.paddings)
validator.check_integer('paddings.shape', paddings.size, len(x) * 2, Rel.EQ, self.name)
if not np.all(paddings >= 0):
raise ValueError('All elements of paddings must be >= 0.')
y_shape = ()
for i in range(int(paddings.size / 2)):
y_shape += ((x[i] + paddings[i, 0] + paddings[i, 1]),)
return y_shape
def infer_dtype(self, x):
validator.check_subclass("input_x", x, mstype.tensor, self.name)
return x
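# Illustrative note: judging from the example above, Pad fills the new positions
# with zeros, so for a concrete NumPy array the result should match constant-mode
# padding. A hedged sketch (hypothetical helper, not part of the operator API):
def _pad_reference(x, paddings):
    """Zero-pad `x` by (before, after) amounts per dimension, as in the example above."""
    return np.pad(x, paddings, mode='constant', constant_values=0)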
class MirrorPad(PrimitiveWithInfer):
"""
Pads the input tensor according to the paddings and mode.
Args:
mode (str): Specifies padding mode. The optional values are "REFLECT", "SYMMETRIC".
Default: "REFLECT".
Inputs:
- **input_x** (Tensor) - The input tensor.
- **paddings** (Tensor) - The paddings tensor. The value of `paddings` is a matrix (list),
and its shape is (N, 2). N is the rank of input data. All elements of paddings
are int type. For the input in the `D`-th dimension, paddings[D, 0] indicates how many elements to
pad before the input tensor in the `D`-th dimension, and paddings[D, 1] indicates how many elements
to pad after the input tensor in the `D`-th dimension.
Outputs:
Tensor, the tensor after padding.
- If `mode` is "REFLECT", it uses a way of symmetrical copying throught the axis of symmetry to fill in.
If the `input_x` is [[1,2,3],[4,5,6],[7,8,9]] and `paddings` is [[1,1],[2,2]], then the
Outputs is [[6,5,4,5,6,5,4],[3,2,1,2,3,2,1],[6,5,4,5,6,5,4],[9,8,7,8,9,8,7],[6,5,4,5,6,5,4]].
- If `mode` is "SYMMETRIC", the filling method is similar to the "REFLECT". It is also copied
according to the symmetry axis, except that it includes the symmetry axis. If the `input_x`
is [[1,2,3],[4,5,6],[7,8,9]] and `paddings` is [[1,1],[2,2]], then the Outputs is
[[2,1,1,2,3,3,2],[2,1,1,2,3,3,2],[5,4,4,5,6,6,5],[8,7,7,8,9,9,8],[8,7,7,8,9,9,8]].
Examples:
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> import mindspore.nn as nn
>>> import numpy as np
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.pad = P.MirrorPad(mode="REFLECT")
>>> def construct(self, x, paddings):
>>> return self.pad(x, paddings)
>>> x = np.random.random(size=(2, 3)).astype(np.float32)
>>> paddings = Tensor([[1,1],[2,2]])
>>> pad = Net()
>>> ms_output = pad(Tensor(x), paddings)
"""
@prim_attr_register
def __init__(self, mode='REFLECT'):
"""Init Pad"""
validator.check_string('mode', mode, ['REFLECT', 'SYMMETRIC'], self.name)
self.mode = mode
self.set_const_input_indexes([1])
def __infer__(self, input_x, paddings):
validator.check_subclass("input_x", input_x['dtype'], mstype.tensor, self.name)
validator.check_subclass("paddings", paddings['dtype'], mstype.tensor, self.name)
x_shape = list(input_x['shape'])
paddings_value = paddings['value'].asnumpy()
paddings_size = paddings_value.size
validator.check_integer('paddings.shape', paddings_size, len(x_shape) * 2, Rel.EQ, self.name)
if not np.all(paddings_value >= 0):
raise ValueError('All elements of paddings must be >= 0.')
adjust = 0
if self.mode == 'SYMMETRIC':
adjust = 1
for i in range(0, int(paddings_size / 2)):
if (paddings_value[i, 0] >= x_shape[i] + adjust) or (paddings_value[i, 1] >= x_shape[i] + adjust):
raise ValueError('At least one dim has too high a padding value for this input and mode')
y_shape = ()
for i in range(0, int(paddings_size / 2)):
y_shape += ((x_shape[i] + paddings_value[i, 0] + paddings_value[i, 1]),)
return {'shape': y_shape,
'dtype': input_x['dtype'],
'value': None}
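# Illustrative note: the REFLECT and SYMMETRIC behaviours described above match the
# docstring's own matrices when reproduced with NumPy's 'reflect' and 'symmetric'
# padding modes. A hedged sketch (hypothetical helper, not part of the operator API):
def _mirror_pad_reference(x, paddings, mode='REFLECT'):
    """Mirror-pad a NumPy array with (before, after) amounts per dimension."""
    np_mode = 'reflect' if mode == 'REFLECT' else 'symmetric'
    return np.pad(x, paddings, mode=np_mode)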
class ROIAlign(PrimitiveWithInfer):
"""
Computes Region of Interest (RoI) Align operator.
The operator computes the value of each sampling point by bilinear interpolation from the nearby grid points on the
feature map. No quantization is performed on any coordinates involved in the RoI, its bins, or the sampling
points. The details of (RoI) Align operator are described in `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_.
Args:
pooled_height (int): The output features' height.
pooled_width (int): The output features' width.
spatial_scale (float): A scaling factor that maps the raw image coordinates to the input
feature map coordinates. Suppose the height of a RoI is `ori_h` in the raw image and `fea_h` in the
input feature map, the `spatial_scale` should be `fea_h / ori_h`.
sample_num (int): Number of sampling points. Default: 2.
roi_end_mode (int): Number must be 0 or 1. Default: 1.
Inputs:
- **features** (Tensor) - The input features, whose shape should be `(N, C, H, W)`.
- **rois** (Tensor) - The shape is `(rois_n, 5)`. With data type of float16 or float32.
`rois_n` represents the number of RoI. The size of the second dimension should be `5` and the `5` columns
are `(image_index, top_left_x, top_left_y, bottom_right_x, bottom_right_y)`. `image_index` represents the
index of image. `top_left_x` and `top_left_y` represent the `x, y` coordinates of the top left corner
of corresponding RoI, respectively. `bottom_right_x` and `bottom_right_y` represent the `x, y`
coordinates of the bottom right corner of corresponding RoI, respectively.
Outputs:
Tensor, the shape is `(rois_n, C, pooled_height, pooled_width)`.
Examples:
>>> input_tensor = Tensor(np.array([[[[1., 2.], [3., 4.]]]]), mindspore.float32)
>>> rois = Tensor(np.array([[0, 0.2, 0.3, 0.2, 0.3]]), mindspore.float32)
>>> roi_align = P.ROIAlign(2, 2, 0.5, 2)
>>> output_tensor = roi_align(input_tensor, rois)
>>> assert output_tensor == Tensor(np.array([[[[2.15]]]]), mindspore.float32)
"""
@prim_attr_register
def __init__(self, pooled_height, pooled_width, spatial_scale, sample_num=2, roi_end_mode=1):
"""init ROIAlign"""
validator.check_value_type("pooled_height", pooled_height, [int], self.name)
validator.check_value_type("pooled_width", pooled_width, [int], self.name)
validator.check_value_type("spatial_scale", spatial_scale, [float], self.name)
validator.check_value_type("sample_num", sample_num, [int], self.name)
validator.check_value_type("roi_end_mode", roi_end_mode, [int], self.name)
validator.check_int_range("roi_end_mode", roi_end_mode, 0, 1, Rel.INC_BOTH, self.name)
self.pooled_height = pooled_height
self.pooled_width = pooled_width
self.spatial_scale = spatial_scale
self.sample_num = sample_num
self.roi_end_mode = roi_end_mode
def infer_shape(self, inputs_shape, rois_shape):
return [rois_shape[0], inputs_shape[1], self.pooled_height, self.pooled_width]
def infer_dtype(self, inputs_type, rois_type):
valid_types = (mstype.float16, mstype.float32)
validator.check_tensor_type_same({"inputs_type": inputs_type}, valid_types, self.name)
validator.check_tensor_type_same({"rois_type": rois_type}, valid_types, self.name)
return inputs_type
class Adam(PrimitiveWithInfer):
r"""
Updates gradients by Adaptive Moment Estimation (Adam) algorithm.
The Adam algorithm is proposed in `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_.
The updating formulas are as follows,
.. math::
\begin{array}{ll} \\
m = \beta_1 * m + (1 - \beta_1) * g \\
v = \beta_2 * v + (1 - \beta_2) * g * g \\
l = \alpha * \frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t} \\
w = w - l * \frac{m}{\sqrt{v} + \epsilon}
\end{array}
:math:`m` represents the 1st moment vector, :math:`v` represents the 2nd moment vector, :math:`g` represents
`gradient`, :math:`l` represents scaling factor `lr`, :math:`\beta_1, \beta_2` represent `beta1` and `beta2`,
:math:`t` represents updating step while :math:`beta_1^t` and :math:`beta_2^t` represent `beta1_power` and
`beta2_power`, :math:`\alpha` represents `learning_rate`, :math:`w` represents `var`, :math:`\epsilon` represents
`epsilon`.
Args:
use_locking (bool): Whether to enable a lock to protect variable tensors from being updated.
If true, updates of the var, m, and v tensors will be protected by a lock.
If false, the result is unpredictable. Default: False.
use_nesterov (bool): Whether to use Nesterov Accelerated Gradient (NAG) algorithm to update the gradients.
If true, update the gradients using NAG.
If false, update the gradients without using NAG. Default: False.
Inputs:
- **var** (Tensor) - Weights to be updated.
- **m** (Tensor) - The 1st moment vector in the updating formula, has the same type as `var`.
- **v** (Tensor) - the 2nd moment vector in the updating formula.
Mean square gradients with the same type as `var`.
- **beta1_power** (float) - :math:`beta_1^t` in the updating formula.
- **beta2_power** (float) - :math:`beta_2^t` in the updating formula.
- **lr** (float) - :math:`l` in the updating formula.
- **beta1** (float) - The exponential decay rate for the 1st moment estimations.
- **beta2** (float) - The exponential decay rate for the 2nd moment estimations.
- **epsilon** (float) - Term added to the denominator to improve numerical stability.
- **gradient** (Tensor) - Gradients, has the same type as `var`.
Outputs:
Tuple of 3 Tensors, the updated parameters.
- **var** (Tensor) - The same shape and data type as `var`.
- **m** (Tensor) - The same shape and data type as `m`.
- **v** (Tensor) - The same shape and data type as `v`.
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.apply_adam = P.Adam()
>>> self.var = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="var")
>>> self.m = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="m")
>>> self.v = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="v")
>>> def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad):
>>> out = self.apply_adam(self.var, self.m, self.v, beta1_power, beta2_power, lr, beta1, beta2,
>>> epsilon, grad)
>>> return out
>>> net = Net()
>>> gradient = Tensor(np.random.rand(3, 3, 3).astype(np.float32))
>>> result = net(0.9, 0.999, 0.001, 0.9, 0.999, 1e-8, gradient)
"""
@prim_attr_register
def __init__(self, use_locking=False, use_nesterov=False):
validator.check_value_type("use_locking", use_locking, [bool], self.name)
validator.check_value_type("use_nesterov", use_nesterov, [bool], self.name)
def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, beta2_power_shape, lr_shape,
beta1_shape, beta2_shape, epsilon_shape, grad_shape):
validator.check("var_shape", var_shape, "m_shape", m_shape, Rel.EQ, self.name)
validator.check("var_shape", var_shape, "v_shape", v_shape, Rel.EQ, self.name)
validator.check("var_shape", var_shape, "grad_shape", grad_shape, Rel.EQ, self.name)
return var_shape, m_shape, v_shape
def infer_dtype(self, var_dtype, m_dtype, v_dtype, beta1_power_dtype, beta2_power_dtype, lr_dtype,
beta1_dtype, beta2_dtype, epsilon_dtype, grad_dtype):
args = {"var": var_dtype, "m": m_dtype, "v": v_dtype, "grad": grad_dtype}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
args = {"beta1_power": beta1_power_dtype, "beta2_power": beta2_power_dtype, 'lr': lr_dtype,
"beta1": beta1_dtype, "beta2": beta2_dtype, "epsilon": epsilon_dtype}
validator.check_scalar_or_tensor_type_same(args, [mstype.float16, mstype.float32], self.name, True)
return var_dtype, m_dtype, v_dtype
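# A minimal NumPy sketch of the dense update formulas documented above
# (hypothetical helper, illustrative only; not part of the operator API).
def _adam_update_reference(var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad):
    """One dense Adam step, mirroring the formulas in the Adam docstring."""
    m = beta1 * m + (1 - beta1) * grad
    v = beta2 * v + (1 - beta2) * grad * grad
    lr_t = lr * np.sqrt(1 - beta2_power) / (1 - beta1_power)
    var = var - lr_t * m / (np.sqrt(v) + epsilon)
    return var, m, v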
class FusedSparseAdam(PrimitiveWithInfer):
r"""
Merges duplicate values of the gradient and then updates parameters by the Adaptive Moment Estimation (Adam)
algorithm. This operator is used when the gradient is sparse.
The Adam algorithm is proposed in `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_.
The updating formulas are as follows,
.. math::
\begin{array}{ll} \\
m = \beta_1 * m + (1 - \beta_1) * g \\
v = \beta_2 * v + (1 - \beta_2) * g * g \\
l = \alpha * \frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t} \\
w = w - l * \frac{m}{\sqrt{v} + \epsilon}
\end{array}
:math:`m` represents the 1st moment vector, :math:`v` represents the 2nd moment vector, :math:`g` represents
`gradient`, :math:`l` represents scaling factor `lr`, :math:`\beta_1, \beta_2` represent `beta1` and `beta2`,
:math:`t` represents updating step while :math:`beta_1^t` and :math:`beta_2^t` represent `beta1_power` and
`beta2_power`, :math:`\alpha` represents `learning_rate`, :math:`w` represents `var`, :math:`\epsilon` represents
`epsilon`.
All inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.
If they have different data types, the lower-priority data type will be converted to the
relatively highest-priority data type.
A RuntimeError exception will be thrown when data type conversion of a Parameter is required.
Args:
use_locking (bool): Whether to enable a lock to protect variable tensors from being updated.
If true, updates of the var, m, and v tensors will be protected by a lock.
If false, the result is unpredictable. Default: False.
use_nesterov (bool): Whether to use Nesterov Accelerated Gradient (NAG) algorithm to update the gradients.
If true, update the gradients using NAG.
If false, update the gradients without using NAG. Default: False.
Inputs:
- **var** (Parameter) - Parameters to be updated with float32 data type.
- **m** (Parameter) - The 1st moment vector in the updating formula, has the same type as `var` with
float32 data type.
- **v** (Parameter) - The 2nd moment vector in the updating formula. Mean square gradients, has the same type as
`var` with float32 data type.
- **beta1_power** (Tensor) - :math:`beta_1^t` in the updating formula with float32 data type.
- **beta2_power** (Tensor) - :math:`beta_2^t` in the updating formula with float32 data type.
- **lr** (Tensor) - :math:`l` in the updating formula. With float32 data type.
- **beta1** (Tensor) - The exponential decay rate for the 1st moment estimations with float32 data type.
- **beta2** (Tensor) - The exponential decay rate for the 2nd moment estimations with float32 data type.
- **epsilon** (Tensor) - Term added to the denominator to improve numerical stability with float32 data type.
- **gradient** (Tensor) - Gradient value with float32 data type.
- **indices** (Tensor) - Gradient indices with int32 data type.
Outputs:
Tuple of 3 Tensors, this operator will update the input parameters directly, the outputs are useless.
- **var** (Tensor) - A Tensor with shape (1,).
- **m** (Tensor) - A Tensor with shape (1,).
- **v** (Tensor) - A Tensor with shape (1,).
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> import mindspore.common.dtype as mstype
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.sparse_apply_adam = P.FusedSparseAdam()
>>> self.var = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="var")
>>> self.m = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="m")
>>> self.v = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="v")
>>> def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, indices):
>>> out = self.sparse_apply_adam(self.var, self.m, self.v, beta1_power, beta2_power, lr, beta1, beta2,
>>> epsilon, grad, indices)
>>> return out
>>> net = Net()
>>> beta1_power = Tensor(0.9, mstype.float32)
>>> beta2_power = Tensor(0.999, mstype.float32)
>>> lr = Tensor(0.001, mstype.float32)
>>> beta1 = Tensor(0.9, mstype.float32)
>>> beta2 = Tensor(0.999, mstype.float32)
>>> epsilon = Tensor(1e-8, mstype.float32)
>>> gradient = Tensor(np.random.rand(2, 1, 2), mstype.float32)
>>> indices = Tensor([0, 1], mstype.int32)
>>> result = net(beta1_power, beta2_power, lr, beta1, beta2, epsilon, gradient, indices)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('beta1_power', dtype=sig.sig_dtype.T),
sig.make_sig('beta2_power', dtype=sig.sig_dtype.T),
sig.make_sig('lr', dtype=sig.sig_dtype.T),
sig.make_sig('beta1', dtype=sig.sig_dtype.T),
sig.make_sig('beta2', dtype=sig.sig_dtype.T),
sig.make_sig('epsilon', dtype=sig.sig_dtype.T),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
sig.make_sig('indices', dtype=sig.sig_dtype.T1),
)
@prim_attr_register
def __init__(self, use_locking=False, use_nesterov=False):
validator.check_value_type("use_locking", use_locking, [bool], self.name)
validator.check_value_type("use_nesterov", use_nesterov, [bool], self.name)
self.init_prim_io_names(inputs=['var', 'm', 'v', 'beta1_power', 'beta2_power', 'lr', 'beta1', 'beta2',
'epsilon', 'grad', 'indices'],
outputs=['var', 'm', 'v'])
def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, beta2_power_shape, lr_shape,
beta1_shape, beta2_shape, epsilon_shape, grad_shape, indices_shape):
validator.check("var_shape", var_shape, "m_shape", m_shape, Rel.EQ, self.name)
validator.check("var_shape", var_shape, "v_shape", v_shape, Rel.EQ, self.name)
validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
if len(var_shape) > 1 and grad_shape != indices_shape + var_shape[1:]:
raise ValueError(f"For '{self.name}', the shape of updates should be [] or "
f"grad_shape = indices_shape + var_shape[1:], but got var_shape: {var_shape}, "
f"indices_shape: {indices_shape}, grad_shape: {grad_shape}.")
return [1], [1], [1]
def infer_dtype(self, var_dtype, m_dtype, v_dtype, beta1_power_dtype, beta2_power_dtype, lr_dtype,
beta1_dtype, beta2_dtype, epsilon_dtype, grad_dtype, indices_dtype):
args = {"var": var_dtype, "m": m_dtype, "v": v_dtype, "grad": grad_dtype}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
args = {"beta1_power": beta1_power_dtype, "beta2_power": beta2_power_dtype, 'lr': lr_dtype,
"beta1": beta1_dtype, "beta2": beta2_dtype, "epsilon": epsilon_dtype}
validator.check_scalar_or_tensor_type_same(args, [mstype.float16, mstype.float32], self.name, True)
validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
return var_dtype, m_dtype, v_dtype
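# Illustrative note on the sparse layout checked in infer_shape above: `grad`
# holds one slice per entry of `indices`, so grad_shape must equal
# indices_shape + var_shape[1:]. In the docstring example var is (3, 1, 2),
# indices is (2,) and grad is (2, 1, 2); grad[i] is the gradient slice for
# var[indices[i]], with duplicate indices merged before the update.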
class FusedSparseLazyAdam(PrimitiveWithInfer):
r"""
Merges duplicate values of the gradient and then updates parameters by the Adaptive Moment Estimation (Adam)
algorithm. This operator is used when the gradient is sparse. The behavior is not equivalent to the
original Adam algorithm, as only the parameters at the current indices will be updated.
The Adam algorithm is proposed in `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_.
The updating formulas are as follows,
.. math::
\begin{array}{ll} \\
m = \beta_1 * m + (1 - \beta_1) * g \\
v = \beta_2 * v + (1 - \beta_2) * g * g \\
l = \alpha * \frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t} \\
w = w - l * \frac{m}{\sqrt{v} + \epsilon}
\end{array}
:math:`m` represents the 1st moment vector, :math:`v` represents the 2nd moment vector, :math:`g` represents
`gradient`, :math:`l` represents scaling factor `lr`, :math:`\beta_1, \beta_2` represent `beta1` and `beta2`,
:math:`t` represents updating step while :math:`beta_1^t` and :math:`beta_2^t` represent `beta1_power` and
`beta2_power`, :math:`\alpha` represents `learning_rate`, :math:`w` represents `var`, :math:`\epsilon` represents
`epsilon`.
All inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.
If they have different data types, the lower-priority data type will be converted to the
relatively highest-priority data type.
A RuntimeError exception will be thrown when data type conversion of a Parameter is required.
Args:
use_locking (bool): Whether to enable a lock to protect variable tensors from being updated.
If true, updates of the var, m, and v tensors will be protected by a lock.
If false, the result is unpredictable. Default: False.
use_nesterov (bool): Whether to use Nesterov Accelerated Gradient (NAG) algorithm to update the gradients.
If true, update the gradients using NAG.
If false, update the gradients without using NAG. Default: False.
Inputs:
- **var** (Parameter) - Parameters to be updated with float32 data type.
- **m** (Parameter) - The 1st moment vector in the updating formula, has the same type as `var` with
float32 data type.
- **v** (Parameter) - The 2nd moment vector in the updating formula. Mean square gradients, has the same type as
`var` with float32 data type.
- **beta1_power** (Tensor) - :math:`beta_1^t` in the updating formula with float32 data type.
- **beta2_power** (Tensor) - :math:`beta_2^t` in the updating formula with float32 data type.
- **lr** (Tensor) - :math:`l` in the updating formula with float32 data type.
- **beta1** (Tensor) - The exponential decay rate for the 1st moment estimations with float32 data type.
- **beta2** (Tensor) - The exponential decay rate for the 2nd moment estimations with float32 data type.
- **epsilon** (Tensor) - Term added to the denominator to improve numerical stability with float32 data type.
- **gradient** (Tensor) - Gradient value with float32 data type.
- **indices** (Tensor) - Gradient indices with int32 data type.
Outputs:
Tuple of 3 Tensors, this operator will update the input parameters directly, the outputs are useless.
- **var** (Tensor) - A Tensor with shape (1,).
- **m** (Tensor) - A Tensor with shape (1,).
- **v** (Tensor) - A Tensor with shape (1,).
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> import mindspore.common.dtype as mstype
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.sparse_apply_lazyadam = P.FusedSparseLazyAdam()
>>> self.var = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="var")
>>> self.m = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="m")
>>> self.v = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="v")
>>> def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, indices):
>>> out = self.sparse_apply_lazyadam(self.var, self.m, self.v, beta1_power, beta2_power, lr, beta1,
>>> beta2, epsilon, grad, indices)
>>> return out
>>> net = Net()
>>> beta1_power = Tensor(0.9, mstype.float32)
>>> beta2_power = Tensor(0.999, mstype.float32)
>>> lr = Tensor(0.001, mstype.float32)
>>> beta1 = Tensor(0.9, mstype.float32)
>>> beta2 = Tensor(0.999, mstype.float32)
>>> epsilon = Tensor(1e-8, mstype.float32)
>>> gradient = Tensor(np.random.rand(2, 1, 2), mstype.float32)
>>> indices = Tensor([0, 1], mstype.int32)
>>> result = net(beta1_power, beta2_power, lr, beta1, beta2, epsilon, gradient, indices)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('beta1_power', dtype=sig.sig_dtype.T),
sig.make_sig('beta2_power', dtype=sig.sig_dtype.T),
sig.make_sig('lr', dtype=sig.sig_dtype.T),
sig.make_sig('beta1', dtype=sig.sig_dtype.T),
sig.make_sig('beta2', dtype=sig.sig_dtype.T),
sig.make_sig('epsilon', dtype=sig.sig_dtype.T),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
sig.make_sig('indices', dtype=sig.sig_dtype.T1),
)
@prim_attr_register
def __init__(self, use_locking=False, use_nesterov=False):
validator.check_value_type("use_locking", use_locking, [bool], self.name)
validator.check_value_type("use_nesterov", use_nesterov, [bool], self.name)
self.init_prim_io_names(inputs=['var', 'm', 'v', 'beta1_power', 'beta2_power', 'lr', 'beta1', 'beta2',
'epsilon', 'grad', 'indices'],
outputs=['var', 'm', 'v'])
def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, beta2_power_shape, lr_shape,
beta1_shape, beta2_shape, epsilon_shape, grad_shape, indices_shape):
validator.check("var_shape", var_shape, "m_shape", m_shape, Rel.EQ, self.name)
validator.check("var_shape", var_shape, "v_shape", v_shape, Rel.EQ, self.name)
validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
if len(var_shape) > 1 and grad_shape != indices_shape + var_shape[1:]:
raise ValueError(f"For '{self.name}', the shape of updates should be [] or "
f"grad_shape = indices_shape + var_shape[1:], but got var_shape: {var_shape}, "
f"indices_shape: {indices_shape}, grad_shape: {grad_shape}.")
return [1], [1], [1]
def infer_dtype(self, var_dtype, m_dtype, v_dtype, beta1_power_dtype, beta2_power_dtype, lr_dtype,
beta1_dtype, beta2_dtype, epsilon_dtype, grad_dtype, indices_dtype):
args = {"var": var_dtype, "m": m_dtype, "v": v_dtype, "grad": grad_dtype}
validator.check_tensor_type_same(args, mstype.number_type, self.name)
args = {"beta1_power": beta1_power_dtype, "beta2_power": beta2_power_dtype, 'lr': lr_dtype,
"beta1": beta1_dtype, "beta2": beta2_dtype, "epsilon": epsilon_dtype}
validator.check_scalar_or_tensor_type_same(args, [mstype.float16, mstype.float32], self.name, True)
validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
return var_dtype, m_dtype, v_dtype
class FusedSparseFtrl(PrimitiveWithInfer):
"""
Merges duplicate values of the gradient and then updates relevant entries according to the FTRL-proximal scheme.
All inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.
If they have different data types, the lower-priority data type will be converted to the
relatively highest-priority data type.
A RuntimeError exception will be thrown when data type conversion of a Parameter is required.
Args:
lr (float): The learning rate value, must be positive.
l1 (float): l1 regularization strength, must be greater than or equal to zero.
l2 (float): l2 regularization strength, must be greater than or equal to zero.
lr_power (float): Learning rate power controls how the learning rate decreases during training,
must be less than or equal to zero. Use fixed learning rate if `lr_power` is zero.
use_locking (bool): Use locks for the updating operation if True. Default: False.
Inputs:
- **var** (Parameter) - The variable to be updated. The data type must be float32.
- **accum** (Parameter) - The accumulation to be updated, must be the same type and shape as `var`.
- **linear** (Parameter) - The linear coefficient to be updated, must be the same type and shape as `var`.
- **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.
- **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`. The shape
of `indices` must be the same as `grad` in first dimension. The type must be int32.
Outputs:
Tuple of 3 Tensors, this operator will update the input parameters directly, the outputs are useless.
- **var** (Tensor) - A Tensor with shape (1,).
- **accum** (Tensor) - A Tensor with shape (1,).
- **linear** (Tensor) - A Tensor with shape (1,).
Examples:
>>> import mindspore
>>> import mindspore.nn as nn
>>> import numpy as np
>>> from mindspore import Parameter
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> class SparseApplyFtrlNet(nn.Cell):
>>> def __init__(self):
>>> super(SparseApplyFtrlNet, self).__init__()
>>> self.sparse_apply_ftrl = P.FusedSparseFtrl(lr=0.01, l1=0.0, l2=0.0, lr_power=-0.5)
>>> self.var = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="var")
>>> self.accum = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="accum")
>>> self.linear = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="linear")
>>>
>>> def construct(self, grad, indices):
>>> out = self.sparse_apply_ftrl(self.var, self.accum, self.linear, grad, indices)
>>> return out
>>>
>>> net = SparseApplyFtrlNet()
>>> grad = Tensor(np.random.rand(2, 1, 2).astype(np.float32))
>>> indices = Tensor(np.array([0, 1]).astype(np.int32))
>>> output = net(grad, indices)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('linear', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
sig.make_sig('indices', dtype=sig.sig_dtype.T1),
)
@prim_attr_register
def __init__(self, lr, l1, l2, lr_power, use_locking=False):
self.init_prim_io_names(inputs=['var', 'accum', 'linear', 'grad', 'indices'],
outputs=['output'])
validator.check_value_type("lr", lr, [float], self.name)
validator.check_value_type("l1", l1, [float], self.name)
validator.check_value_type("l2", l2, [float], self.name)
validator.check_value_type("lr_power", lr_power, [float], self.name)
self.lr = validator.check_number_range("lr", lr, 0.0, float("inf"), Rel.INC_NEITHER, self.name)
self.l1 = validator.check_number_range("l1", l1, 0.0, float("inf"), Rel.INC_LEFT, self.name)
self.l2 = validator.check_number_range("l2", l2, 0.0, float("inf"), Rel.INC_LEFT, self.name)
self.lr_power = validator.check_number("lr_power", lr_power, 0, Rel.LE, self.name)
self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
def infer_shape(self, var_shape, accum_shape, linear_shape, grad_shape, indices_shape):
validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
validator.check('var shape', var_shape, 'linear shape', linear_shape, Rel.EQ, self.name)
if len(var_shape) > 1:
validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
return [1], [1], [1]
def infer_dtype(self, var_dtype, accum_dtype, linear_dtype, grad_dtype, indices_dtype):
args = {"var_dtype": var_dtype, "accum_dtype": accum_dtype,
"linear_dtype": linear_dtype, "grad_dtype": grad_dtype}
validator.check_tensor_type_same(args, [mstype.float32], self.name)
validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
return var_dtype, accum_dtype, linear_dtype
class FusedSparseProximalAdagrad(PrimitiveWithInfer):
r"""
Merges duplicate values of the gradient and then updates relevant entries according to the proximal adagrad
algorithm.
.. math::
accum += grad * grad
.. math::
\text{prox_v} = var - lr * grad * \frac{1}{\sqrt{accum}}
.. math::
var = \frac{sign(\text{prox_v})}{1 + lr * l2} * \max(\left| \text{prox_v} \right| - lr * l1, 0)
All inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.
If they have different data types, the lower-priority data type will be converted to the
relatively highest-priority data type.
A RuntimeError exception will be thrown when data type conversion of a Parameter is required.
Args:
use_locking (bool): If true, the variable and accumulation tensors will be protected from being updated.
Default: False.
Inputs:
- **var** (Parameter) - Variable tensor to be updated. The data type must be float32.
- **accum** (Parameter) - Variable tensor to be updated, has the same dtype as `var`.
- **lr** (Tensor) - The learning rate value. The data type must be float32.
- **l1** (Tensor) - l1 regularization strength. The data type must be float32.
- **l2** (Tensor) - l2 regularization strength. The data type must be float32.
- **grad** (Tensor) - A tensor of the same type as `var`, for the gradient. The data type must be float32.
- **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`. The data type
must be int32.
Outputs:
Tuple of 2 Tensors, this operator will update the input parameters directly, the outputs are useless.
- **var** (Tensor) - A Tensor with shape (1,).
- **accum** (Tensor) - A Tensor with shape (1,).
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.sparse_apply_proximal_adagrad = P.FusedSparseProximalAdagrad()
>>> self.var = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="var")
>>> self.accum = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name="accum")
>>> self.lr = Tensor(0.01, mstype.float32)
>>> self.l1 = Tensor(0.0, mstype.float32)
>>> self.l2 = Tensor(0.0, mstype.float32)
>>> def construct(self, grad, indices):
>>> out = self.sparse_apply_proximal_adagrad(self.var, self.accum, self.lr, self.l1,
>>> self.l2, grad, indices)
>>> return out
>>> net = Net()
>>> grad = Tensor(np.random.rand(2, 1, 2).astype(np.float32))
>>> indices = Tensor(np.array([0, 1]).astype(np.int32))
>>> output = net(grad, indices)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('lr', dtype=sig.sig_dtype.T),
sig.make_sig('l1', dtype=sig.sig_dtype.T),
sig.make_sig('l2', dtype=sig.sig_dtype.T),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
sig.make_sig('indices', dtype=sig.sig_dtype.T1),
)
@prim_attr_register
def __init__(self, use_locking=False):
self.init_prim_io_names(inputs=['var', 'accum', 'lr', 'l1', 'l2', 'grad', 'indices'],
outputs=['output'])
self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
def infer_shape(self, var_shape, accum_shape, lr_shape, l1_shape, l2_shape, grad_shape, indices_shape):
validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
return [1], [1]
def infer_dtype(self, var_dtype, accum_dtype, lr_dtype, l1_dtype, l2_dtype, grad_dtype, indices_dtype):
args = {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}
validator.check_tensor_type_same(args, [mstype.float32], self.name)
validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, [mstype.float32], self.name)
validator.check_scalar_or_tensor_type_same({"l1": l1_dtype}, [mstype.float32], self.name)
validator.check_scalar_or_tensor_type_same({"l2": l2_dtype}, [mstype.float32], self.name)
valid_types = [mstype.int16, mstype.int32, mstype.int64,
mstype.uint16, mstype.uint32, mstype.uint64]
validator.check_tensor_type_same({'indices': indices_dtype}, valid_types, self.name)
return var_dtype, accum_dtype
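# A minimal NumPy sketch of the dense formulas documented above (hypothetical
# helper, illustrative only; the operator itself applies them per `indices`).
def _proximal_adagrad_reference(var, accum, lr, l1, l2, grad):
    """One dense proximal-adagrad step, mirroring the formulas in the docstring."""
    accum = accum + grad * grad
    prox_v = var - lr * grad / np.sqrt(accum)
    var = np.sign(prox_v) / (1 + lr * l2) * np.maximum(np.abs(prox_v) - lr * l1, 0)
    return var, accum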
class KLDivLoss(PrimitiveWithInfer):
r"""
Computes the Kullback-Leibler divergence between the target and the output.
Note:
Sets input as :math:`x`, input label as :math:`y`, output as :math:`\ell(x, y)`.
Let,
.. math::
L = \{l_1,\dots,l_N\}^\top, \quad
l_n = y_n \cdot (\log y_n - x_n)
Then,
.. math::
\ell(x, y) = \begin{cases}
L, & \text{if reduction} = \text{`none';}\\
\operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
\operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
\end{cases}
Args:
reduction (str): Specifies the reduction to be applied to the output.
Its value should be one of 'none', 'mean', 'sum'. Default: 'mean'.
Inputs:
- **input_x** (Tensor) - The input Tensor. The data type must be float32.
- **input_y** (Tensor) - The label Tensor which has the same shape as `input_x`. The data type must be float32.
Outputs:
Tensor or Scalar, if `reduction` is 'none', then output is a tensor and has the same shape as `input_x`.
Otherwise it is a scalar.
Examples:
>>> import mindspore
>>> import mindspore.nn as nn
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.kldiv_loss = P.KLDivLoss()
>>> def construct(self, x, y):
>>> result = self.kldiv_loss(x, y)
>>> return result
>>>
>>> net = Net()
>>> input_x = Tensor(np.array([0.2, 0.7, 0.1]), mindspore.float32)
>>> input_y = Tensor(np.array([0., 1., 0.]), mindspore.float32)
>>> result = net(input_x, input_y)
"""
@prim_attr_register
def __init__(self, reduction='mean'):
self.reduction = validator.check_string('reduction', reduction, ['none', 'mean', 'sum'], self.name)
def infer_shape(self, x_shape, y_shape):
validator.check('x_shape', x_shape, 'y_shape', y_shape, Rel.EQ, self.name)
if self.reduction in ('mean', 'sum'):
shape = []
else:
shape = x_shape
return shape
def infer_dtype(self, x_type, y_type):
args = {'x': x_type, 'y': y_type}
valid_types = (mstype.float16, mstype.float32)
validator.check_tensor_type_same(args, valid_types, self.name)
return x_type
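# A minimal NumPy sketch of the loss documented above (hypothetical helper,
# illustrative only; terms where y_n == 0 are not treated specially here).
def _kldiv_loss_reference(x, y, reduction='mean'):
    """l_n = y_n * (log(y_n) - x_n), followed by 'none' / 'mean' / 'sum' reduction."""
    loss = y * (np.log(y) - x)
    if reduction == 'mean':
        return np.mean(loss)
    if reduction == 'sum':
        return np.sum(loss)
    return loss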
class BinaryCrossEntropy(PrimitiveWithInfer):
r"""
Computes the Binary Cross Entropy between the target and the output.
Note:
Sets input as :math:`x`, input label as :math:`y`, output as :math:`\ell(x, y)`.
Let,
.. math::
L = \{l_1,\dots,l_N\}^\top, \quad
l_n = - w_n \left[ y_n \cdot \log x_n + (1 - y_n) \cdot \log (1 - x_n) \right]
Then,
.. math::
\ell(x, y) = \begin{cases}
L, & \text{if reduction} = \text{`none';}\\
\operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
\operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
\end{cases}
Args:
reduction (str): Specifies the reduction to be applied to the output.
Its value should be one of 'none', 'mean', 'sum'. Default: 'mean'.
Inputs:
- **input_x** (Tensor) - The input Tensor. The data type should be float16 or float32.
- **input_y** (Tensor) - The label Tensor which has same shape and data type as `input_x`.
- **weight** (Tensor, optional) - A rescaling weight applied to the loss of each batch element.
And it should have same shape and data type as `input_x`. Default: None.
Outputs:
Tensor or Scalar, if `reduction` is 'none', then output is a tensor and has the same shape as `input_x`.
Otherwise, the output is a scalar.
Examples:
>>> import mindspore
>>> import mindspore.nn as nn
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.binary_cross_entropy = P.BinaryCrossEntropy()
>>> def construct(self, x, y, weight):
>>> result = self.binary_cross_entropy(x, y, weight)
>>> return result
>>>
>>> net = Net()
>>> input_x = Tensor(np.array([0.2, 0.7, 0.1]), mindspore.float32)
>>> input_y = Tensor(np.array([0., 1., 0.]), mindspore.float32)
>>> weight = Tensor(np.array([1, 2, 2]), mindspore.float32)
>>> result = net(input_x, input_y, weight)
0.38240486
"""
@prim_attr_register
def __init__(self, reduction='mean'):
self.reduction = validator.check_string('reduction', reduction, ['none', 'mean', 'sum'], self.name)
def infer_shape(self, x_shape, y_shape, weight_shape):
validator.check('x_shape', x_shape, 'y_shape', y_shape, Rel.EQ, self.name)
if weight_shape:
validator.check('y_shape', y_shape, 'weight_shape', weight_shape, Rel.EQ, self.name)
if self.reduction in ('mean', 'sum'):
shape = []
else:
shape = x_shape
return shape
def infer_dtype(self, x_type, y_type, weight_type):
args = {'x': x_type, 'y': y_type}
valid_types = (mstype.float16, mstype.float32)
validator.check_tensor_type_same(args, valid_types, self.name)
if weight_type:
validator.check_tensor_type_same({'x': x_type, 'weight': weight_type}, valid_types, self.name)
return x_type
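# A minimal NumPy sketch of the loss documented above (hypothetical helper,
# illustrative only; not part of the operator API). With the docstring inputs
# x=[0.2, 0.7, 0.1], y=[0., 1., 0.], weight=[1, 2, 2] it reproduces the
# documented value 0.38240486 up to float precision.
def _binary_cross_entropy_reference(x, y, weight=None, reduction='mean'):
    """l_n = -w_n * [y_n * log(x_n) + (1 - y_n) * log(1 - x_n)], then reduce."""
    loss = -(y * np.log(x) + (1 - y) * np.log(1 - x))
    if weight is not None:
        loss = weight * loss
    if reduction == 'mean':
        return np.mean(loss)
    if reduction == 'sum':
        return np.sum(loss)
    return loss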
class ApplyAdaMax(PrimitiveWithInfer):
r"""
Update relevant entries according to the adamax scheme.
The updating formulas are as follows,
.. math::
\begin{array}{ll} \\
m_{t} = \beta_1 * m_{t-1} + (1 - \beta_1) * g \\
v_{t} = \max(\beta_2 * v_{t-1}, \left| g \right|) \\
var = var - \frac{l}{1 - \beta_1^t} * \frac{m_{t}}{v_{t} + \epsilon}
\end{array}
:math:`t` represents updating step while :math:`m` represents the 1st moment vector, :math:`m_{t-1}`
is the last moment of :math:`m_{t}`, :math:`v` represents the 2nd moment vector, :math:`v_{t-1}`
is the last moment of :math:`v_{t}`, :math:`l` represents scaling factor `lr`,
:math:`g` represents `grad`, :math:`\beta_1, \beta_2` represent `beta1` and `beta2`,
:math:`beta_1^t` represents `beta1_power`, :math:`var` represents the variable to be updated,
:math:`\epsilon` represents `epsilon`.
Inputs of `var`, `m`, `v` and `grad` comply with the implicit type conversion rules
to make the data types consistent.
If they have different data types, the lower-priority data type will be converted to the
relatively highest-priority data type.
A RuntimeError exception will be thrown when data type conversion of a Parameter is required.
Inputs:
- **var** (Parameter) - Variable to be updated. With float32 or float16 data type.
- **m** (Parameter) - The 1st moment vector in the updating formula, has the same shape and type as `var`.
With float32 or float16 data type.
- **v** (Parameter) - The 2nd moment vector in the updating formula. Mean square gradients
with the same shape and type as `var`. With float32 or float16 data type.
- **beta1_power** (Union[Number, Tensor]) - :math:`beta_1^t` in the updating formula, should be scalar.
With float32 or float16 data type.
- **lr** (Union[Number, Tensor]) - Learning rate, :math:`l` in the updating formula, should be scalar.
With float32 or float16 data type.
- **beta1** (Union[Number, Tensor]) - The exponential decay rate for the 1st moment estimations,
should be scalar. With float32 or float16 data type.
- **beta2** (Union[Number, Tensor]) - The exponential decay rate for the 2nd moment estimations,
should be scalar. With float32 or float16 data type.
- **epsilon** (Union[Number, Tensor]) - A small value added for numerical stability, should be scalar.
With float32 or float16 data type.
- **grad** (Tensor) - A tensor for gradient, has the same shape and type as `var`.
With float32 or float16 data type.
Outputs:
Tuple of 3 Tensors, the updated parameters.
- **var** (Tensor) - The same shape and data type as `var`.
- **m** (Tensor) - The same shape and data type as `m`.
- **v** (Tensor) - The same shape and data type as `v`.
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> import mindspore.common.dtype as mstype
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.apply_ada_max = P.ApplyAdaMax()
>>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
>>> self.m = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="m")
>>> self.v = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="v")
>>> def construct(self, beta1_power, lr, beta1, beta2, epsilon, grad):
>>> out = self.apply_ada_max(self.var, self.m, self.v, beta1_power, lr, beta1, beta2, epsilon, grad)
>>> return out
>>> net = Net()
>>> beta1_power =Tensor(0.9, mstype.float32)
>>> lr = Tensor(0.001, mstype.float32)
>>> beta1 = Tensor(0.9, mstype.float32)
>>> beta2 = Tensor(0.99, mstype.float32)
>>> epsilon = Tensor(1e-10, mstype.float32)
>>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
>>> result = net(beta1_power, lr, beta1, beta2, epsilon, grad)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('beta1_power', dtype=sig.sig_dtype.T1),
sig.make_sig('lr', dtype=sig.sig_dtype.T2),
sig.make_sig('beta1', dtype=sig.sig_dtype.T3),
sig.make_sig('beta2', dtype=sig.sig_dtype.T4),
sig.make_sig('epsilon', dtype=sig.sig_dtype.T5),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
)
@prim_attr_register
def __init__(self):
"""init ApplyAdaMax"""
def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, lr_shape,
beta1_shape, beta2_shape, epsilon_shape, grad_shape):
validator.check("m_shape", m_shape, "var_shape", var_shape, Rel.EQ, self.name)
validator.check("v_shape", v_shape, "var_shape", var_shape, Rel.EQ, self.name)
validator.check("grad_shape", grad_shape, "var_shape", var_shape, Rel.EQ, self.name)
beta1_power_shp_len = len(beta1_power_shape)
validator.check_integer("beta1 power's rank", beta1_power_shp_len, 1, Rel.LE, self.name)
if beta1_power_shp_len == 1:
validator.check_integer("beta1_power_shape[0]", beta1_power_shape[0], 1, Rel.EQ, self.name)
lr_shp_len = len(lr_shape)
validator.check_integer("lr's rank", lr_shp_len, 1, Rel.LE, self.name)
if lr_shp_len == 1:
validator.check_integer("lr_shape[0]", lr_shape[0], 1, Rel.EQ, self.name)
beta1_shp_len = len(beta1_shape)
validator.check_integer("beta1's rank", beta1_shp_len, 1, Rel.LE, self.name)
if beta1_shp_len == 1:
validator.check_integer("beta1_shape[0]", beta1_shape[0], 1, Rel.EQ, self.name)
beta2_shp_len = len(beta2_shape)
validator.check_integer("beta2's rank", beta2_shp_len, 1, Rel.LE, self.name)
if beta2_shp_len == 1:
validator.check_integer("beta2_shape[0]", beta2_shape[0], 1, Rel.EQ, self.name)
epsilon_shp_len = len(epsilon_shape)
validator.check_integer("epsilon's rank", epsilon_shp_len, 1, Rel.LE, self.name)
if epsilon_shp_len == 1:
validator.check_integer("epsilon_shape[0]", epsilon_shape[0], 1, Rel.EQ, self.name)
return var_shape, m_shape, v_shape
def infer_dtype(self, var_dtype, m_dtype, v_dtype, beta1_power_dtype, lr_dtype,
beta1_dtype, beta2_dtype, epsilon_dtype, grad_dtype):
valid_types = [mstype.float16, mstype.float32]
args = {"var": var_dtype, "m": m_dtype, "v": v_dtype, "grad": grad_dtype}
validator.check_tensor_type_same(args, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"beta1_power": beta1_power_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"beta1": beta1_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"beta2": beta2_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"epsilon": epsilon_dtype}, valid_types, self.name)
return var_dtype, m_dtype, v_dtype
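# A minimal NumPy sketch of the update formulas documented above (hypothetical
# helper, illustrative only; not part of the operator API).
def _ada_max_update_reference(var, m, v, beta1_power, lr, beta1, beta2, epsilon, grad):
    """One dense AdaMax step, mirroring the formulas in the ApplyAdaMax docstring."""
    m = beta1 * m + (1 - beta1) * grad
    v = np.maximum(beta2 * v, np.abs(grad))
    var = var - lr / (1 - beta1_power) * m / (v + epsilon)
    return var, m, v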
class ApplyAdadelta(PrimitiveWithInfer):
r"""
Update relevant entries according to the adadelta scheme.
.. math::
accum = \rho * accum + (1 - \rho) * grad^2
.. math::
\text{update} = \sqrt{\text{accum_update} + \epsilon} * \frac{grad}{\sqrt{accum + \epsilon}}
.. math::
\text{accum_update} = \rho * \text{accum_update} + (1 - \rho) * update^2
.. math::
var -= lr * update
Inputs of `var`, `accum`, `accum_update` and `grad` comply with the implicit type conversion rules
to make the data types consistent.
If they have different data types, the lower-priority data type will be converted to the
relatively highest-priority data type.
A RuntimeError exception will be thrown when data type conversion of a Parameter is required.
Inputs:
- **var** (Parameter) - Weights to be updated. With float32 or float16 data type.
- **accum** (Parameter) - Accumulation to be updated, has the same shape and type as `var`.
With float32 or float16 data type.
- **accum_update** (Parameter) - Accum_update to be updated, has the same shape and type as `var`.
With float32 or float16 data type.
- **lr** (Union[Number, Tensor]) - Learning rate, should be scalar. With float32 or float16 data type.
- **rho** (Union[Number, Tensor]) - Decay rate, should be scalar. With float32 or float16 data type.
- **epsilon** (Union[Number, Tensor]) - A small value added for numerical stability, should be scalar.
With float32 or float16 data type.
- **grad** (Tensor) - Gradients, has the same shape and type as `var`. With float32 or float16 data type.
Outputs:
Tuple of 3 Tensors, the updated parameters.
- **var** (Tensor) - The same shape and data type as `var`.
- **accum** (Tensor) - The same shape and data type as `accum`.
- **accum_update** (Tensor) - The same shape and data type as `accum_update`.
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> import mindspore.common.dtype as mstype
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.apply_adadelta = P.ApplyAdadelta()
>>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
>>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
>>> self.accum_update = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum_update")
>>> def construct(self, lr, rho, epsilon, grad):
>>> out = self.apply_adadelta(self.var, self.accum, self.accum_update, lr, rho, epsilon, grad)
>>> return out
>>> net = Net()
>>> lr = Tensor(0.001, mstype.float32)
>>> rho = Tensor(0.0, mstype.float32)
>>> epsilon = Tensor(1e-6, mstype.float32)
>>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
>>> result = net(lr, rho, epsilon, grad)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('accum_update', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('lr', dtype=sig.sig_dtype.T1),
sig.make_sig('rho', dtype=sig.sig_dtype.T2),
sig.make_sig('epsilon', dtype=sig.sig_dtype.T3),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
)
@prim_attr_register
def __init__(self):
"""init ApplyAdadelta"""
def infer_shape(self, var_shape, accum_shape, accum_update_shape, lr_shape, rho_shape,
epsilon_shape, grad_shape):
validator.check("accum_shape", accum_shape, "var_shape", var_shape, Rel.EQ, self.name)
validator.check("accum_update_shape", accum_update_shape, "var_shape", var_shape, Rel.EQ, self.name)
validator.check("grad_shape", grad_shape, "var_shape", var_shape, Rel.EQ, self.name)
lr_shp_len = len(lr_shape)
validator.check_integer("lr's rank", lr_shp_len, 1, Rel.LE, self.name)
if lr_shp_len == 1:
validator.check_integer("lr_shape[0]", lr_shape[0], 1, Rel.EQ, self.name)
rho_shp_len = len(rho_shape)
validator.check_integer("rho's rank", rho_shp_len, 1, Rel.LE, self.name)
if rho_shp_len == 1:
validator.check_integer("rho_shape[0]", rho_shape[0], 1, Rel.EQ, self.name)
epsilon_shp_len = len(epsilon_shape)
validator.check_integer("lepsilon's rank", epsilon_shp_len, 1, Rel.LE, self.name)
if epsilon_shp_len == 1:
validator.check_integer("epsilon_shape[0]", epsilon_shape[0], 1, Rel.EQ, self.name)
return var_shape, accum_shape, accum_update_shape
def infer_dtype(self, var_dtype, accum_dtype, accum_update_dtype, lr_dtype, rho_dtype,
epsilon_dtype, grad_dtype):
valid_types = [mstype.float16, mstype.float32]
args = {"var": var_dtype, "accum": accum_dtype, "accum_update": accum_update_dtype, "grad": grad_dtype}
validator.check_tensor_type_same(args, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"rho": rho_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"epsilon": epsilon_dtype}, valid_types, self.name)
return var_dtype, accum_dtype, accum_update_dtype
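# A minimal NumPy sketch of the update formulas documented above (hypothetical
# helper, illustrative only; not part of the operator API).
def _adadelta_update_reference(var, accum, accum_update, lr, rho, epsilon, grad):
    """One dense Adadelta step, mirroring the formulas in the ApplyAdadelta docstring."""
    accum = rho * accum + (1 - rho) * grad * grad
    update = np.sqrt(accum_update + epsilon) * grad / np.sqrt(accum + epsilon)
    accum_update = rho * accum_update + (1 - rho) * update * update
    var = var - lr * update
    return var, accum, accum_update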
class ApplyAdagrad(PrimitiveWithInfer):
r"""
Update relevant entries according to the adagrad scheme.
.. math::
accum += grad * grad
.. math::
var -= lr * grad * \frac{1}{\sqrt{accum}}
Inputs of `var`, `accum` and `grad` comply with the implicit type conversion rules
to make the data types consistent.
If they have different data types, the lower-priority data type will be converted to the
relatively highest-priority data type.
A RuntimeError exception will be thrown when data type conversion of a Parameter is required.
Args:
update_slots (bool): If `True`, `accum` will be updated. Default: True.
Inputs:
- **var** (Parameter) - Variable to be updated. With float32 or float16 data type.
- **accum** (Parameter) - Accumulation to be updated. The shape and dtype should be the same as `var`.
With float32 or float16 data type.
- **lr** (Union[Number, Tensor]) - The learning rate value, should be scalar. With float32 or float16 data type.
- **grad** (Tensor) - A tensor for gradient. The shape and dtype should be the same as `var`.
With float32 or float16 data type.
Outputs:
Tuple of 2 Tensors, the updated parameters.
- **var** (Tensor) - The same shape and data type as `var`.
- **accum** (Tensor) - The same shape and data type as `accum`.
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> import mindspore.common.dtype as mstype
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.apply_adagrad = P.ApplyAdagrad()
>>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
>>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
>>> def construct(self, lr, grad):
>>> out = self.apply_adagrad(self.var, self.accum, lr, grad)
>>> return out
>>> net = Net()
>>> lr = Tensor(0.001, mstype.float32)
>>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
>>> result = net(lr, grad)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('lr', dtype=sig.sig_dtype.T1),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
)
@prim_attr_register
def __init__(self, update_slots=True):
validator.check_value_type("update_slots", update_slots, [bool], self.name)
def infer_shape(self, var_shape, accum_shape, lr_shape, grad_shape):
validator.check('accum shape', accum_shape, 'var shape', var_shape, Rel.EQ, self.name)
validator.check('grad shape', grad_shape, 'var shape', var_shape, Rel.EQ, self.name)
lr_shp_len = len(lr_shape)
validator.check_integer("lr's rank", lr_shp_len, 1, Rel.LE, self.name)
if lr_shp_len == 1:
validator.check_integer("lr_shape[0]", lr_shape[0], 1, Rel.EQ, self.name)
return var_shape, accum_shape
def infer_dtype(self, var_dtype, accum_dtype, lr_dtype, grad_dtype):
args = {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}
valid_types = [mstype.float16, mstype.float32]
validator.check_tensor_type_same(args, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({'lr': lr_dtype}, valid_types, self.name)
return var_dtype, accum_dtype
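# A minimal NumPy sketch (illustration only, not the operator's kernel) of the Adagrad
# update documented above; `_np_adagrad_step` is a hypothetical helper name.
def _np_adagrad_step(var, accum, lr, grad, update_slots=True):
    """One reference Adagrad step: accum += grad * grad; var -= lr * grad / sqrt(accum)."""
    import numpy as np  # local import keeps the sketch self-contained
    if update_slots:
        accum = accum + grad * grad
    var = var - lr * grad / np.sqrt(accum)
    return var, accum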
class ApplyAdagradV2(PrimitiveWithInfer):
r"""
Update relevant entries according to the adagradv2 scheme.
.. math::
accum += grad * grad
.. math::
var -= lr * grad * \frac{1}{\sqrt{accum} + \epsilon}
Inputs of `var`, `accum` and `grad` comply with the implicit type conversion rules
to make the data types consistent.
If they have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Args:
epsilon (float): A small value added for numerical stability.
update_slots (bool): If `True`, `accum` will be updated. Default: True.
Inputs:
- **var** (Parameter) - Variable to be updated. With float16 or float32 data type.
- **accum** (Parameter) - Accumulation to be updated. The shape and dtype should be the same as `var`.
With float16 or float32 data type.
- **lr** (Union[Number, Tensor]) - The learning rate value, should be a float number or
a scalar tensor with float16 or float32 data type.
- **grad** (Tensor) - A tensor for gradient. The shape and dtype should be the same as `var`.
With float16 or float32 data type.
Outputs:
Tuple of 2 Tensor, the updated parameters.
- **var** (Tensor) - The same shape and data type as `var`.
- **accum** (Tensor) - The same shape and data type as `accum`.
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> import mindspore.common.dtype as mstype
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.apply_adagrad_v2 = P.ApplyAdagradV2(epsilon=1e-6)
>>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
>>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
>>> def construct(self, lr, grad):
>>> out = self.apply_adagrad_v2(self.var, self.accum, lr, grad)
>>> return out
>>> net = Net()
>>> lr = Tensor(0.001, mstype.float32)
>>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
>>> result = net(lr, grad)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('lr', dtype=sig.sig_dtype.T1),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
)
@prim_attr_register
def __init__(self, epsilon, update_slots=True):
validator.check_value_type("epsilon", epsilon, [float], self.name)
validator.check_value_type("update_slots", update_slots, [bool], self.name)
def infer_shape(self, var_shape, accum_shape, lr_shape, grad_shape):
validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
validator.check('var shape', var_shape, 'grad shape', grad_shape, Rel.EQ, self.name)
lr_shp_len = len(lr_shape)
validator.check_integer("lr's rank", lr_shp_len, 1, Rel.LE, self.name)
if lr_shp_len == 1:
validator.check_integer("lr_shape[0]", lr_shape[0], 1, Rel.EQ, self.name)
return var_shape, accum_shape
def infer_dtype(self, var_dtype, accum_dtype, lr_dtype, grad_dtype):
args = {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}
validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
validator.check_scalar_or_tensor_type_same({'lr': lr_dtype}, [mstype.float16, mstype.float32], self.name)
return var_dtype, accum_dtype
class SparseApplyAdagrad(PrimitiveWithInfer):
r"""
Update relevant entries according to the adagrad scheme.
.. math::
accum += grad * grad
.. math::
var -= lr * grad * \frac{1}{\sqrt{accum}}
Inputs of `var`, `accum` and `grad` comply with the implicit type conversion rules
to make the data types consistent.
If they have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Args:
lr (float): Learning rate.
update_slots (bool): If `True`, `accum` will be updated. Default: True.
use_locking (bool): If true, the var and accumulation tensors will be protected from being updated.
Default: False.
Inputs:
- **var** (Parameter) - Variable to be updated. The data type must be float16 or float32.
- **accum** (Parameter) - Accumulation to be updated. The shape and dtype should be the same as `var`.
- **grad** (Tensor) - Gradient. The shape must be the same as `var`'s shape except the first dimension.
Has the same data type as `var`.
- **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.
The shape of `indices` must be the same as `grad` in first dimension, the type must be int32.
Outputs:
Tuple of 2 Tensor, the updated parameters.
- **var** (Tensor) - The same shape and data type as `var`.
- **accum** (Tensor) - The same shape and data type as `accum`.
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> import mindspore.common.dtype as mstype
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.sparse_apply_adagrad = P.SparseApplyAdagrad(lr=1e-8)
>>> self.var = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="var")
>>> self.accum = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="accum")
>>> def construct(self, grad, indices):
>>> out = self.sparse_apply_adagrad(self.var, self.accum, grad, indices)
>>> return out
>>> net = Net()
>>> grad = Tensor(np.random.rand(3, 3, 3).astype(np.float32))
>>> indices = Tensor([0, 1, 2], mstype.int32)
>>> result = net(grad, indices)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
sig.make_sig('indices', dtype=sig.sig_dtype.T1),
)
@prim_attr_register
def __init__(self, lr, update_slots=True, use_locking=False):
validator.check_value_type("lr", lr, [float], self.name)
validator.check_number_range("lr", lr, float("-inf"), float("inf"), Rel.INC_NEITHER, self.name)
validator.check_value_type("update_slots", update_slots, [bool], self.name)
validator.check_value_type("use_locking", use_locking, [bool], self.name)
def infer_shape(self, var_shape, accum_shape, grad_shape, indices_shape):
validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
validator.check('len of var shape', len(var_shape), 'len of grad shape', len(grad_shape), Rel.EQ, self.name)
if len(var_shape) > 1:
validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
return var_shape, accum_shape
def infer_dtype(self, var_type, accum_type, grad_type, indices_type):
args = {'var': var_type, 'accum': accum_type, 'grad': grad_type}
validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
validator.check_tensor_type_same({'indices': indices_type}, [mstype.int32], self.name)
return var_type, accum_type
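# Sketch of the row-wise semantics implied by the shape checks above (an assumption for
# illustration, not the kernel): only the rows of `var`/`accum` selected by `indices` are
# updated, and `grad[i]` applies to row `indices[i]`.
def _np_sparse_adagrad_step(var, accum, grad, indices, lr):
    import numpy as np
    for i, row in enumerate(indices):
        accum[row] = accum[row] + grad[i] * grad[i]
        var[row] = var[row] - lr * grad[i] / np.sqrt(accum[row])
    return var, accum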
class SparseApplyAdagradV2(PrimitiveWithInfer):
r"""
Update relevant entries according to the adagrad scheme.
.. math::
accum += grad * grad
.. math::
var -= lr * grad * \frac{1}{\sqrt{accum} + \epsilon}
Inputs of `var`, `accum` and `grad` comply with the implicit type conversion rules
to make the data types consistent.
If they have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Args:
lr (float): Learning rate.
epsilon (float): A small value added for numerical stability.
use_locking (bool): If `True`, the var and accumulation tensors will be protected from being updated.
Default: False.
update_slots (bool): If `True`, `accum` will be updated. Default: True.
Inputs:
- **var** (Parameter) - Variable to be updated. The data type must be float16 or float32.
- **accum** (Parameter) - Accumulation to be updated. The shape and dtype should be the same as `var`.
- **grad** (Tensor) - Gradient. The shape must be the same as `var`'s shape except the first dimension.
Has the same data type as `var`.
- **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.
The shape of `indices` must be the same as `grad` in first dimension, the type must be int32.
Outputs:
Tuple of 2 Tensor, the updated parameters.
- **var** (Tensor) - The same shape and data type as `var`.
- **accum** (Tensor) - The same shape and data type as `accum`.
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> import mindspore.common.dtype as mstype
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.sparse_apply_adagrad_v2 = P.SparseApplyAdagradV2(lr=1e-8, epsilon=1e-6)
>>> self.var = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="var")
>>> self.accum = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="accum")
>>>
>>> def construct(self, grad, indices):
>>> out = self.sparse_apply_adagrad_v2(self.var, self.accum, grad, indices)
>>> return out
>>> net = Net()
>>> grad = Tensor(np.random.rand(3, 3, 3).astype(np.float32))
>>> indices = Tensor([0, 1, 2], mstype.int32)
>>> result = net(grad, indices)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
sig.make_sig('indices', dtype=sig.sig_dtype.T1),
)
@prim_attr_register
def __init__(self, lr, epsilon, use_locking=False, update_slots=True):
self.lr = validator.check_value_type("lr", lr, [float], self.name)
self.epsilon = validator.check_value_type("epsilon", epsilon, [float], self.name)
self.use_locking = validator.check_value_type("update_slots", update_slots, [bool], self.name)
self.update_slots = validator.check_value_type("use_locking", use_locking, [bool], self.name)
def infer_shape(self, var_shape, accum_shape, grad_shape, indices_shape):
validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
validator.check('len of var shape', len(var_shape), 'len of grad shape', len(grad_shape), Rel.EQ, self.name)
if len(var_shape) > 1:
validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
return var_shape, accum_shape
def infer_dtype(self, var_type, accum_type, grad_type, indices_type):
args = {'var': var_type, 'accum': accum_type, 'grad': grad_type}
validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
validator.check_tensor_type_same({'indices': indices_type}, [mstype.int32], self.name)
return var_type, accum_type
class ApplyProximalAdagrad(PrimitiveWithInfer):
r"""
Update relevant entries according to the proximal adagrad algorithm.
.. math::
accum += grad * grad
.. math::
\text{prox_v} = var - lr * grad * \frac{1}{\sqrt{accum}}
.. math::
var = \frac{sign(\text{prox_v})}{1 + lr * l2} * \max(\left| \text{prox_v} \right| - lr * l1, 0)
Inputs of `var`, `accum` and `grad` comply with the implicit type conversion rules
to make the data types consistent.
If they have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Args:
use_locking (bool): If true, the var and accumulation tensors will be protected from being updated.
Default: False.
Inputs:
- **var** (Parameter) - Variable to be updated. The data type should be float16 or float32.
- **accum** (Parameter) - Accumulation to be updated. Must have the same shape and dtype as `var`.
- **lr** (Union[Number, Tensor]) - The learning rate value, should be scalar. The data type should be
float16 or float32.
- **l1** (Union[Number, Tensor]) - l1 regularization strength, should be scalar. The data type should be
float16 or float32.
- **l2** (Union[Number, Tensor]) - l2 regularization strength, should be scalar. The data type should be
float16 or float32.
- **grad** (Tensor) - Gradient with the same shape and dtype as `var`.
Outputs:
Tuple of 2 Tensor, the updated parameters.
- **var** (Tensor) - The same shape and data type as `var`.
- **accum** (Tensor) - The same shape and data type as `accum`.
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.apply_proximal_adagrad = P.ApplyProximalAdagrad()
>>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
>>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
>>> self.lr = 0.01
>>> self.l1 = 0.0
>>> self.l2 = 0.0
>>> def construct(self, grad):
>>> out = self.apply_proximal_adagrad(self.var, self.accum, self.lr, self.l1, self.l2, grad)
>>> return out
>>> net = Net()
>>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
>>> output = net(grad)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('lr', dtype=sig.sig_dtype.T1),
sig.make_sig('l1', dtype=sig.sig_dtype.T2),
sig.make_sig('l2', dtype=sig.sig_dtype.T3),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
)
@prim_attr_register
def __init__(self, use_locking=False):
self.init_prim_io_names(inputs=['var', 'accum', 'lr', 'l1', 'l2', 'grad'],
outputs=['var', 'accum'])
self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
def infer_shape(self, var_shape, accum_shape, lr_shape, l1_shape, l2_shape, grad_shape):
validator.check('accum shape', accum_shape, 'var shape', var_shape, Rel.EQ, self.name)
validator.check('grad shape', grad_shape, 'var shape', var_shape, Rel.EQ, self.name)
lr_shp_len = len(lr_shape)
validator.check_integer("lr's rank", lr_shp_len, 1, Rel.LE, self.name)
if lr_shp_len == 1:
validator.check_integer("lr_shape[0]", lr_shape[0], 1, Rel.EQ, self.name)
l1_shp_len = len(l1_shape)
validator.check_integer("l1's rank", l1_shp_len, 1, Rel.LE, self.name)
if l1_shp_len == 1:
validator.check_integer("l1_shape[0]", l1_shape[0], 1, Rel.EQ, self.name)
l2_shp_len = len(l2_shape)
validator.check_integer("l2's rank", l2_shp_len, 1, Rel.LE, self.name)
if l2_shp_len == 1:
validator.check_integer("l2_shape[0]", l2_shape[0], 1, Rel.EQ, self.name)
return var_shape, accum_shape
def infer_dtype(self, var_dtype, accum_dtype, lr_dtype, l1_dtype, l2_dtype, grad_dtype):
valid_types = [mstype.float16, mstype.float32]
args = {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}
validator.check_tensor_type_same(args, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"l1": l1_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"l2": l2_dtype}, valid_types, self.name)
return var_dtype, accum_dtype
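# A hedged NumPy sketch of the proximal Adagrad formulas documented above
# (soft-thresholding by lr * l1, then shrinking by 1 + lr * l2); illustration only,
# not the operator's kernel.
def _np_proximal_adagrad_step(var, accum, lr, l1, l2, grad):
    import numpy as np
    accum = accum + grad * grad
    prox_v = var - lr * grad / np.sqrt(accum)
    var = np.sign(prox_v) / (1 + lr * l2) * np.maximum(np.abs(prox_v) - lr * l1, 0)
    return var, accum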
class SparseApplyProximalAdagrad(PrimitiveWithCheck):
r"""
Update relevant entries according to the proximal adagrad algorithm. Compared with ApplyProximalAdagrad,
an additional index tensor is input.
.. math::
accum += grad * grad
.. math::
\text{prox_v} = var - lr * grad * \frac{1}{\sqrt{accum}}
.. math::
var = \frac{sign(\text{prox_v})}{1 + lr * l2} * \max(\left| \text{prox_v} \right| - lr * l1, 0)
Inputs of `var`, `accum` and `grad` comply with the implicit type conversion rules
to make the data types consistent.
If they have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Args:
use_locking (bool): If true, the var and accumulation tensors will be protected from being updated.
Default: False.
Inputs:
- **var** (Parameter) - Variable tensor to be updated. The data type must be float16 or float32.
- **accum** (Parameter) - Variable tensor to be updated, has the same dtype as `var`.
- **lr** (Union[Number, Tensor]) - The learning rate value. It should be a float number or
a scalar tensor with float16 or float32 data type.
- **l1** (Union[Number, Tensor]) - l1 regularization strength. It should be a float number or
a scalar tensor with float16 or float32 data type.
- **l2** (Union[Number, Tensor]) - l2 regularization strength. It should be a float number or
a scalar tensor with float16 or float32 data type.
- **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.
- **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.
Outputs:
Tuple of 2 Tensor, the updated parameters.
- **var** (Tensor) - The same shape and data type as `var`.
- **accum** (Tensor) - The same shape and data type as `accum`.
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.sparse_apply_proximal_adagrad = P.SparseApplyProximalAdagrad()
>>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
>>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
>>> self.lr = 0.01
>>> self.l1 = 0.0
>>> self.l2 = 0.0
>>> def construct(self, grad, indices):
>>> out = self.sparse_apply_proximal_adagrad(self.var, self.accum, self.lr, self.l1,
>>>                                                      self.l2, grad, indices)
>>> return out
>>> net = Net()
>>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
>>> indices = Tensor(np.ones((3,), np.int32))
>>> output = net(grad, indices)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('lr', dtype=sig.sig_dtype.T1),
sig.make_sig('l1', dtype=sig.sig_dtype.T2),
sig.make_sig('l2', dtype=sig.sig_dtype.T3),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
sig.make_sig('indices', dtype=sig.sig_dtype.T4),
)
@prim_attr_register
def __init__(self, use_locking=False):
self.init_prim_io_names(inputs=['var', 'accum', 'lr', 'l1', 'l2', 'grad', 'indices'],
outputs=['var', 'accum'])
self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
def check_shape(self, var_shape, accum_shape, lr_shape, l1_shape, l2_shape, grad_shape, indices_shape):
validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
def check_dtype(self, var_dtype, accum_dtype, lr_dtype, l1_dtype, l2_dtype, grad_dtype, indices_dtype):
args = {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}
validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, [mstype.float16, mstype.float32], self.name)
validator.check_scalar_or_tensor_type_same({"l1": l1_dtype}, [mstype.float16, mstype.float32], self.name)
validator.check_scalar_or_tensor_type_same({"l2": l2_dtype}, [mstype.float16, mstype.float32], self.name)
valid_types = [mstype.int16, mstype.int32, mstype.int64,
mstype.uint16, mstype.uint32, mstype.uint64]
validator.check_tensor_type_same({'indices': indices_dtype}, valid_types, self.name)
class ApplyAddSign(PrimitiveWithInfer):
r"""
Update relevant entries according to the AddSign algorithm.
.. math::
\begin{array}{ll} \\
m_{t} = \beta * m_{t-1} + (1 - \beta) * g \\
\text{update} = (\alpha + \text{sign_decay} * sign(g) * sign(m)) * g \\
var = var - lr_{t} * \text{update}
\end{array}
:math:`t` represents the updating step while :math:`m` represents the 1st moment vector, :math:`m_{t-1}`
is the last moment of :math:`m_{t}`, :math:`lr` represents the scaling factor `lr`, and :math:`g` represents `grad`.
Inputs of `var`, `m` and `grad` comply with the implicit type conversion rules
to make the data types consistent.
If they have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Inputs:
- **var** (Parameter) - Variable tensor to be updated. With float32 or float16 data type.
- **m** (Parameter) - Variable tensor to be updated, has the same dtype as `var`.
- **lr** (Union[Number, Tensor]) - The learning rate value, should be a scalar.
With float32 or float16 data type.
- **alpha** (Union[Number, Tensor]) - Should be a scalar. With float32 or float16 data type.
- **sign_decay** (Union[Number, Tensor]) - Should be a scalar. With float32 or float16 data type.
- **beta** (Union[Number, Tensor]) - The exponential decay rate, should be a scalar.
With float32 or float16 data type.
- **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.
Outputs:
Tuple of 2 Tensor, the updated parameters.
- **var** (Tensor) - The same shape and data type as `var`.
- **m** (Tensor) - The same shape and data type as `m`.
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.apply_add_sign = P.ApplyAddSign()
>>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
>>> self.m = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="m")
>>> self.lr = 0.001
>>> self.alpha = 1.0
>>> self.sign_decay = 0.99
>>> self.beta = 0.9
>>> def construct(self, grad):
>>> out = self.apply_add_sign(self.var, self.m, self.lr, self.alpha, self.sign_decay, self.beta, grad)
>>> return out
>>> net = Net()
>>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
>>> output = net(grad)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('lr', dtype=sig.sig_dtype.T1),
sig.make_sig('alpha', dtype=sig.sig_dtype.T2),
sig.make_sig('sign_decay', dtype=sig.sig_dtype.T3),
sig.make_sig('beta', dtype=sig.sig_dtype.T3),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
)
@prim_attr_register
def __init__(self):
"init ApplyAddSign"
def infer_shape(self, var_shape, m_shape, lr_shape, alpha_shape, sign_decay_shape, beta_shape, grad_shape):
validator.check('m_shape', m_shape, 'var_shape', var_shape, Rel.EQ, self.name)
validator.check('grad_shape', grad_shape, 'var_shape', var_shape, Rel.EQ, self.name)
lr_shape_len = len(lr_shape)
validator.check_integer("lr's rank", lr_shape_len, 1, Rel.LE, self.name)
if lr_shape_len == 1:
validator.check_integer("lr_shape[0]", lr_shape[0], 1, Rel.EQ, self.name)
alpha_shape_len = len(alpha_shape)
validator.check_integer("alpha's rank", alpha_shape_len, 1, Rel.LE, self.name)
if alpha_shape_len == 1:
validator.check_integer("alpha_shape[0]", alpha_shape[0], 1, Rel.EQ, self.name)
sign_decay_shape_len = len(sign_decay_shape)
validator.check_integer("sign_decay's rank", sign_decay_shape_len, 1, Rel.LE, self.name)
if sign_decay_shape_len == 1:
validator.check_integer("sign_decay_shape[0]", sign_decay_shape[0], 1, Rel.EQ, self.name)
beta_shape_len = len(beta_shape)
validator.check_integer("beta's rank", beta_shape_len, 1, Rel.LE, self.name)
if beta_shape_len == 1:
validator.check_integer("beta_shape[0]", beta_shape[0], 1, Rel.EQ, self.name)
return var_shape, m_shape
def infer_dtype(self, var_dtype, m_dtype, lr_dtype, alpha_dtype, sign_decay_dtype, beta_dtype, grad_dtype):
valid_types = [mstype.float16, mstype.float32]
args = {'var': var_dtype, 'm': m_dtype, 'grad': grad_dtype}
validator.check_tensor_type_same(args, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"alpha": alpha_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"sign_decay": sign_decay_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"beta": beta_dtype}, valid_types, self.name)
return var_dtype, m_dtype
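# NumPy sketch of the AddSign rule stated in the docstring above; illustration only,
# not the kernel, and the helper name is hypothetical.
def _np_add_sign_step(var, m, lr, alpha, sign_decay, beta, grad):
    import numpy as np
    m = beta * m + (1 - beta) * grad
    update = (alpha + sign_decay * np.sign(grad) * np.sign(m)) * grad
    return var - lr * update, m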
class ApplyPowerSign(PrimitiveWithInfer):
r"""
Update relevant entries according to the PowerSign algorithm.
.. math::
\begin{array}{ll} \\
m_{t} = \beta * m_{t-1} + (1 - \beta) * g \\
\text{update} = \exp(\text{logbase} * \text{sign_decay} * sign(g) * sign(m)) * g \\
var = var - lr_{t} * \text{update}
\end{array}
:math:`t` represents the updating step while :math:`m` represents the 1st moment vector, :math:`m_{t-1}`
is the last moment of :math:`m_{t}`, :math:`lr` represents the scaling factor `lr`, and :math:`g` represents `grad`.
All of inputs comply with the implicit type conversion rules to make the data types consistent.
If `lr`, `logbase`, `sign_decay` or `beta` is a number, the number is automatically converted to Tensor,
and the data type is consistent with the Tensor data type involved in the operation.
If inputs are tensors and have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Inputs:
- **var** (Parameter) - Variable tensor to be updated. With float32 or float16 data type.
If data type of `var` is float16, all inputs must have the same data type as `var`.
- **m** (Parameter) - Variable tensor to be updated, has the same dtype as `var`.
- **lr** (Union[Number, Tensor]) - The learning rate value, should be a scalar.
With float32 or float16 data type.
- **logbase** (Union[Number, Tensor]) - Should be a scalar. With float32 or float16 data type.
- **sign_decay** (Union[Number, Tensor]) - Should be a scalar. With float32 or float16 data type.
- **beta** (Union[Number, Tensor]) - The exponential decay rate, should be a scalar.
With float32 or float16 data type.
- **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.
Outputs:
Tuple of 2 Tensor, the updated parameters.
- **var** (Tensor) - The same shape and data type as `var`.
- **m** (Tensor) - The same shape and data type as `m`.
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.apply_power_sign = P.ApplyPowerSign()
>>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
>>> self.m = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="m")
>>> self.lr = 0.001
>>> self.logbase = np.e
>>> self.sign_decay = 0.99
>>> self.beta = 0.9
>>> def construct(self, grad):
>>> out = self.apply_power_sign(self.var, self.m, self.lr, self.logbase,
>>>                                        self.sign_decay, self.beta, grad)
>>> return out
>>> net = Net()
>>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
>>> output = net(grad)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('lr', dtype=sig.sig_dtype.T),
sig.make_sig('logbase', dtype=sig.sig_dtype.T),
sig.make_sig('sign_decay', dtype=sig.sig_dtype.T),
sig.make_sig('beta', dtype=sig.sig_dtype.T),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
)
@prim_attr_register
def __init__(self):
"init ApplyPowerSign"
def infer_shape(self, var_shape, m_shape, lr_shape, logbase_shape, sign_decay_shape, beta_shape, grad_shape):
validator.check('m_shape', m_shape, 'var_shape', var_shape, Rel.EQ, self.name)
validator.check('grad_shape', grad_shape, 'var_shape', var_shape, Rel.EQ, self.name)
lr_shape_len = len(lr_shape)
validator.check_integer("lr's rank", lr_shape_len, 1, Rel.LE, self.name)
if lr_shape_len == 1:
validator.check_integer("lr_shape[0]", lr_shape[0], 1, Rel.EQ, self.name)
logbase_shape_len = len(logbase_shape)
validator.check_integer("logbase's rank", logbase_shape_len, 1, Rel.LE, self.name)
if logbase_shape_len == 1:
validator.check_integer("logbase_shape[0]", logbase_shape[0], 1, Rel.EQ, self.name)
sign_decay_shape_len = len(sign_decay_shape)
validator.check_integer("sign_decay's rank", sign_decay_shape_len, 1, Rel.LE, self.name)
if sign_decay_shape_len == 1:
validator.check_integer("sign_decay_shape[0]", sign_decay_shape[0], 1, Rel.EQ, self.name)
beta_shape_len = len(beta_shape)
validator.check_integer("beta's rank", beta_shape_len, 1, Rel.LE, self.name)
if beta_shape_len == 1:
validator.check_integer("beta_shape[0]", beta_shape[0], 1, Rel.EQ, self.name)
return var_shape, m_shape
def infer_dtype(self, var_dtype, m_dtype, lr_dtype, logbase_dtype, sign_decay_dtype, beta_dtype, grad_dtype):
valid_types = [mstype.float16, mstype.float32]
args = {'var': var_dtype, 'm': m_dtype, 'grad': grad_dtype}
validator.check_tensor_type_same(args, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"lr": lr_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"logbase": logbase_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"sign_decay": sign_decay_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"beta": beta_dtype}, valid_types, self.name)
return var_dtype, m_dtype
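# NumPy sketch of the PowerSign rule above; it differs from AddSign only in taking an
# exponential of the sign agreement. Illustration only, not the kernel.
def _np_power_sign_step(var, m, lr, logbase, sign_decay, beta, grad):
    import numpy as np
    m = beta * m + (1 - beta) * grad
    update = np.exp(logbase * sign_decay * np.sign(grad) * np.sign(m)) * grad
    return var - lr * update, m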
class ApplyGradientDescent(PrimitiveWithInfer):
r"""
Update relevant entries according to the following formula.
.. math::
var = var - \alpha * \delta
Inputs of `var` and `delta` comply with the implicit type conversion rules to make the data types consistent.
If they have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Inputs:
- **var** (Parameter) - Variable tensor to be updated. With float32 or float16 data type.
- **alpha** (Union[Number, Tensor]) - Scaling factor, should be a scalar. With float32 or float16 data type.
- **delta** (Tensor) - A tensor for the change, has the same type as `var`.
Outputs:
Tensor, represents the updated `var`.
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.apply_gradient_descent = P.ApplyGradientDescent()
>>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
>>> self.alpha = 0.001
>>> def construct(self, delta):
>>> out = self.apply_gradient_descent(self.var, self.alpha, delta)
>>> return out
>>> net = Net()
>>> delta = Tensor(np.random.rand(3, 3).astype(np.float32))
>>> output = net(delta)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('alpha', dtype=sig.sig_dtype.T1),
sig.make_sig('delta', dtype=sig.sig_dtype.T),
)
@prim_attr_register
def __init__(self):
"init ApplyGradientDescent"
def infer_shape(self, var_shape, alpha_shape, delta_shape):
validator.check('delta shape', delta_shape, 'var shape', var_shape, Rel.EQ, self.name)
alpha_shape_len = len(alpha_shape)
validator.check_integer("alpha's rank", alpha_shape_len, 1, Rel.LE, self.name)
if alpha_shape_len == 1:
validator.check_integer("alpha_shape[0]", alpha_shape[0], 1, Rel.EQ, self.name)
return var_shape
def infer_dtype(self, var_dtype, alpha_dtype, delta_dtype):
valid_types = [mstype.float16, mstype.float32]
args = {'var': var_dtype, 'delta': delta_dtype}
validator.check_tensor_type_same(args, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"alpha": alpha_dtype}, valid_types, self.name)
return var_dtype
class ApplyProximalGradientDescent(PrimitiveWithInfer):
r"""
Update relevant entries according to the FOBOS (Forward-Backward Splitting) algorithm.
.. math::
\text{prox_v} = var - \alpha * \delta
.. math::
var = \frac{sign(\text{prox_v})}{1 + \alpha * l2} * \max(\left| \text{prox_v} \right| - \alpha * l1, 0)
Inputs of `var` and `delta` comply with the implicit type conversion rules to make the data types consistent.
If they have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Inputs:
- **var** (Parameter) - Variable tensor to be updated. With float32 or float16 data type.
- **alpha** (Union[Number, Tensor]) - Scaling factor, should be a scalar. With float32 or float16 data type.
- **l1** (Union[Number, Tensor]) - l1 regularization strength, should be scalar.
With float32 or float16 data type.
- **l2** (Union[Number, Tensor]) - l2 regularization strength, should be scalar.
With float32 or float16 data type.
- **delta** (Tensor) - A tensor for the change, has the same type as `var`.
Outputs:
Tensor, represents the updated `var`.
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops import operations as P
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.apply_proximal_gradient_descent = P.ApplyProximalGradientDescent()
>>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
>>> self.alpha = 0.001
>>> self.l1 = 0.0
>>> self.l2 = 0.0
>>> def construct(self, delta):
>>> out = self.apply_proximal_gradient_descent(self.var, self.alpha, self.l1, self.l2, delta)
>>> return out
>>> net = Net()
>>> delta = Tensor(np.random.rand(3, 3).astype(np.float32))
>>> output = net(delta)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('alpha', dtype=sig.sig_dtype.T1),
sig.make_sig('l1', dtype=sig.sig_dtype.T2),
sig.make_sig('l2', dtype=sig.sig_dtype.T3),
sig.make_sig('delta', dtype=sig.sig_dtype.T),
)
@prim_attr_register
def __init__(self):
"init ApplyGradientDescent"
def infer_shape(self, var_shape, alpha_shape, l1_shape, l2_shape, delta_shape):
validator.check('delta shape', delta_shape, 'var shape', var_shape, Rel.EQ, self.name)
alpha_shape_len = len(alpha_shape)
validator.check_integer("alpha's rank", alpha_shape_len, 1, Rel.LE, self.name)
if alpha_shape_len == 1:
validator.check_integer("alpha_shape[0]", alpha_shape[0], 1, Rel.EQ, self.name)
l1_shape_len = len(l1_shape)
validator.check_integer("l1's rank", l1_shape_len, 1, Rel.LE, self.name)
if l1_shape_len == 1:
validator.check_integer("l1_shape[0]", l1_shape[0], 1, Rel.EQ, self.name)
l2_shape_len = len(l2_shape)
validator.check_integer("l2's rank", l2_shape_len, 1, Rel.LE, self.name)
if l2_shape_len == 1:
validator.check_integer("l2_shape[0]", l2_shape[0], 1, Rel.EQ, self.name)
return var_shape
def infer_dtype(self, var_dtype, alpha_dtype, l1_dtype, l2_dtype, delta_dtype):
valid_types = [mstype.float16, mstype.float32]
args = {'var': var_dtype, 'delta': delta_dtype}
validator.check_tensor_type_same(args, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"alpha": alpha_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"l1": l1_dtype}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"l2": l2_dtype}, valid_types, self.name)
return var_dtype
class LARSUpdate(PrimitiveWithInfer):
"""
Conduct LARS (layer-wise adaptive rate scaling) update on the square sum of gradient.
Args:
epsilon (float): Term added to the denominator to improve numerical stability. Default: 1e-05.
hyperpara (float): Trust coefficient for calculating the local learning rate. Default: 0.001.
use_clip (bool): Whether to use clip operation for calculating the local learning rate. Default: False.
Inputs:
- **weight** (Tensor) - The weight to be updated.
- **gradient** (Tensor) - The gradient of weight, which has the same shape and dtype with weight.
- **norm_weight** (Tensor) - A scalar tensor, representing the square sum of weight.
- **norm_gradient** (Tensor) - A scalar tensor, representing the square sum of gradient.
- **weight_decay** (Union[Number, Tensor]) - Weight decay. It should be a scalar tensor or number.
- **learning_rate** (Union[Number, Tensor]) - Learning rate. It should be a scalar tensor or number.
Outputs:
Tensor, represents the new gradient.
Examples:
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import functional as F
>>> import mindspore.nn as nn
>>> import numpy as np
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.lars = P.LARSUpdate()
>>> self.reduce = P.ReduceSum()
>>> def construct(self, weight, gradient):
>>> w_square_sum = self.reduce(F.square(weight))
>>> grad_square_sum = self.reduce(F.square(gradient))
>>> grad_t = self.lars(weight, gradient, w_square_sum, grad_square_sum, 0.0, 1.0)
>>> return grad_t
>>> weight = np.random.random(size=(2, 3)).astype(np.float32)
>>> gradient = np.random.random(size=(2, 3)).astype(np.float32)
>>> net = Net()
>>> ms_output = net(Tensor(weight), Tensor(gradient))
"""
@prim_attr_register
def __init__(self, epsilon=1e-05, hyperpara=0.001, use_clip=False):
"""init"""
validator.check_value_type("epsilon", epsilon, [float], self.name)
validator.check_value_type("hyperpara", hyperpara, [float], self.name)
validator.check_value_type("use_clip", use_clip, [bool], self.name)
def infer_shape(self, weight_shape, gradient_shape, norm_weight_shape, norm_gradient_shape, weight_decay_shape,
learning_rate_shape):
validator.check("weight shape", weight_shape, "gradient shape", gradient_shape, Rel.EQ, self.name)
validator.check("norm weight shape", norm_weight_shape, "norm gradient shape", norm_gradient_shape, Rel.EQ,
self.name)
shp_len = len(weight_decay_shape)
validator.check_integer("weight decay's rank", shp_len, 1, Rel.LE, self.name)
if shp_len == 1:
validator.check_integer("weight_decay_shape[0]", weight_decay_shape[0], 1, Rel.EQ, self.name)
shp_len = len(learning_rate_shape)
validator.check_integer("learning rate's rank", shp_len, 1, Rel.LE, self.name)
if shp_len == 1:
validator.check_integer("learning_rate_shape[0]", learning_rate_shape[0], 1, Rel.EQ, self.name)
return weight_shape
def infer_dtype(self, weight_dtype, gradient_dtype, norm_weight_dtype, norm_gradient_dtype,
weight_decay_dtype, learning_rate_dtype):
args = {"Weight dtype": weight_dtype, "gradient dtype": gradient_dtype, "norm weight dtype": norm_weight_dtype,
"norm gradient dtype": norm_gradient_dtype}
validator.check_tensor_type_same(args, [mstype.float16, mstype.float32, mstype.int16, mstype.int32], self.name)
validator.check_scalar_or_tensor_type_same({"weight_decay": weight_decay_dtype},
[mstype.float16, mstype.float32, mstype.float64], self.name)
validator.check_scalar_or_tensor_type_same({"learning_rate": learning_rate_dtype},
[mstype.float16, mstype.float32, mstype.float64], self.name)
return weight_dtype
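# Rough sketch of the commonly used LARS trust-ratio computation, given as an assumption
# about how the squared norms feed the local learning rate; the `use_clip` branch is
# omitted and the helper name is hypothetical.
def _np_lars_local_lr(norm_weight, norm_gradient, weight_decay, hyperpara=0.001, epsilon=1e-05):
    import numpy as np
    w_norm = np.sqrt(norm_weight)
    g_norm = np.sqrt(norm_gradient)
    return hyperpara * w_norm / (g_norm + weight_decay * w_norm + epsilon)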
class ApplyFtrl(PrimitiveWithInfer):
"""
Update relevant entries according to the FTRL scheme.
Args:
use_locking (bool): Use locks for the updating operation if True. Default: False.
Inputs:
- **var** (Parameter) - The variable to be updated. The data type should be float16 or float32.
- **accum** (Parameter) - The accumulation to be updated, must be same type and shape as `var`.
- **linear** (Parameter) - the linear coefficient to be updated, must be same type and shape as `var`.
- **grad** (Tensor) - Gradient. The data type should be float16 or float32.
- **lr** (Union[Number, Tensor]) - The learning rate value, must be positive. Default: 0.001.
It should be a float number or a scalar tensor with float16 or float32 data type.
- **l1** (Union[Number, Tensor]) - l1 regularization strength, must be greater than or equal to zero.
Default: 0.0. It should be a float number or a scalar tensor with float16 or float32 data type.
- **l2** (Union[Number, Tensor]) - l2 regularization strength, must be greater than or equal to zero.
Default: 0.0. It should be a float number or a scalar tensor with float16 or float32 data type.
- **lr_power** (Union[Number, Tensor]) - Learning rate power controls how the learning rate decreases
during training, must be less than or equal to zero. Use fixed learning rate if `lr_power` is zero.
Default: -0.5. It should be a float number or a scalar tensor with float16 or float32 data type.
Outputs:
Tensor, represents the updated `var`.
Examples:
>>> import mindspore
>>> import mindspore.nn as nn
>>> import numpy as np
>>> from mindspore import Parameter
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> class ApplyFtrlNet(nn.Cell):
>>> def __init__(self):
>>> super(ApplyFtrlNet, self).__init__()
>>> self.apply_ftrl = P.ApplyFtrl()
>>> self.lr = 0.001
>>> self.l1 = 0.0
>>> self.l2 = 0.0
>>> self.lr_power = -0.5
>>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
>>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
>>> self.linear = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="linear")
>>>
>>> def construct(self, grad):
>>> out = self.apply_ftrl(self.var, self.accum, self.linear, grad, self.lr, self.l1, self.l2,
>>> self.lr_power)
>>> return out
>>>
>>> net = ApplyFtrlNet()
>>> input_x = Tensor(np.random.randint(-4, 4, (3, 3)), mindspore.float32)
>>> result = net(input_x)
[[0.67455846 0.14630564 0.160499 ]
[0.16329421 0.00415689 0.05202988]
[0.18672481 0.17418946 0.36420345]]
"""
@prim_attr_register
def __init__(self, use_locking=False):
self.init_prim_io_names(inputs=['var', 'accum', 'linear', 'grad', 'lr', 'l1', 'l2', 'lr_power'],
outputs=['output'])
self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
self.is_tbe = context.get_context("device_target") == "Ascend"
def infer_shape(self, var_shape, accum_shape, linear_shape, grad_shape, lr_shape, l1_shape, l2_shape,
lr_power_shape):
validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
validator.check('var shape', var_shape, 'linear shape', linear_shape, Rel.EQ, self.name)
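# The Ascend (TBE) kernel emits three outputs (var, accum, linear), each sharing var's
# shape; other targets emit only the updated var.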
if self.is_tbe:
return var_shape, var_shape, var_shape
return var_shape
def infer_dtype(self, var_type, accum_type, linear_type, grad_type, lr_type, l1_type, l2_type, lr_power_type):
valid_types = [mstype.float16, mstype.float32]
args = {'var': var_type, 'accum': accum_type, 'linear': linear_type, 'grad': grad_type}
validator.check_tensor_type_same(args, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"lr": lr_type}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"l1": l1_type}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"l2": l2_type}, valid_types, self.name)
validator.check_scalar_or_tensor_type_same({"lr_power": lr_power_type}, valid_types, self.name)
if self.is_tbe:
return var_type, var_type, var_type
return var_type
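# Hedged NumPy sketch of the textbook FTRL-proximal step (as described in the common
# FTRL references); this is an assumption about the primitive's semantics for intuition
# only, not its kernel.
def _np_ftrl_step(var, accum, linear, grad, lr, l1, l2, lr_power):
    import numpy as np
    accum_new = accum + grad * grad
    sigma = (accum_new ** (-lr_power) - accum ** (-lr_power)) / lr
    linear = linear + grad - sigma * var
    quadratic = accum_new ** (-lr_power) / lr + 2 * l2
    var = np.where(np.abs(linear) > l1, (np.sign(linear) * l1 - linear) / quadratic, 0.0)
    return var, accum_new, linear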
class SparseApplyFtrl(PrimitiveWithCheck):
"""
Update relevant entries according to the FTRL-proximal scheme.
All of inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.
If they have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Args:
lr (float): The learning rate value, must be positive.
l1 (float): l1 regularization strength, must be greater than or equal to zero.
l2 (float): l2 regularization strength, must be greater than or equal to zero.
lr_power (float): Learning rate power controls how the learning rate decreases during training,
must be less than or equal to zero. Use fixed learning rate if `lr_power` is zero.
use_locking (bool): Use locks for the updating operation if True. Default: False.
Inputs:
- **var** (Parameter) - The variable to be updated. The data type must be float16 or float32.
- **accum** (Parameter) - The accumulation to be updated, must be same type and shape as `var`.
- **linear** (Parameter) - the linear coefficient to be updated, must be same type and shape as `var`.
- **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.
- **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.
The shape of `indices` must be the same as `grad` in first dimension. The type must be int32.
Outputs:
- **var** (Tensor) - Tensor, has the same shape and type as `var`.
- **accum** (Tensor) - Tensor, has the same shape and type as `accum`.
- **linear** (Tensor) - Tensor, has the same shape and type as `linear`.
Examples:
>>> import mindspore
>>> import mindspore.nn as nn
>>> import numpy as np
>>> from mindspore import Parameter
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> class SparseApplyFtrlNet(nn.Cell):
>>> def __init__(self):
>>> super(SparseApplyFtrlNet, self).__init__()
>>> self.sparse_apply_ftrl = P.SparseApplyFtrl(lr=0.01, l1=0.0, l2=0.0, lr_power=-0.5)
>>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
>>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
>>> self.linear = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="linear")
>>>
>>> def construct(self, grad, indices):
>>> out = self.sparse_apply_ftrl(self.var, self.accum, self.linear, grad, indices)
>>> return out
>>>
>>> net = SparseApplyFtrlNet()
>>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
>>> indices = Tensor(np.ones([3]), mindspore.int32)
>>> output = net(grad, indices)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('linear', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
sig.make_sig('indices', dtype=sig.sig_dtype.T1),
)
@prim_attr_register
def __init__(self, lr, l1, l2, lr_power, use_locking=False):
validator.check_value_type("lr", lr, [float], self.name)
validator.check_value_type("l1", l1, [float], self.name)
validator.check_value_type("l2", l2, [float], self.name)
validator.check_value_type("lr_power", lr_power, [float], self.name)
self.lr = validator.check_number_range("lr", lr, 0.0, float("inf"), Rel.INC_NEITHER, self.name)
self.l1 = validator.check_number_range("l1", l1, 0.0, float("inf"), Rel.INC_LEFT, self.name)
self.l2 = validator.check_number_range("l2", l2, 0.0, float("inf"), Rel.INC_LEFT, self.name)
self.lr_power = validator.check_number("lr_power", lr_power, 0, Rel.LE, self.name)
self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
def check_shape(self, var_shape, accum_shape, linear_shape, grad_shape, indices_shape):
validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
validator.check('var shape', var_shape, 'linear shape', linear_shape, Rel.EQ, self.name)
if len(var_shape) > 1:
validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
def check_dtype(self, var_dtype, accum_dtype, linear_dtype, grad_dtype, indices_dtype):
args = {"var_dtype": var_dtype, "accum_dtype": accum_dtype,
"linear_dtype": linear_dtype, "grad_dtype": grad_dtype}
validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
class SparseApplyFtrlV2(PrimitiveWithInfer):
"""
Update relevant entries according to the FTRL-proximal scheme.
All of inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.
If they have different data types, lower priority data type will be converted to
relatively highest priority data type.
RuntimeError exception will be thrown when the data type conversion of Parameter is required.
Args:
lr (float): The learning rate value, must be positive.
l1 (float): l1 regularization strength, must be greater than or equal to zero.
l2 (float): l2 regularization strength, must be greater than or equal to zero.
l2_shrinkage (float): L2 shrinkage regularization.
lr_power (float): Learning rate power controls how the learning rate decreases during training,
must be less than or equal to zero. Use fixed learning rate if `lr_power` is zero.
use_locking (bool): If `True`, the var and accumulation tensors will be protected from being updated.
Default: False.
Inputs:
- **var** (Parameter) - The variable to be updated. The data type must be float16 or float32.
- **accum** (Parameter) - The accumulation to be updated, must be same type and shape as `var`.
- **linear** (Parameter) - the linear coefficient to be updated, must be same type and shape as `var`.
- **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.
- **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.
The shape of `indices` must be the same as `grad` in first dimension. The type must be int32.
Outputs:
Tuple of 3 Tensor, the updated parameters.
- **var** (Tensor) - Tensor, has the same shape and type as `var`.
- **accum** (Tensor) - Tensor, has the same shape and type as `accum`.
- **linear** (Tensor) - Tensor, has the same shape and type as `linear`.
Examples:
>>> import mindspore
>>> import mindspore.nn as nn
>>> import numpy as np
>>> from mindspore import Parameter
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> class SparseApplyFtrlV2Net(nn.Cell):
>>> def __init__(self):
>>> super(SparseApplyFtrlV2Net, self).__init__()
>>> self.sparse_apply_ftrl_v2 = P.SparseApplyFtrlV2(lr=0.01, l1=0.0, l2=0.0,
>>>                                                                l2_shrinkage=0.0, lr_power=-0.5)
>>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
>>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
>>> self.linear = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="linear")
>>>
>>> def construct(self, grad, indices):
>>> out = self.sparse_apply_ftrl_v2(self.var, self.accum, self.linear, grad, indices)
>>> return out
>>>
>>> net = SparseApplyFtrlV2Net()
>>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
>>> indices = Tensor(np.ones([3]), mindspore.int32)
>>> output = net(grad, indices)
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('linear', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
sig.make_sig('indices', dtype=sig.sig_dtype.T1),
)
@prim_attr_register
def __init__(self, lr, l1, l2, l2_shrinkage, lr_power, use_locking=False):
validator.check_value_type("lr", lr, [float], self.name)
validator.check_value_type("l1", l1, [float], self.name)
validator.check_value_type("l2", l2, [float], self.name)
validator.check_value_type("lr_power", lr_power, [float], self.name)
self.lr = validator.check_number_range("lr", lr, 0.0, float("inf"), Rel.INC_NEITHER, self.name)
self.l1 = validator.check_number_range("l1", l1, 0.0, float("inf"), Rel.INC_LEFT, self.name)
self.l2 = validator.check_number_range("l2", l2, 0.0, float("inf"), Rel.INC_LEFT, self.name)
self.lr_power = validator.check_number("lr_power", lr_power, 0, Rel.LE, self.name)
self.l2_shrinkage = validator.check_value_type("l2_shrinkage", l2_shrinkage, [float], self.name)
self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
def infer_shape(self, var_shape, accum_shape, linear_shape, grad_shape, indices_shape):
validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)
validator.check('var shape', var_shape, 'linear shape', linear_shape, Rel.EQ, self.name)
if len(var_shape) > 1:
validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)
validator.check_integer("indices rank", len(indices_shape), 1, Rel.EQ, self.name)
validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
return var_shape, accum_shape, linear_shape
def infer_dtype(self, var_dtype, accum_dtype, linear_dtype, grad_dtype, indices_dtype):
args = {"var_dtype": var_dtype, "accum_dtype": accum_dtype,
"linear_dtype": linear_dtype, "grad_dtype": grad_dtype}
validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
return var_dtype, accum_dtype, linear_dtype
class ConfusionMulGrad(PrimitiveWithInfer):
"""
`output0` is the result of multiplying `input0` and `input1` element-wise.
`output1` is the element-wise product of `input0` and `input1` with the ReduceSum operation applied along `axis`.
Args:
axis (Union[int, tuple[int], list[int]]): The dimensions to reduce.
Default: (), reduce all dimensions. Only constant value is allowed.
keep_dims (bool):
- If true, keep these reduced dimensions and the length as 1.
- If false, don't keep these dimensions. Default: False.
Inputs:
- **input_0** (Tensor) - The input Tensor.
- **input_1** (Tensor) - The input Tensor.
- **input_2** (Tensor) - The input Tensor.
Outputs:
- **output_0** (Tensor) - The same shape as `input0`.
- **output_1** (Tensor)
- If axis is (), and keep_dims is false, the output is a 0-D array representing
the sum of all elements in the input array.
- If axis is int, set as 2, and keep_dims is false,
the shape of output is :math:`(x_1,x_3,...,x_R)`.
- If axis is tuple(int), set as (2,3), and keep_dims is false,
the shape of output is :math:`(x_1,x_4,...x_R)`.
Examples:
>>> confusion_mul_grad = P.ConfusionMulGrad()
>>> input_0 = Tensor(np.random.randint(-2, 2, (2, 3)), mindspore.float32)
>>> input_1 = Tensor(np.random.randint(0, 4, (2, 3)), mindspore.float32)
>>> input_2 = Tensor(np.random.randint(-4, 0, (2, 3)), mindspore.float32)
>>> output_0, output_1 = confusion_mul_grad(input_0, input_1, input_2)
output_0:
[[ 3. 1. 0.]
[-6. 2. -2.]]
output_1:
-3.0
"""
@prim_attr_register
def __init__(self, axis=(), keep_dims=False):
self.init_prim_io_names(inputs=["input0", "input1", "input2"], outputs=["output0", "output1"])
self.axis_ = validator.check_value_type("axis", axis, [int, tuple, list], self.name)
self.keep_dims_ = validator.check_value_type("keep_dims", keep_dims, [bool], self.name)
def infer_shape(self, input0_shape, input1_shape, input2_shape):
outshape0 = input0_shape
outshape1 = _infer_shape_reduce(input1_shape, self.axis_, self.keep_dims_, self.name)
return outshape0, outshape1
def infer_dtype(self, input0_dtype, input1_dtype, input2_dtype):
validator.check_subclass("input0_dtype", input0_dtype, mstype.tensor, self.name)
validator.check_subclass("input1_dtype", input1_dtype, mstype.tensor, self.name)
validator.check_subclass("input2_dtype", input2_dtype, mstype.tensor, self.name)
return input0_dtype, input1_dtype
class Dropout(PrimitiveWithInfer):
"""
During training, randomly zeroes some of the elements of the input tensor with probability.
Args:
keep_prob (float): The keep rate, between 0 and 1, e.g. keep_prob = 0.9,
means dropping out 10% of input units.
Inputs:
- **x** (Tensor) - The input tensor, with float16 or float32 data type.
Outputs:
Tuple of 2 Tensor, the output after dropout and the generated mask, where the output
has the same shape and data type as `x`.
Examples:
>>> dropout = P.Dropout(keep_prob=0.5)
>>> x = Tensor(np.ones([20, 16, 50, 50]).astype(np.float32))
>>> output, mask = dropout(x)
"""
@prim_attr_register
def __init__(self, keep_prob=0.5):
self.keep_prob = validator.check_number_range("keep_prob", keep_prob, 0, 1, Rel.INC_RIGHT, self.name)
def infer_shape(self, x_shape):
validator.check_integer("x_shape", len(x_shape), 1, Rel.GE, self.name)
mask_shape = x_shape
return x_shape, mask_shape
def infer_dtype(self, x_dtype):
valid_types = (mstype.float16, mstype.float32)
validator.check_subclass("x", x_dtype, mstype.tensor, self.name)
validator.check_tensor_type_same({"x_dtype": x_dtype}, valid_types, self.name)
return x_dtype, x_dtype
class DropoutGrad(PrimitiveWithInfer):
"""
The gradient of Dropout. During training, randomly zeroes some of the elements
of the input tensor with probability.
Args:
        keep_prob (float): The keep rate, between 0 and 1. For example, keep_prob = 0.9 means
            dropping out 10% of the input units.
Inputs:
- **shape** (tuple[int]) - The shape of target mask.
Outputs:
Tensor, the value of generated mask for input shape.
Examples:
>>> dropout_grad = P.DropoutGrad(keep_prob=0.5)
        >>> in_x = Tensor((20, 16, 50, 50))
        >>> out = dropout_grad(in_x)
"""
@prim_attr_register
def __init__(self, keep_prob=0.5):
self.keep_prob = validator.check_number_range("keep_prob", keep_prob, 0, 1, Rel.INC_RIGHT, self.name)
def infer_shape(self, dy_shape, mask_shape):
return dy_shape
def infer_dtype(self, dy_dtype, mask_dtype):
valid_types = (mstype.float16, mstype.float32)
validator.check_subclass("dy", dy_dtype, mstype.tensor, self.name)
validator.check_subclass("mask", mask_dtype, mstype.tensor, self.name)
validator.check_tensor_type_same({"dy_dtype": dy_dtype}, valid_types, self.name)
return dy_dtype
class CTCLoss(PrimitiveWithInfer):
"""
Calculates the CTC (Connectionist Temporal Classification) loss and the gradient.
Args:
preprocess_collapse_repeated (bool): If true, repeated labels will be collapsed prior to the CTC calculation.
Default: False.
ctc_merge_repeated (bool): If false, during CTC calculation, repeated non-blank labels will not be merged
            and these labels will be interpreted as individual ones. This is a simplified
version of CTC. Default: True.
ignore_longer_outputs_than_inputs (bool): If True, sequences with longer outputs than inputs will be ignored.
Default: False.
Inputs:
- **inputs** (Tensor) - The input Tensor should be a `3-D` tensor whose shape is
:math:`(max_time, batch_size, num_classes)`. `num_classes` should be `num_labels + 1` classes, `num_labels`
indicates the number of actual labels. Blank labels are reserved. Default blank label is `num_classes - 1`.
Data type must be float16, float32 or float64.
- **labels_indices** (Tensor) - The indices of labels. `labels_indices[i, :] == [b, t]` means `labels_values[i]`
stores the id for `(batch b, time t)`. The type must be int64 and rank must be 2.
- **labels_values** (Tensor) - A `1-D` input tensor. The values are associated with the given batch and time.
The type must be int32. `labels_values[i]` must in the range of `[0, num_classes)`.
- **sequence_length** (Tensor) - A tensor containing sequence lengths with the shape of :math:`(batch_size)`.
The type must be int32. Each value in the tensor should not be greater than `max_time`.
Outputs:
- **loss** (Tensor) - A tensor containing log-probabilities, the shape is :math:`(batch_size)`. The tensor has
the same type with `inputs`.
- **gradient** (Tensor) - The gradient of `loss`, has the same type and shape with `inputs`.
Examples:
>>> inputs = Tensor(np.random.random((2, 2, 3)), mindspore.float32)
>>> labels_indices = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int64)
>>> labels_values = Tensor(np.array([2, 2]), mindspore.int32)
>>> sequence_length = Tensor(np.array([2, 2]), mindspore.int32)
>>> ctc_loss = P.CTCLoss()
>>> output = ctc_loss(inputs, labels_indices, labels_values, sequence_length)
"""
@prim_attr_register
def __init__(self, preprocess_collapse_repeated=False, ctc_merge_repeated=True,
ignore_longer_outputs_than_inputs=False):
self.init_prim_io_names(inputs=["inputs", "labels_indices", "labels_values", "sequence_length"],
outputs=["loss", "gradient"])
validator.check_value_type("preprocess_collapse_repeated", preprocess_collapse_repeated, [bool], self.name)
self.preprocess_collapse_repeated_ = preprocess_collapse_repeated
self.ctc_merge_repeated_ = validator.check_value_type("ctc_merge_repeated", ctc_merge_repeated,
[bool], self.name)
validator.check_value_type("ignore_longer_outputs_than_inputs",
ignore_longer_outputs_than_inputs, [bool], self.name)
self.ignore_longer_outputs_than_inputs_ = ignore_longer_outputs_than_inputs
def infer_shape(self, inputs, labels_indices, labels_values, sequence_length):
validator.check_integer("inputs rank", len(inputs), 3, Rel.EQ, self.name)
validator.check_integer("labels_indices rank", len(labels_indices), 2, Rel.EQ, self.name)
validator.check_integer("labels_indices dim one", labels_indices[1], 2, Rel.EQ, self.name)
validator.check_integer("labels_values rank", len(labels_values), 1, Rel.EQ, self.name)
validator.check_integer("sequence_length rank", len(sequence_length), 1, Rel.EQ, self.name)
validator.check('labels_indices size', labels_indices[0], 'labels_values size',
labels_values[0], Rel.EQ, self.name)
validator.check('inputs batch_size', inputs[1], 'sequence_length batch_size',
sequence_length[0], Rel.EQ, self.name)
batch_size = []
batch_size.append(inputs[1])
return batch_size, inputs
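    # Shape reading of the docstring example above (illustrative comment): `inputs` is
    # (max_time=2, batch_size=2, num_classes=3), each row of `labels_indices` is a [batch, time]
    # pair for the matching entry of `labels_values`, and infer_shape above returns a loss of
    # shape (batch_size,) plus a gradient with the same shape as `inputs`.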
def infer_dtype(self, inputs, labels_indices, labels_values, sequence_length):
valid_dtype = [mstype.float16, mstype.float32, mstype.double]
validator.check_tensor_type_same({"inputs_dtype": inputs}, valid_dtype, self.name)
validator.check_tensor_type_same({"labels_indices_dtype": labels_indices}, [mstype.int64], self.name)
validator.check_tensor_type_same({"labels_values_dtype": labels_values}, [mstype.int32], self.name)
validator.check_tensor_type_same({"sequence_length_dtype": sequence_length}, [mstype.int32], self.name)
return inputs, inputs
class CTCGreedyDecoder(PrimitiveWithInfer):
"""
Performs greedy decoding on the logits given in inputs.
Args:
merge_repeated (bool): If True, merge repeated classes in output. Default: True.
Inputs:
- **inputs** (Tensor) - The input Tensor should be a `3-D` tensor whose shape is
:math:`(max_time, batch_size, num_classes)`. `num_classes` should be `num_labels + 1` classes, `num_labels`
indicates the number of actual labels. Blank labels are reserved. Default blank label is `num_classes - 1`.
Data type must be float32 or float64.
- **sequence_length** (Tensor) - A tensor containing sequence lengths with the shape of :math:`(batch_size)`.
          The type must be int32. Each value in the tensor should not be greater than `max_time`.
Outputs:
- **decoded_indices** (Tensor) - A tensor with shape of :math:`(total_decoded_outputs, 2)`.
Data type is int64.
- **decoded_values** (Tensor) - A tensor with shape of :math:`(total_decoded_outputs)`,
it stores the decoded classes. Data type is int64.
        - **decoded_shape** (Tensor) - The value of tensor is :math:`[batch_size, max_decoded_length]`.
Data type is int64.
- **log_probability** (Tensor) - A tensor with shape of :math:`(batch_size, 1)`,
containing sequence log-probability, has the same type as `inputs`.
Examples:
>>> class CTCGreedyDecoderNet(nn.Cell):
>>> def __init__(self):
>>> super(CTCGreedyDecoderNet, self).__init__()
>>> self.ctc_greedy_decoder = P.CTCGreedyDecoder()
>>> self.assert_op = P.Assert(300)
>>>
>>> def construct(self, inputs, sequence_length):
>>> out = self.ctc_greedy_decoder(inputs,sequence_length)
>>> self.assert_op(True, (out[0], out[1], out[2], out[3]))
>>> return out[2]
>>>
>>> inputs = Tensor(np.random.random((2, 2, 3)), mindspore.float32)
>>> sequence_length = Tensor(np.array([2, 2]), mindspore.int32)
>>> net = CTCGreedyDecoderNet()
>>> output = net(inputs, sequence_length)
"""
@prim_attr_register
def __init__(self, merge_repeated=True):
self.merge_repeated = validator.check_value_type("merge_repeated", merge_repeated, [bool], self.name)
def infer_shape(self, inputs_shape, sequence_length_shape):
validator.check_integer("inputs rank", len(inputs_shape), 3, Rel.EQ, self.name)
validator.check_integer("sequence_length rank", len(sequence_length_shape), 1, Rel.EQ, self.name)
validator.check('inputs batch_size', inputs_shape[1], 'sequence_length batch_size',
sequence_length_shape[0], Rel.EQ, self.name)
total_decoded_outputs = -1
decoded_indices_shape = [total_decoded_outputs, 2]
decoded_values = [total_decoded_outputs]
decoded_shape = [2]
log_probability_shape = [inputs_shape[1], 1]
return decoded_indices_shape, decoded_values, decoded_shape, log_probability_shape
def infer_dtype(self, inputs_dtype, sequence_length_dtype):
validator.check_tensor_type_same({"inputs_dtype": inputs_dtype}, [mstype.float32, mstype.double], self.name)
validator.check_tensor_type_same({"sequence_length_dtype": sequence_length_dtype}, [mstype.int32], self.name)
decoded_type = mstype.tensor_type(mstype.int64)
return decoded_type, decoded_type, decoded_type, inputs_dtype
class BasicLSTMCell(PrimitiveWithInfer):
r"""
Applies the long short-term memory (LSTM) to the input.
.. math::
\begin{array}{ll} \\
i_t = \sigma(W_{ix} x_t + b_{ix} + W_{ih} h_{(t-1)} + b_{ih}) \\
f_t = \sigma(W_{fx} x_t + b_{fx} + W_{fh} h_{(t-1)} + b_{fh}) \\
\tilde{c}_t = \tanh(W_{cx} x_t + b_{cx} + W_{ch} h_{(t-1)} + b_{ch}) \\
o_t = \sigma(W_{ox} x_t + b_{ox} + W_{oh} h_{(t-1)} + b_{oh}) \\
c_t = f_t * c_{(t-1)} + i_t * \tilde{c}_t \\
h_t = o_t * \tanh(c_t) \\
\end{array}
Here :math:`\sigma` is the sigmoid function, and :math:`*` is the Hadamard product. :math:`W, b`
are learnable weights between the output and the input in the formula. For instance,
:math:`W_{ix}, b_{ix}` are the weight and bias used to transform from input :math:`x` to :math:`i`.
Details can be found in paper `LONG SHORT-TERM MEMORY
<https://www.bioinf.jku.at/publications/older/2604.pdf>`_ and
`Long Short-Term Memory Recurrent Neural Network Architectures for Large Scale Acoustic Modeling
<https://static.googleusercontent.com/media/research.google.com/zh-CN//pubs/archive/43905.pdf>`_.
Args:
keep_prob (float): If not 1.0, append `Dropout` layer on the outputs of each
LSTM layer except the last layer. Default 1.0. The range of dropout is [0.0, 1.0].
forget_bias (float): Add forget bias to forget gate biases in order to decrease former scale. Default: 1.0.
state_is_tuple (bool): If true, the state is a tuple of 2 tensors, containing h and c; If false, the state is
a tensor and it needs to be split first. Default: True.
activation (str): Activation. Default: "tanh". Only "tanh" is currently supported.
Inputs:
- **x** (Tensor) - Current words. Tensor of shape (`batch_size`, `input_size`).
The data type must be float16 or float32.
- **h** (Tensor) - Hidden state last moment. Tensor of shape (`batch_size`, `hidden_size`).
The data type must be float16 or float32.
- **c** (Tensor) - Cell state last moment. Tensor of shape (`batch_size`, `hidden_size`).
The data type must be float16 or float32.
- **w** (Tensor) - Weight. Tensor of shape (`input_size + hidden_size`, `4 x hidden_size`).
The data type must be float16 or float32.
- **b** (Tensor) - Bias. Tensor of shape (`4 x hidden_size`).
The data type must be the same as `c`.
Outputs:
- **ct** (Tensor) - Forward :math:`c_t` cache at moment `t`. Tensor of shape (`batch_size`, `hidden_size`).
Has the same type with input `c`.
- **ht** (Tensor) - Cell output. Tensor of shape (`batch_size`, `hidden_size`). With data type of float16.
- **it** (Tensor) - Forward :math:`i_t` cache at moment `t`. Tensor of shape (`batch_size`, `hidden_size`).
Has the same type with input `c`.
- **jt** (Tensor) - Forward :math:`j_t` cache at moment `t`. Tensor of shape (`batch_size`, `hidden_size`).
Has the same type with input `c`.
- **ft** (Tensor) - Forward :math:`f_t` cache at moment `t`. Tensor of shape (`batch_size`, `hidden_size`).
Has the same type with input `c`.
- **ot** (Tensor) - Forward :math:`o_t` cache at moment `t`. Tensor of shape (`batch_size`, `hidden_size`).
Has the same type with input `c`.
- **tanhct** (Tensor) - Forward :math:`tanh c_t` cache at moment `t`.
Tensor of shape (`batch_size`, `hidden_size`), has the same type with input `c`.
Examples:
>>> x = Tensor(np.random.rand(1, 32).astype(np.float16))
>>> h = Tensor(np.random.rand(1, 64).astype(np.float16))
>>> c = Tensor(np.random.rand(1, 64).astype(np.float16))
>>> w = Tensor(np.random.rand(96, 256).astype(np.float16))
>>> b = Tensor(np.random.rand(256, ).astype(np.float16))
>>> lstm = P.BasicLSTMCell(keep_prob=1.0, forget_bias=1.0, state_is_tuple=True, activation='tanh')
>>> lstm(x, h, c, w, b)
"""
@prim_attr_register
def __init__(self, keep_prob=1.0, forget_bias=1.0, state_is_tuple=True, activation='tanh'):
self.keep_prob = validator.check_value_type("keep_prob", keep_prob, [float], self.name)
self.keep_prob = validator.check_number_range("keep_prob", keep_prob, 0.0, 1.0, Rel.INC_BOTH, self.name)
self.forget_bias = validator.check_value_type("forget_bias", forget_bias, [float], self.name)
self.state_is_tuple = validator.check_value_type("state_is_tuple", state_is_tuple, [bool], self.name)
self.activation = validator.check_string("activation", activation, ['tanh'], self.name)
self.add_prim_attr("io_format", "ND")
def infer_shape(self, x_shape, h_shape, c_shape, w_shape, b_shape):
validator.check_integer("x rank", len(x_shape), 2, Rel.EQ, self.name)
validator.check_integer("h rank", len(h_shape), 2, Rel.EQ, self.name)
validator.check_integer("c rank", len(c_shape), 2, Rel.EQ, self.name)
validator.check_integer("w rank", len(w_shape), 2, Rel.EQ, self.name)
validator.check_integer("b rank", len(b_shape), 1, Rel.EQ, self.name)
validator.check("x_shape[0]", x_shape[0], "h_shape[0]", h_shape[0], Rel.EQ, self.name)
validator.check("c_shape[0]", c_shape[0], "h_shape[0]", h_shape[0], Rel.EQ, self.name)
validator.check("c_shape[1]", c_shape[1], "h_shape[1]", h_shape[1], Rel.EQ, self.name)
validator.check("w_shape[1]", w_shape[1], "4*h_shape[1]", 4 * h_shape[1], Rel.EQ, self.name)
validator.check("w_shape[0]", w_shape[0], "x_shape[1]+h_shape[1]", x_shape[1] + h_shape[1], Rel.EQ, self.name)
validator.check("b_shape[0]", b_shape[0], "4*h_shape[1]", 4 * h_shape[1], Rel.EQ, self.name)
ct_shape = c_shape
ht_shape = c_shape
it_shape = c_shape
jt_shape = c_shape
ft_shape = c_shape
ot_shape = c_shape
tanhct_shape = c_shape
return (ct_shape, ht_shape, it_shape, jt_shape, ft_shape, ot_shape, tanhct_shape)
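    # Illustrative shape bookkeeping for the docstring example above (not part of the original
    # implementation): with input_size = 32 and hidden_size = 64,
    #   w: (input_size + hidden_size, 4 * hidden_size) = (32 + 64, 4 * 64) = (96, 256)
    #   b: (4 * hidden_size,) = (256,)
    # which is exactly what the w_shape and b_shape checks above enforce.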
def infer_dtype(self, x_dtype, h_dtype, c_dtype, w_dtype, b_dtype):
validator.check_tensor_type_same({"x_dtype": x_dtype}, [mstype.float16, mstype.float32], self.name)
validator.check_tensor_type_same({"h_dtype": h_dtype}, [mstype.float16, mstype.float32], self.name)
validator.check_tensor_type_same({"w_dtype": w_dtype}, [mstype.float16, mstype.float32], self.name)
args = {"c_dtype": c_dtype, "b_dtype": b_dtype}
validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
return (c_dtype, mstype.float16, c_dtype, c_dtype, c_dtype, c_dtype, c_dtype)
class InTopK(PrimitiveWithInfer):
r"""
Whether the targets are in the top `k` predictions.
Args:
k (int): Specify the number of top elements to be used for computing precision.
Inputs:
- **x1** (Tensor) - A 2D Tensor defines the predictions of a batch of samples with float16 or float32 data type.
- **x2** (Tensor) - A 1D Tensor defines the labels of a batch of samples with int32 data type.
Outputs:
        Tensor, 1-D of type bool with the same shape as `x2`. For each sample `i`, the value is True
        if the target class `x2[i]` is among the top `k` predictions of `x1[i]`, otherwise False.
Examples:
>>> x1 = Tensor(np.array([[1, 8, 5, 2, 7], [4, 9, 1, 3, 5]]), mindspore.float32)
>>> x2 = Tensor(np.array([1, 3]), mindspore.int32)
>>> in_top_k = P.InTopK(3)
>>> result = in_top_k(x1, x2)
[True False]
"""
@prim_attr_register
def __init__(self, k):
"""Init InTopK"""
self.init_prim_io_names(inputs=['x1', 'x2', 'k'], outputs=['y'])
validator.check_value_type("k", k, [int], self.name)
def infer_dtype(self, x1_dtype, x2_dtype):
validator.check_tensor_type_same({"x1": x1_dtype}, (mstype.float16, mstype.float32,), self.name)
validator.check_tensor_type_same({"x2": x2_dtype}, (mstype.int32,), self.name)
return mstype.tensor_type(mstype.bool_)
def infer_shape(self, x1_shape, x2_shape):
validator.check("x1", len(x1_shape), "", 2, Rel.EQ, self.name)
validator.check("x2", len(x2_shape), "", 1, Rel.EQ, self.name)
validator.check("size of x2", x2_shape[0], "x1's first dimension", x1_shape[0], Rel.EQ, self.name)
return x2_shape
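    # Worked reading of the docstring example above (illustrative comment): for sample 0 the
    # target class is x2[0] = 1 and x1[0][1] = 8 is among the top-3 predictions (8, 7, 5), so the
    # result is True; for sample 1 the target class is x2[1] = 3 and x1[1][3] = 3 is not among
    # the top-3 predictions (9, 5, 4), so the result is False, giving [True, False].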
class LRN(PrimitiveWithInfer):
r"""
Local Response Normalization
Args:
depth_radius (int): Half-width of the 1-D normalization window. Shape of 0-D.
bias (float): An offset (usually positive to avoid dividing by 0).
alpha (float): A scale factor, usually positive.
beta (float): An exponent.
norm_region (str): Specify normalization region. Options: "ACROSS_CHANNELS". Default: "ACROSS_CHANNELS".
Inputs:
- **x** (Tensor) - A 4D Tensor with float16 or float32 data type.
Outputs:
Tensor, With shape and data type same as the input tensor.
Examples:
        >>> x = Tensor(np.random.rand(1, 10, 4, 4), mindspore.float32)
>>> lrn = P.LRN()
>>> lrn(x)
"""
@prim_attr_register
def __init__(self, depth_radius=5, bias=1.0, alpha=1.0, beta=0.5, norm_region="ACROSS_CHANNELS"):
"""Init LRN"""
self.init_prim_io_names(inputs=['x'], outputs=['y'])
validator.check_value_type("depth_radius", depth_radius, [int], self.name)
validator.check_value_type("bias", bias, [float], self.name)
validator.check_value_type("alpha", alpha, [float], self.name)
validator.check_value_type("beta", beta, [float], self.name)
validator.check_value_type("norm_region", norm_region, [str], self.name)
validator.check_string('norm_region', norm_region, ['ACROSS_CHANNELS'], self.name)
validator.check_integer("depth_radius", depth_radius, 0, Rel.GE, self.name)
def infer_dtype(self, x_dtype):
validator.check_tensor_type_same({"x": x_dtype}, (mstype.float16, mstype.float32,), self.name)
return x_dtype
def infer_shape(self, x_shape):
validator.check_integer("x_shape", len(x_shape), 4, Rel.EQ, self.name)
return x_shape
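    # Reference formula (an assumption based on the conventional LRN definition, for example the
    # TensorFlow/AlexNet variant; the registered kernel implementation is authoritative):
    #   sqr_sum[b, c, h, w] = sum(x[b, c - depth_radius : c + depth_radius + 1, h, w] ** 2)
    #   output = x / (bias + alpha * sqr_sum) ** beta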
class CTCLossV2(PrimitiveWithInfer):
r"""
Calculates the CTC (Connectionist Temporal Classification) loss and the gradient.
Note:
        - cuDNN uses a label value of 0 for the `blank` label.
Inputs:
- **inputs** (Tensor) - The input Tensor should be a `3-D` tensor whose shape is
:math:`(max_time, batch_size, num_class)`. `num_class` should be `num_labels + 1` classes, `num_labels`
indicates the number of actual labels. Blank labels are reserved.
        - **labels** (Tensor) - The labels Tensor should be a `1-D` tensor whose shape is
          :math:`(\sum label_lengths)`
          or a `2-D` tensor whose shape is
          :math:`(max_time, max(label_lengths))`.
          The type must be int32.
        - **input_lengths** (Tensor) - A `1-D` input tensor whose shape is
          :math:`(batch_size,)`. Its values are the lengths of the input sequences and should not be
          greater than `max_time`. The type must be int32.
        - **label_lengths** (Tensor) - A tensor containing sequence lengths with the shape of :math:`(batch_size)`.
          The type must be int32. Each value in the tensor should not be greater than `max_time`.
Outputs:
- **loss** (Tensor) - A tensor containing log-probabilities, the shape is :math:`(batch_size)`, has the same
type with `inputs`.
- **gradient** (Tensor) - The gradient of `loss`, has the same type and shape with `inputs`.
Examples:
>>> inputs = Tensor(np.random.random((2, 2, 3)), mindspore.float32)
>>> labels = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32)
        >>> input_lengths = Tensor(np.array([2, 2]), mindspore.int32)
        >>> label_lengths = Tensor(np.array([2, 2]), mindspore.int32)
>>> ctc_loss = P.CTCLossV2()
>>> output = ctc_loss(inputs, labels, input_lengths, label_lengths)
"""
@prim_attr_register
def __init__(self):
pass
def infer_dtype(self, input_dtype, labels_dtype, input_lengths_dtype, label_lengths_dtype):
validator.check_tensor_type_same({"input": input_dtype}, (mstype.float32,), self.name)
validator.check_tensor_type_same({"labels": labels_dtype}, (mstype.int32,), self.name)
validator.check_tensor_type_same({"input_lengths": input_lengths_dtype}, (mstype.int32,), self.name)
validator.check_tensor_type_same({"target_lengths": label_lengths_dtype}, (mstype.int32,), self.name)
return mstype.float32, mstype.float32
def infer_shape(self, input_shape, labels_shape, input_lengths_shape, label_lengths_shape):
validator.check_integer("input shape", len(input_shape), 3, Rel.EQ, self.name)
validator.check_number_range("labels shape", len(labels_shape), 1, 2, Rel.INC_BOTH, self.name)
validator.check_integer("input lengths shape", len(input_lengths_shape), 1, Rel.EQ, self.name)
validator.check_integer("label lengths shape", len(label_lengths_shape), 1, Rel.EQ, self.name)
validator.check_integer("input[1]", input_shape[1], input_lengths_shape[0], Rel.EQ, self.name)
validator.check_integer("input[1]", input_shape[1], label_lengths_shape[0], Rel.EQ, self.name)
return (input_shape[1],), input_shape
|
from __future__ import annotations
from collections.abc import Iterable
from reprlib import Repr
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Set, Tuple, Type
from uuid import UUID, uuid4
from restio.event import EventListener
from restio.fields.base import Field, T_co
from restio.shared import (
CURRENT_SESSION,
MODEL_INSTANTIATED_EVENT,
MODEL_PRE_UPDATE_EVENT,
MODEL_TYPE_REGISTRY,
MODEL_UPDATE_EVENT,
)
from restio.state import ModelState
if TYPE_CHECKING:
from restio.session import Session
def _check_model_type(obj: Optional[BaseModel]):
if not isinstance(obj, BaseModel):
raise TypeError("The provided object is not of type BaseModel.")
class ModelMeta:
__slots__ = ("init", "init_ignore_extra", "repr", "fields", "primary_keys", "alias")
init: bool
init_ignore_extra: bool
repr: bool
fields: Dict[str, Field]
primary_keys: Dict[str, Field]
alias: Optional[str]
def __init__(self):
self.init = True
self.init_ignore_extra = True
self.repr = True
self.fields = dict()
self.primary_keys = dict()
self.alias = None
# Meta attributes that don't get inherited from parent classes
__MODEL_META_NOT_INHERITED__ = ("alias",)
# Read-only meta attributes, can't be modified by model class
__MODEL_META_READONLY__ = ("fields", "primary_keys")
class BaseModelMeta(type):
__slots__ = ()
"""
    BaseModel metaclass. Responsible for internally caching the data schema in a BaseModel
subclass by identifying fields and primary keys.
"""
def __new__(cls, name: str, bases: Tuple[Type, ...], dct: Dict[str, Any]):
# internal fields not initialized in BaseModel
dct["_internal_id"] = None
dct["_hash"] = None
dct["_listener"] = None
dct["_persistent_values"] = None
# prepares metadata for the model type
meta = ModelMeta()
dct["_meta"] = meta
def _update_meta(
_meta: Optional[ModelMeta],
extend: bool,
not_inherited: Tuple[str, ...] = tuple(),
):
if not _meta:
return
propagate_meta = (
set(meta.__slots__) - set(__MODEL_META_READONLY__) - set(not_inherited)
)
for meta_attribute in propagate_meta:
if not hasattr(_meta, meta_attribute):
continue
setattr(meta, meta_attribute, getattr(_meta, meta_attribute))
# excluded meta, needs to be propagated manually
if extend:
meta.fields.update(_meta.fields)
meta.primary_keys.update(_meta.primary_keys)
base: Type[BaseModel]
for base in bases:
if not hasattr(base, "_meta"):
continue
_update_meta(base._meta, True, __MODEL_META_NOT_INHERITED__)
_update_meta(dct.get("Meta", None), False)
# process class fields
for field_name, field_value in dct.items():
if not isinstance(field_value, Field):
continue
meta.fields[field_name] = field_value
if field_value.pk:
meta.primary_keys[field_name] = field_value
# set alias name to class name when None
name_alias = meta.alias or name
# validate if the alias is not duplicate
# the caveat here is that two classes with the same name in two
# different files will have a name collision and fail initializing
if name_alias in MODEL_TYPE_REGISTRY:
raise ValueError(
f"Model alias `{name_alias}` is already used by another class."
)
cls_object = super().__new__(cls, name, bases, dct)
# set the model alias to the model type
if name_alias != "BaseModel":
MODEL_TYPE_REGISTRY[name_alias] = cls_object
return cls_object
def __call__(self, *args, **kwargs):
instance: BaseModel = super().__call__(*args, **kwargs)
# stores the default after the constructor, if nothing has been set yet
# this is implemented here so that this is always called, regardless of the
# models with custom constructors calling or not super().__init__()
for field in instance._meta.fields.values():
field._store_default(instance, force=False)
instance._internal_id = uuid4()
instance._hash = hash((instance.__class__, str(instance._internal_id)))
instance._persistent_values = {}
instance._listener = EventListener()
instance._initialized = True
session = CURRENT_SESSION.get()
if session:
session._listener.dispatch(MODEL_INSTANTIATED_EVENT, instance)
return instance
_repr_obj: Repr = Repr()
_repr_obj.maxother = 200
class BaseModel(metaclass=BaseModelMeta):
"""
A representation of a remote object model.
BaseModel is an abstract class that should be extended to represent models incoming
from or outgoing to a remote REST API.
Models can exist independently from Sessions but contain an internal state that
indicates the status of the model within the current context. The Sessions are
responsible to control this state. Also, each model contains a set of control
attributes that indicate which fields are watched by restio internals. By default,
all Field descriptors in the model will become field attributes. Fields declared
with pk=True will be used by restio to optimize the caching of the models in a
Session.
Models that change over time will contain an internal dictionary with the latest
    known persistent value of each field. This is done to guarantee fast rollback of the
values when the Session is invalid, and to also indicate which values might have
changed within the session scope. If a field is modified directly, the model will
intercept the change and save the older value into the persistent dictionary until
`_persist` is called. During a `_rollback` call, however, the stored values are
re-assigned to their original attributes. Each attribute change will also dispatch
an update event so that the session is aware of changes and manages the model's
internal state accordingly. The persistent dictionary (through the helper method
    `is_field_modified`) can also be used by DAOs to verify which values were updated
prior to sending a request through the REST API, thus allowing for proper
optimization and minimizing chances of conflicting changes on the remote object.
All models automatically generate a random internal UUID when created. This UUID is
used internally for comparison purposes, and externally as an identity. Although
this attribute is not explicitly set as private, it should never be modified.
"""
# these are all initialized by the metaclass
_meta: ModelMeta
__state: ModelState = ModelState.UNBOUND
__primary_keys: Optional[Dict[str, Any]] = None
_initialized: bool = False
_internal_id: UUID
_hash: int
_persistent_values: Dict[str, Any]
_listener: EventListener
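    # Minimal usage sketch (illustrative only; `IntField` and `StrField` are hypothetical
    # concrete Field types, substitute whatever field classes your restio version provides):
    #
    #     class Employee(BaseModel):
    #         key = IntField(pk=True)
    #         name = StrField(default="")
    #
    #     employee = Employee(key=1, name="Alice")
    #     employee.name = "Bob"
    #     employee.is_field_modified("name")   # True, the old value is kept internally
    #     employee._rollback()                 # restores name to "Alice"
    #     employee._persist()                  # clears the persistent dictionary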
def __init__(self, **kwargs: T_co):
"""
Instantiates the model by matching `kwargs` parameters to field names.
Behavior is disabled when init=False in the model Meta class.
:param kwargs: The dictionary of keyword arguments matching the field names of
the model class.
:raises ValueError: When invalid arguments are provided.
"""
meta = self._meta
if not meta.init:
return
for arg_name, value in kwargs.items():
field_object = meta.fields.get(arg_name, None)
if not field_object:
if not meta.init_ignore_extra:
raise ValueError(
"Invalid argument provided to constructor of"
f" `{self.__class__.__name__}`: {arg_name}"
)
continue # pragma: no cover
if not field_object.init:
if not meta.init_ignore_extra:
raise ValueError(f"Attribute `{arg_name}` cannot be initialized.")
continue # pragma: no cover
field_object.__set__(self, value)
@property
def _state(self) -> ModelState:
"""
Returns the state of the current model.
:return: The ModelState representation.
"""
return self.__state
@_state.setter
def _state(self, state: ModelState):
self.__state = state
@property
def primary_keys(self) -> Dict[str, T_co]:
"""
Returns a dictionary containing all primary keys. The keys will be
ordered in the same order as they are declared in the model type,
also following the order in which they appear in class inheritance.
This property is optimized to minimize the number of iterations done
in the model instance by internalizing a cache with the latest retrieved
primary keys. This cache is reset for every modification of a primary
key and recovered during the next call to the property.
:return: The ordered tuple of values.
"""
if self.__primary_keys is None:
self.__primary_keys = self._load_primary_keys()
return self.__primary_keys
def _load_primary_keys(self) -> Dict[str, T_co]:
"""
Returns a dictionary containing the primary key fields (keys) and their
current values in the model (values). This operation will inspect the
instance and collect all current values on-spot.
:return: Dictionary of primary keys values.
"""
return {key: getattr(self, key) for key in self._meta.primary_keys}
def _reset_primary_keys(self):
"""
Resets the internal cache of primary keys for the instance.
"""
self.__primary_keys = None
def get_children(
self,
recursive: bool = False,
children: Optional[Set[BaseModel]] = None,
top_level: Optional[BaseModel] = None,
) -> Set[BaseModel]:
"""
Returns the list of all children of the current model. This algorithm checks in
        runtime for all objects referred to by the instance that are part of fields
marked with depends_on=True. When `recursive` is True, then the algorithm will
recursively search through all children.
`children` and `top_level` are control variables that indicate which models
have already been inspected by this function, in order to avoid infinite
recursion if any circular dependency exists. In most cases, they should be left
empty.
:param recursive: If True, recursively searches for children. Returns only
first degree relationships otherwise. Defaults to False.
:param children: List of existing models already inspected.
:param top_level: The top-level model from where inspection started.
:return: The list of children.
"""
if children is None:
children = set()
if top_level:
if self == top_level:
return children
children.add(self)
else:
top_level = self
for value in self.dependency_fields.values():
def check(child: Optional[BaseModel]):
# this can happen when the field allows none
if not child or child in children: # type: ignore
return
if recursive:
child.get_children(recursive, children, top_level)
else:
children.add(child)
# iterables are only supported if the values are not iterables - there is
# no recursiveness
if isinstance(value, Iterable):
value: Iterable[Any]
for item in value:
check(item)
else:
check(value)
return children
@property
def fields(self) -> Dict[str, Any]:
"""
Returns the values of each field in the model instance.
:return: A dict with keys containing the string names of the fields,
and values containing the value of the corresponding field.
"""
return {k: getattr(self, k) for k in self._filter_fields(lambda v: True)}
@property
def dependency_fields(self) -> Dict[str, Any]:
"""
Returns the values of each field that have relationship with other models.
:return: The dictionary of fields and their values
"""
return {
k: getattr(self, k) for k in self._filter_fields(lambda v: v.depends_on)
}
def is_field_modified(self, field_name: str) -> bool:
"""
        Indicates whether the field with name `field_name` has been modified.
:param field_name: The name of the field.
:raises ValueError: When the field name does not exist.
:return: True if field is modified, False otherwise.
"""
if field_name not in self._meta.fields:
            raise ValueError(
                f"Field `{field_name}` does not exist in model"
                f" `{self.__class__.__name__}`."
            )
return field_name in self._persistent_values
def _filter_fields(self, filt: Callable[[Field], bool]):
return {k: v for k, v in self._meta.fields.items() if filt(v)}
def _rollback(self):
"""
Restore the persistent values in the model to their original attributes.
"""
for attr, value in list(self._persistent_values.items()):
setattr(self, attr, value)
self._persist()
def _persist(self):
"""
Persists the current attribute values by emptying the internal persistent
dictionary. Once this is called, it is not possible to rollback to the old
values anymore. It is recommended that this method should only be called by the
party that persisted the values on the remote server.
"""
self._persistent_values = {}
def _pre_update(self, field: Field[T_co], value: T_co):
self._listener.dispatch(MODEL_PRE_UPDATE_EVENT, self, field, value)
def _update(self, field: Field[T_co], value: T_co):
if field.pk:
self._reset_primary_keys()
self._listener.dispatch(MODEL_UPDATE_EVENT, self, field, value)
def _update_persistent_values(self, field: Field[T_co], value: T_co):
name: str = field.name
if name in self._persistent_values:
if value == self._persistent_values[name]:
del self._persistent_values[name]
else:
mutable_fields = self.fields
if value != mutable_fields[name]:
self._persistent_values[name] = mutable_fields[name]
def __eq__(self, other: BaseModel) -> bool:
return isinstance(other, self.__class__) and self._hash == other._hash
def __repr__(self) -> str:
if not self._meta.repr:
return super().__repr__()
def get_field_repr(field: str):
value = getattr(self, field)
return f"{field}={_repr_obj.repr(value)}"
repr_args: List[str] = [
get_field_repr(n) for n in self._filter_fields(lambda x: x.repr)
]
        return f"{self.__class__.__name__}({', '.join(repr_args)})"
def __hash__(self) -> int:
return self._hash
|
from __future__ import annotations
from collections.abc import Iterable
from reprlib import Repr
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Set, Tuple, Type
from uuid import UUID, uuid4
from restio.event import EventListener
from restio.fields.base import Field, T_co
from restio.shared import (
CURRENT_SESSION,
MODEL_INSTANTIATED_EVENT,
MODEL_PRE_UPDATE_EVENT,
MODEL_TYPE_REGISTRY,
MODEL_UPDATE_EVENT,
)
from restio.state import ModelState
if TYPE_CHECKING:
from restio.session import Session
def _check_model_type(obj: Optional[BaseModel]):
if not isinstance(obj, BaseModel):
raise TypeError("The provided object is not of type BaseModel.")
class ModelMeta:
__slots__ = ("init", "init_ignore_extra", "repr", "fields", "primary_keys", "alias")
init: bool
init_ignore_extra: bool
repr: bool
fields: Dict[str, Field]
primary_keys: Dict[str, Field]
alias: Optional[str]
def __init__(self):
self.init = True
self.init_ignore_extra = True
self.repr = True
self.fields = dict()
self.primary_keys = dict()
self.alias = None
# Meta attributes that don't get inherited from parent classes
__MODEL_META_NOT_INHERITED__ = ("alias",)
# Read-only meta attributes, can't be modified by model class
__MODEL_META_READONLY__ = ("fields", "primary_keys")
class BaseModelMeta(type):
__slots__ = ()
"""
    BaseModel metaclass. Responsible for internally caching the data schema in a BaseModel
subclass by identifying fields and primary keys.
"""
def __new__(cls, name: str, bases: Tuple[Type, ...], dct: Dict[str, Any]):
# internal fields not initialized in BaseModel
dct["_internal_id"] = None
dct["_hash"] = None
dct["_listener"] = None
dct["_persistent_values"] = None
# prepares metadata for the model type
meta = ModelMeta()
dct["_meta"] = meta
def _update_meta(
_meta: Optional[ModelMeta],
extend: bool,
not_inherited: Tuple[str, ...] = tuple(),
):
if not _meta:
return
propagate_meta = (
set(meta.__slots__) - set(__MODEL_META_READONLY__) - set(not_inherited)
)
for meta_attribute in propagate_meta:
if not hasattr(_meta, meta_attribute):
continue
setattr(meta, meta_attribute, getattr(_meta, meta_attribute))
# excluded meta, needs to be propagated manually
if extend:
meta.fields.update(_meta.fields)
meta.primary_keys.update(_meta.primary_keys)
base: Type[BaseModel]
for base in bases:
if not hasattr(base, "_meta"):
continue
_update_meta(base._meta, True, __MODEL_META_NOT_INHERITED__)
_update_meta(dct.get("Meta", None), False)
# process class fields
for field_name, field_value in dct.items():
if not isinstance(field_value, Field):
continue
meta.fields[field_name] = field_value
if field_value.pk:
meta.primary_keys[field_name] = field_value
# set alias name to class name when None
name_alias = meta.alias or name
# validate if the alias is not duplicate
# the caveat here is that two classes with the same name in two
# different files will have a name collision and fail initializing
if name_alias in MODEL_TYPE_REGISTRY:
raise ValueError(
f"Model alias `{name_alias}` is already used by another class."
)
cls_object = super().__new__(cls, name, bases, dct)
# set the model alias to the model type
if name_alias != "BaseModel":
MODEL_TYPE_REGISTRY[name_alias] = cls_object
return cls_object
def __call__(self, *args, **kwargs):
instance: BaseModel = super().__call__(*args, **kwargs)
# stores the default after the constructor, if nothing has been set yet
# this is implemented here so that this is always called, regardless of the
# models with custom constructors calling or not super().__init__()
for field in instance._meta.fields.values():
field._store_default(instance, force=False)
instance._internal_id = uuid4()
instance._hash = hash((instance.__class__, str(instance._internal_id)))
instance._persistent_values = {}
instance._listener = EventListener()
instance._initialized = True
session = CURRENT_SESSION.get()
if session:
session._listener.dispatch(MODEL_INSTANTIATED_EVENT, instance)
return instance
_repr_obj: Repr = Repr()
_repr_obj.maxother = 200
class BaseModel(metaclass=BaseModelMeta):
"""
A representation of a remote object model.
BaseModel is an abstract class that should be extended to represent models incoming
from or outgoing to a remote REST API.
Models can exist independently from Sessions but contain an internal state that
indicates the status of the model within the current context. The Sessions are
responsible to control this state. Also, each model contains a set of control
attributes that indicate which fields are watched by restio internals. By default,
all Field descriptors in the model will become field attributes. Fields declared
with pk=True will be used by restio to optimize the caching of the models in a
Session.
Models that change over time will contain an internal dictionary with the latest
    known persistent value of each field. This is done to guarantee fast rollback of the
values when the Session is invalid, and to also indicate which values might have
changed within the session scope. If a field is modified directly, the model will
intercept the change and save the older value into the persistent dictionary until
`_persist` is called. During a `_rollback` call, however, the stored values are
re-assigned to their original attributes. Each attribute change will also dispatch
an update event so that the session is aware of changes and manages the model's
internal state accordingly. The persistent dictionary (through the helper method
    `is_field_modified`) can also be used by DAOs to verify which values were updated
prior to sending a request through the REST API, thus allowing for proper
optimization and minimizing chances of conflicting changes on the remote object.
All models automatically generate a random internal UUID when created. This UUID is
used internally for comparison purposes, and externally as an identity. Although
this attribute is not explicitly set as private, it should never be modified.
"""
# these are all initialized by the metaclass
_meta: ModelMeta
__state: ModelState = ModelState.UNBOUND
__primary_keys: Optional[Dict[str, Any]] = None
_initialized: bool = False
_internal_id: UUID
_hash: int
_persistent_values: Dict[str, Any]
_listener: EventListener
def __init__(self, **kwargs: T_co):
"""
Instantiates the model by matching `kwargs` parameters to field names.
Behavior is disabled when init=False in the model Meta class.
:param kwargs: The dictionary of keyword arguments matching the field names of
the model class.
:raises ValueError: When invalid arguments are provided.
"""
meta = self._meta
if not meta.init:
return
for arg_name, value in kwargs.items():
field_object = meta.fields.get(arg_name, None)
if not field_object:
if not meta.init_ignore_extra:
raise ValueError(
"Invalid argument provided to constructor of"
f" `{self.__class__.__name__}`: {arg_name}"
)
continue # pragma: no cover
if not field_object.init:
if not meta.init_ignore_extra:
raise ValueError(f"Attribute `{arg_name}` cannot be initialized.")
continue # pragma: no cover
field_object.__set__(self, value)
@property
def _state(self) -> ModelState:
"""
Returns the state of the current model.
:return: The ModelState representation.
"""
return self.__state
@_state.setter
def _state(self, state: ModelState):
self.__state = state
@property
def primary_keys(self) -> Dict[str, T_co]:
"""
Returns a dictionary containing all primary keys. The keys will be
ordered in the same order as they are declared in the model type,
also following the order in which they appear in class inheritance.
This property is optimized to minimize the number of iterations done
in the model instance by internalizing a cache with the latest retrieved
primary keys. This cache is reset for every modification of a primary
key and recovered during the next call to the property.
:return: The ordered tuple of values.
"""
if self.__primary_keys is None:
self.__primary_keys = self._load_primary_keys()
return self.__primary_keys
def _load_primary_keys(self) -> Dict[str, T_co]:
"""
Returns a dictionary containing the primary key fields (keys) and their
current values in the model (values). This operation will inspect the
instance and collect all current values on-spot.
:return: Dictionary of primary keys values.
"""
return {key: getattr(self, key) for key in self._meta.primary_keys}
def _reset_primary_keys(self):
"""
Resets the internal cache of primary keys for the instance.
"""
self.__primary_keys = None
def get_children(
self,
recursive: bool = False,
children: Optional[Set[BaseModel]] = None,
top_level: Optional[BaseModel] = None,
) -> Set[BaseModel]:
"""
Returns the list of all children of the current model. This algorithm checks in
        runtime for all objects referred to by the instance that are part of fields
marked with depends_on=True. When `recursive` is True, then the algorithm will
recursively search through all children.
`children` and `top_level` are control variables that indicate which models
have already been inspected by this function, in order to avoid infinite
recursion if any circular dependency exists. In most cases, they should be left
empty.
:param recursive: If True, recursively searches for children. Returns only
first degree relationships otherwise. Defaults to False.
:param children: List of existing models already inspected.
:param top_level: The top-level model from where inspection started.
:return: The list of children.
"""
if children is None:
children = set()
if top_level:
if self == top_level:
return children
children.add(self)
else:
top_level = self
for value in self.dependency_fields.values():
def check(child: Optional[BaseModel]):
# this can happen when the field allows none
if not child or child in children: # type: ignore
return
if recursive:
child.get_children(recursive, children, top_level)
else:
children.add(child)
# iterables are only supported if the values are not iterables - there is
# no recursiveness
if isinstance(value, Iterable):
value: Iterable[Any]
for item in value:
check(item)
else:
check(value)
return children
@property
def fields(self) -> Dict[str, Any]:
"""
Returns the values of each field in the model instance.
:return: A dict with keys containing the string names of the fields,
and values containing the value of the corresponding field.
"""
return {k: getattr(self, k) for k in self._filter_fields(lambda v: True)}
@property
def dependency_fields(self) -> Dict[str, Any]:
"""
Returns the values of each field that have relationship with other models.
:return: The dictionary of fields and their values
"""
return {
k: getattr(self, k) for k in self._filter_fields(lambda v: v.depends_on)
}
def is_field_modified(self, field_name: str) -> bool:
"""
        Indicates whether the field with name `field_name` has been modified.
:param field_name: The name of the field.
:raises ValueError: When the field name does not exist.
:return: True if field is modified, False otherwise.
"""
if field_name not in self._meta.fields:
            raise ValueError(
                f"Field `{field_name}` does not exist in model"
                f" `{self.__class__.__name__}`."
            )
return field_name in self._persistent_values
def _filter_fields(self, filt: Callable[[Field], bool]):
return {k: v for k, v in self._meta.fields.items() if filt(v)}
def _rollback(self):
"""
Restore the persistent values in the model to their original attributes.
"""
for attr, value in list(self._persistent_values.items()):
setattr(self, attr, value)
self._persist()
def _persist(self):
"""
Persists the current attribute values by emptying the internal persistent
dictionary. Once this is called, it is not possible to rollback to the old
values anymore. It is recommended that this method should only be called by the
party that persisted the values on the remote server.
"""
self._persistent_values = {}
def _pre_update(self, field: Field[T_co], value: T_co):
self._listener.dispatch(MODEL_PRE_UPDATE_EVENT, self, field, value)
def _update(self, field: Field[T_co], value: T_co):
if field.pk:
self._reset_primary_keys()
self._listener.dispatch(MODEL_UPDATE_EVENT, self, field, value)
def _update_persistent_values(self, field: Field[T_co], value: T_co):
name: str = field.name
if name in self._persistent_values:
if value == self._persistent_values[name]:
del self._persistent_values[name]
else:
mutable_fields = self.fields
if value != mutable_fields[name]:
self._persistent_values[name] = mutable_fields[name]
def __eq__(self, other: BaseModel) -> bool:
return isinstance(other, self.__class__) and self._hash == other._hash
def __repr__(self) -> str:
if not self._meta.repr:
return super().__repr__()
def get_field_repr(field: str):
value = getattr(self, field)
return f"{field}={_repr_obj.repr(value)}"
repr_args: List[str] = [
get_field_repr(n) for n in self._filter_fields(lambda x: x.repr)
]
return f"{self.__class__.__name__}({', '.join(repr_args)})"
def __hash__(self) -> int:
return self._hash
|
"""Example Extorter, useful as a starting point"""
import typing
import logging
import dataclasses
import datetime
# 3rdparty
import slugify
# We use ibflex
from ibflex import parser, FlexStatement, CashAction
from coolbeans.extort.base import ExtortionProtocol
from coolbeans.tools.seeds import Trade, Transfer, Expense, Income, EventDetail
logger = logging.getLogger(__name__)
def trade_key(trade):
if trade.openCloseIndicator:
o = trade.openCloseIndicator.name + ':'
else:
o = ''
    return f"{o}{trade.tradeDate.strftime('%Y-%m-%d')}:{trade.ibOrderID}"
def clean_symbol(symbol: str) -> str:
symbol = slugify.slugify(symbol, separator='_')
if symbol[0].isdigit():
symbol = "X" + symbol
symbol = symbol.upper()
return symbol
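# Illustrative examples of clean_symbol, assuming the usual behaviour of python-slugify
# (lower-cases the text and replaces non-alphanumeric characters with the separator),
# followed by the upper-casing above:
#   clean_symbol("BRK.B") -> "BRK_B"
#   clean_symbol("9988")  -> "X9988"   (prefixed with "X" because it starts with a digit)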
class Extorter(ExtortionProtocol):
    FILE_OPEN_MODE = None  # This requires a file name, not an already-opened stream
ib_account_id = ""
def extort(self, stream: typing.Union[typing.IO[typing.AnyStr], str]):
"""Extract as much information as possible from the workbook"""
for statement in parser.parse(stream).FlexStatements:
for record in self.extract_cash(statement):
yield dataclasses.asdict(record)
for trade in self.extract_trades(statement):
yield dataclasses.asdict(trade)
@staticmethod
def extract_cash(statement: FlexStatement):
"""
Args:
statement: The Statement to extract entries from
Returns:
iterator of DataClass instances for these records
"""
for record in statement.CashTransactions:
date = record.dateTime
if record.type in (
CashAction.DEPOSITWITHDRAW,
):
yield Transfer(
id=record.transactionID,
date=date,
amount=record.amount,
currency=record.currency,
subaccount=record.accountId,
narration=record.description,
event_detail=EventDetail.TRANSFER_DEPOSIT.name if record.amount > 0 else EventDetail.TRANSFER_WITHDRAWAL.name,
meta={
'type': record.type.value,
'rate': record.fxRateToBase
}
)
elif record.amount < 0:
event_detail = EventDetail.EXPENSE_FEES
if record.type in (CashAction.BONDINTPAID, CashAction.BROKERINTPAID):
event_detail = EventDetail.EXPENSE_INTEREST
if record.type == CashAction.WHTAX:
event_detail = EventDetail.EXPENSE_TAX
yield Expense(
id=record.transactionID,
date=date,
amount=record.amount,
event_detail=event_detail,
currency=record.currency,
subaccount=record.accountId,
narration=record.description,
meta={
'type': record.type.value,
'rate': record.fxRateToBase
}
)
else:
yield Income(
id=record.transactionID,
date=date,
amount=record.amount,
currency=record.currency,
subaccount=record.accountId,
narration=record.description,
meta={
'type': record.type.value,
'rate': record.fxRateToBase
}
)
@staticmethod
def extract_trades(statement: FlexStatement):
"""Pull Trades from a FlexStatement
"""
by_order: typing.Dict[str, Trade] = {}
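        # Trades are grouped by trade_key(), that is by open/close indicator, trade date and
        # IB order id, so partial fills of the same order are merged into one Trade seed
        # via add_trade().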
for trade in statement.Trades:
key = trade_key(trade)
assert key.strip(), f"Invalid Key {len(key)}"
if not trade.openCloseIndicator:
# This isn't a trade at all.
continue
if key in by_order:
combined = by_order[key]
combined.add_trade(
quantity=trade.quantity * trade.multiplier,
price=trade.tradePrice,
fees=trade.ibCommission
)
else:
seed = Trade(
id=key,
date=trade.tradeDate,
price=trade.tradePrice,
currency=trade.currency,
quantity=trade.quantity * trade.multiplier,
commodity=clean_symbol(trade.symbol),
fees=trade.ibCommission,
fees_currency=trade.ibCommissionCurrency,
subaccount=trade.accountId,
event_detail=EventDetail.TRADE_OPEN if trade.openCloseIndicator.name == 'OPEN' else EventDetail.TRADE_CLOSE,
meta={
'exchange': trade.exchange,
'symbol': trade.symbol,
}
)
by_order[key] = seed
for trade in by_order.values():
yield trade
# if trade.securityID is None and "." in trade.symbol:
# # FOREX Trade, not really a valid Symbol at all
# # TODO: Better check than blank securityID
# # Usually [currency].[commodity]. For example GBP.JPY
# # In that case trade.currency is JPY, so we just need to parse out the GBP part
# safe_symbol, _ = trade.symbol.split('.')
# else:
# safe_symbol = self.clean_symbol(trade.symbol)
|
"""Example Extorter, useful as a starting point"""
import typing
import logging
import dataclasses
import datetime
# 3rdparty
import slugify
# We use ibflex
from ibflex import parser, FlexStatement, CashAction
from coolbeans.extort.base import ExtortionProtocol
from coolbeans.tools.seeds import Trade, Transfer, Expense, Income, EventDetail
logger = logging.getLogger(__name__)
def trade_key(trade):
if trade.openCloseIndicator:
o = trade.openCloseIndicator.name + ':'
else:
o = ''
return f"{o}{trade.tradeDate.strftime('%Y-%m-%d')}:{trade.ibOrderID}"
def clean_symbol(symbol: str) -> str:
symbol = slugify.slugify(symbol, separator='_')
if symbol[0].isdigit():
symbol = "X" + symbol
symbol = symbol.upper()
return symbol
class Extorter(ExtortionProtocol):
    FILE_OPEN_MODE = None  # This requires a file name, not an already-opened stream
ib_account_id = ""
def extort(self, stream: typing.Union[typing.IO[typing.AnyStr], str]):
"""Extract as much information as possible from the workbook"""
for statement in parser.parse(stream).FlexStatements:
for record in self.extract_cash(statement):
yield dataclasses.asdict(record)
for trade in self.extract_trades(statement):
yield dataclasses.asdict(trade)
@staticmethod
def extract_cash(statement: FlexStatement):
"""
Args:
statement: The Statement to extract entries from
Returns:
iterator of DataClass instances for these records
"""
for record in statement.CashTransactions:
date = record.dateTime
if record.type in (
CashAction.DEPOSITWITHDRAW,
):
yield Transfer(
id=record.transactionID,
date=date,
amount=record.amount,
currency=record.currency,
subaccount=record.accountId,
narration=record.description,
event_detail=EventDetail.TRANSFER_DEPOSIT.name if record.amount > 0 else EventDetail.TRANSFER_WITHDRAWAL.name,
meta={
'type': record.type.value,
'rate': record.fxRateToBase
}
)
elif record.amount < 0:
event_detail = EventDetail.EXPENSE_FEES
if record.type in (CashAction.BONDINTPAID, CashAction.BROKERINTPAID):
event_detail = EventDetail.EXPENSE_INTEREST
if record.type == CashAction.WHTAX:
event_detail = EventDetail.EXPENSE_TAX
yield Expense(
id=record.transactionID,
date=date,
amount=record.amount,
event_detail=event_detail,
currency=record.currency,
subaccount=record.accountId,
narration=record.description,
meta={
'type': record.type.value,
'rate': record.fxRateToBase
}
)
else:
yield Income(
id=record.transactionID,
date=date,
amount=record.amount,
currency=record.currency,
subaccount=record.accountId,
narration=record.description,
meta={
'type': record.type.value,
'rate': record.fxRateToBase
}
)
@staticmethod
def extract_trades(statement: FlexStatement):
"""Pull Trades from a FlexStatement
"""
by_order: typing.Dict[str, Trade] = {}
for trade in statement.Trades:
key = trade_key(trade)
assert key.strip(), f"Invalid Key {len(key)}"
if not trade.openCloseIndicator:
# This isn't a trade at all.
continue
if key in by_order:
combined = by_order[key]
combined.add_trade(
quantity=trade.quantity * trade.multiplier,
price=trade.tradePrice,
fees=trade.ibCommission
)
else:
seed = Trade(
id=key,
date=trade.tradeDate,
price=trade.tradePrice,
currency=trade.currency,
quantity=trade.quantity * trade.multiplier,
commodity=clean_symbol(trade.symbol),
fees=trade.ibCommission,
fees_currency=trade.ibCommissionCurrency,
subaccount=trade.accountId,
event_detail=EventDetail.TRADE_OPEN if trade.openCloseIndicator.name == 'OPEN' else EventDetail.TRADE_CLOSE,
meta={
'exchange': trade.exchange,
'symbol': trade.symbol,
}
)
by_order[key] = seed
for trade in by_order.values():
yield trade
# if trade.securityID is None and "." in trade.symbol:
# # FOREX Trade, not really a valid Symbol at all
# # TODO: Better check than blank securityID
# # Usually [currency].[commodity]. For example GBP.JPY
# # In that case trade.currency is JPY, so we just need to parse out the GBP part
# safe_symbol, _ = trade.symbol.split('.')
# else:
# safe_symbol = self.clean_symbol(trade.symbol)
|
#
# Copyright(c) 2019-2020 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import random
from itertools import permutations
import pytest
from api.cas.ioclass_config import IoClass
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools import fs_utils
from test_tools.dd import Dd
from test_tools.disk_utils import Filesystem
from test_tools.fio.fio import Fio
from test_tools.fio.fio_param import ReadWrite, IoEngine
from test_utils.filesystem.file import File
from test_utils.os_utils import sync, Udev
from .io_class_common import *
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_lba():
"""Write data to random lba and check if it is cached according to range
defined in ioclass rule"""
cache, core = prepare()
ioclass_id = 1
min_cached_lba = 56
max_cached_lba = 200
iterations = 100
dd_size = Size(1, Unit.Blocks512)
dd_count = 1
# Prepare ioclass config
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"lba:ge:{min_cached_lba}&lba:le:{max_cached_lba}&done",
ioclass_config_path=ioclass_config_path,
)
# Prepare cache for test
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
cache.flush_cache()
# Check if lbas from defined range are cached
dirty_count = 0
# '8' step is set to prevent writing cache line more than once
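    # (8 sectors x 512 B = 4 KiB, which matches the default Open CAS cache line size; this
    # assumes the cache was started with the default cache line size)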
TestRun.LOGGER.info(f"Writing to one sector in each cache line from range.")
for lba in range(min_cached_lba, max_cached_lba, 8):
dd = (
Dd()
.input("/dev/zero")
.output(f"{core.system_path}")
.count(dd_count)
.block_size(dd_size)
.seek(lba)
)
dd.run()
sync()
dirty_count += 1
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != dirty_count:
TestRun.LOGGER.error(f"LBA {lba} not cached")
cache.flush_cache()
# Check if lba outside of defined range are not cached
TestRun.LOGGER.info(f"Writing to random sectors outside of cached range.")
for i in range(iterations):
rand_lba = random.randrange(2000)
if min_cached_lba <= rand_lba <= max_cached_lba:
continue
dd = (
Dd()
.input("/dev/zero")
.output(f"{core.system_path}")
.count(dd_count)
.block_size(dd_size)
.seek(rand_lba)
)
dd.run()
sync()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != 0:
TestRun.LOGGER.error(f"Inappropriately cached lba: {rand_lba}")
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_request_size():
cache, core = prepare()
ioclass_id = 1
iterations = 100
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"request_size:ge:8192&request_size:le:16384&done",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
Udev.disable()
# Check if requests with appropriate size are cached
TestRun.LOGGER.info(
f"Check if requests with size within defined range are cached"
)
cached_req_sizes = [Size(2, Unit.Blocks4096), Size(4, Unit.Blocks4096)]
for i in range(iterations):
cache.flush_cache()
req_size = random.choice(cached_req_sizes)
dd = (
Dd()
.input("/dev/zero")
.output(core.system_path)
.count(1)
.block_size(req_size)
.oflag("direct")
)
dd.run()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != req_size.value / Unit.Blocks4096.value:
TestRun.fail("Incorrect number of dirty blocks!")
cache.flush_cache()
# Check if requests with inappropriate size are not cached
TestRun.LOGGER.info(
f"Check if requests with size outside defined range are not cached"
)
not_cached_req_sizes = [
Size(1, Unit.Blocks4096),
Size(8, Unit.Blocks4096),
Size(16, Unit.Blocks4096),
]
for i in range(iterations):
req_size = random.choice(not_cached_req_sizes)
dd = (
Dd()
.input("/dev/zero")
.output(core.system_path)
.count(1)
.block_size(req_size)
.oflag("direct")
)
dd.run()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != 0:
TestRun.fail("Dirty data present!")
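# Illustrative sketch (not used by the test): which of the request sizes used above fall
# inside the "request_size:ge:8192&request_size:le:16384" rule, with sizes expressed in
# 4096 B blocks as in cached_req_sizes / not_cached_req_sizes.
def _request_size_rule_matches(blocks_4k, lo_bytes=8192, hi_bytes=16384):
    return lo_bytes <= blocks_4k * 4096 <= hi_bytes
assert [b for b in (1, 2, 4, 8, 16) if _request_size_rule_matches(b)] == [2, 4]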
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("filesystem", list(Filesystem) + [False])
def test_ioclass_direct(filesystem):
"""
Perform buffered/direct IO to/from files or raw block device.
Data from direct IO should be cached.
Data from buffered IO should not be cached and, if performed to/from already cached data,
should cause reclassification to the unclassified IO class.
"""
cache, core = prepare()
Udev.disable()
ioclass_id = 1
io_size = Size(random.randint(1000, 2000), Unit.Blocks4096)
# direct IO class
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule="direct",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
fio = (
Fio().create_command()
.io_engine(IoEngine.libaio)
.size(io_size)
.offset(io_size)
.read_write(ReadWrite.write)
.target(f"{mountpoint}/tmp_file" if filesystem else core.system_path)
)
if filesystem:
TestRun.LOGGER.info(
f"Preparing {filesystem.name} filesystem and mounting {core.system_path} at"
f" {mountpoint}"
)
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
else:
TestRun.LOGGER.info("Testing on raw exported object")
base_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
TestRun.LOGGER.info(f"Buffered writes to {'file' if filesystem else 'device'}")
fio.run()
sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
if new_occupancy != base_occupancy:
TestRun.fail("Buffered writes were cached!\n"
f"Expected: {base_occupancy}, actual: {new_occupancy}")
TestRun.LOGGER.info(f"Direct writes to {'file' if filesystem else 'device'}")
fio.direct()
fio.run()
sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
if new_occupancy != base_occupancy + io_size:
TestRun.fail("Wrong number of direct writes was cached!\n"
f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}")
TestRun.LOGGER.info(f"Buffered reads from {'file' if filesystem else 'device'}")
fio.remove_param("readwrite").remove_param("direct")
fio.read_write(ReadWrite.read)
fio.run()
sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
if new_occupancy != base_occupancy:
TestRun.fail("Buffered reads did not cause reclassification!"
f"Expected occupancy: {base_occupancy}, actual: {new_occupancy}")
TestRun.LOGGER.info(f"Direct reads from {'file' if filesystem else 'device'}")
fio.direct()
fio.run()
sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
if new_occupancy != base_occupancy + io_size:
TestRun.fail("Wrong number of direct reads was cached!\n"
f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}")
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_metadata(filesystem):
"""
Perform operations on files that cause metadata update.
Determine if every such operation results in increased writes to cached metadata.
Exact values may not be tested as each file system has different metadata structure.
"""
cache, core = prepare()
Udev.disable()
ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
# metadata IO class
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule="metadata&done",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
requests_to_metadata_before = cache.get_io_class_statistics(
io_class_id=ioclass_id).request_stats.write
TestRun.LOGGER.info("Creating 20 test files")
files = []
for i in range(1, 21):
file_path = f"{mountpoint}/test_file_{i}"
dd = (
Dd()
.input("/dev/urandom")
.output(file_path)
.count(random.randint(5, 50))
.block_size(Size(1, Unit.MebiByte))
.oflag("sync")
)
dd.run()
files.append(File(file_path))
TestRun.LOGGER.info("Checking requests to metadata")
requests_to_metadata_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).request_stats.write
if requests_to_metadata_after == requests_to_metadata_before:
TestRun.fail("No requests to metadata while creating files!")
requests_to_metadata_before = requests_to_metadata_after
TestRun.LOGGER.info("Renaming all test files")
for file in files:
file.move(f"{file.full_path}_renamed")
sync()
TestRun.LOGGER.info("Checking requests to metadata")
requests_to_metadata_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).request_stats.write
if requests_to_metadata_after == requests_to_metadata_before:
TestRun.fail("No requests to metadata while renaming files!")
requests_to_metadata_before = requests_to_metadata_after
test_dir_path = f"{mountpoint}/test_dir"
TestRun.LOGGER.info(f"Creating directory {test_dir_path}")
fs_utils.create_directory(path=test_dir_path)
TestRun.LOGGER.info(f"Moving test files into {test_dir_path}")
for file in files:
file.move(test_dir_path)
sync()
TestRun.LOGGER.info("Checking requests to metadata")
requests_to_metadata_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).request_stats.write
if requests_to_metadata_after == requests_to_metadata_before:
TestRun.fail("No requests to metadata while moving files!")
TestRun.LOGGER.info(f"Removing {test_dir_path}")
fs_utils.remove(path=test_dir_path, force=True, recursive=True)
TestRun.LOGGER.info("Checking requests to metadata")
requests_to_metadata_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).request_stats.write
if requests_to_metadata_after == requests_to_metadata_before:
TestRun.fail("No requests to metadata while deleting directory with files!")
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_id_as_condition(filesystem):
"""
Load config in which IO class ids are used as conditions in other IO class definitions.
Check if performed IO is properly classified.
"""
cache, core = prepare()
Udev.disable()
base_dir_path = f"{mountpoint}/base_dir"
ioclass_file_size = Size(random.randint(25, 50), Unit.MebiByte)
ioclass_file_size_bytes = int(ioclass_file_size.get_value(Unit.Byte))
# directory condition
ioclass_config.add_ioclass(
ioclass_id=1,
eviction_priority=1,
allocation=True,
rule=f"directory:{base_dir_path}",
ioclass_config_path=ioclass_config_path,
)
# file size condition
ioclass_config.add_ioclass(
ioclass_id=2,
eviction_priority=1,
allocation=True,
rule=f"file_size:eq:{ioclass_file_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
# direct condition
ioclass_config.add_ioclass(
ioclass_id=3,
eviction_priority=1,
allocation=True,
rule="direct",
ioclass_config_path=ioclass_config_path,
)
# IO class 1 OR 2 condition
ioclass_config.add_ioclass(
ioclass_id=4,
eviction_priority=1,
allocation=True,
rule="io_class:1|io_class:2",
ioclass_config_path=ioclass_config_path,
)
# IO class 4 AND file size condition (same as IO class 2)
ioclass_config.add_ioclass(
ioclass_id=5,
eviction_priority=1,
allocation=True,
rule=f"io_class:4&file_size:eq:{ioclass_file_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
# IO class 3 condition
ioclass_config.add_ioclass(
ioclass_id=6,
eviction_priority=1,
allocation=True,
rule="io_class:3",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(filesystem)
core.mount(mountpoint)
fs_utils.create_directory(base_dir_path)
sync()
# IO fulfilling IO class 1 condition (and not IO class 2)
# Should be classified as IO class 4
base_occupancy = cache.get_io_class_statistics(io_class_id=4).usage_stats.occupancy
non_ioclass_file_size = Size(random.randrange(1, 25), Unit.MebiByte)
(Fio().create_command()
.io_engine(IoEngine.libaio)
.size(non_ioclass_file_size)
.read_write(ReadWrite.write)
.target(f"{base_dir_path}/test_file_1")
.run())
sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=4).usage_stats.occupancy
if new_occupancy != base_occupancy + non_ioclass_file_size:
TestRun.fail("Writes were not properly cached!\n"
f"Expected: {base_occupancy + non_ioclass_file_size}, actual: {new_occupancy}")
# IO fulfilling IO class 2 condition (and not IO class 1)
# Should be classified as IO class 5
base_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy
(Fio().create_command()
.io_engine(IoEngine.libaio)
.size(ioclass_file_size)
.read_write(ReadWrite.write)
.target(f"{mountpoint}/test_file_2")
.run())
sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy
if new_occupancy != base_occupancy + ioclass_file_size:
TestRun.fail("Writes were not properly cached!\n"
f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}")
# IO fulfilling IO class 1 and 2 conditions
# Should be classified as IO class 5
base_occupancy = new_occupancy
(Fio().create_command()
.io_engine(IoEngine.libaio)
.size(ioclass_file_size)
.read_write(ReadWrite.write)
.target(f"{base_dir_path}/test_file_3")
.run())
sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy
if new_occupancy != base_occupancy + ioclass_file_size:
TestRun.fail("Writes were not properly cached!\n"
f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}")
# Same IO but direct
# Should be classified as IO class 6
base_occupancy = cache.get_io_class_statistics(io_class_id=6).usage_stats.occupancy
(Fio().create_command()
.io_engine(IoEngine.libaio)
.size(ioclass_file_size)
.read_write(ReadWrite.write)
.target(f"{base_dir_path}/test_file_3")
.direct()
.run())
sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=6).usage_stats.occupancy
if new_occupancy != base_occupancy + ioclass_file_size:
TestRun.fail("Writes were not properly cached!\n"
f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}")
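# Illustrative summary (not used by the test): expected effective IO class for each IO
# pattern exercised above, given the config loaded in this test.
_EXPECTED_CLASS_SKETCH = {
    "buffered write in base_dir, size != ioclass_file_size": 4,       # io_class:1|io_class:2
    "buffered write outside base_dir, size == ioclass_file_size": 5,  # io_class:4 & file_size
    "buffered write in base_dir, size == ioclass_file_size": 5,
    "direct write in base_dir, size == ioclass_file_size": 6,         # io_class:3 (direct)
}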
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_conditions_or(filesystem):
"""
Load config with IO class combining 5 contradicting conditions connected by OR operator.
Check if every IO fulfilling one condition is classified properly.
"""
cache, core = prepare()
Udev.disable()
# directories OR condition
ioclass_config.add_ioclass(
ioclass_id=1,
eviction_priority=1,
allocation=True,
rule=f"directory:{mountpoint}/dir1|directory:{mountpoint}/dir2|directory:"
f"{mountpoint}/dir3|directory:{mountpoint}/dir4|directory:{mountpoint}/dir5",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(filesystem)
core.mount(mountpoint)
for i in range(1, 6):
fs_utils.create_directory(f"{mountpoint}/dir{i}")
sync()
# Perform IO fulfilling each condition and check if occupancy raises
for i in range(1, 6):
file_size = Size(random.randint(25, 50), Unit.MebiByte)
base_occupancy = cache.get_io_class_statistics(io_class_id=1).usage_stats.occupancy
(Fio().create_command()
.io_engine(IoEngine.libaio)
.size(file_size)
.read_write(ReadWrite.write)
.target(f"{mountpoint}/dir{i}/test_file")
.run())
sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=1).usage_stats.occupancy
if new_occupancy != base_occupancy + file_size:
TestRun.fail("Occupancy has not increased correctly!\n"
f"Expected: {base_occupancy + file_size}, actual: {new_occupancy}")
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_conditions_and(filesystem):
"""
Load config with IO class combining 5 conditions contradicting at least one other condition
connected by AND operator.
Check if every IO fulfilling one of the conditions is not classified.
"""
cache, core = prepare()
Udev.disable()
file_size = Size(random.randint(25, 50), Unit.MebiByte)
file_size_bytes = int(file_size.get_value(Unit.Byte))
# directories OR condition
ioclass_config.add_ioclass(
ioclass_id=1,
eviction_priority=1,
allocation=True,
rule=f"file_size:gt:{file_size_bytes}&file_size:lt:{file_size_bytes}&"
f"file_size:ge:{file_size_bytes}&file_size:le:{file_size_bytes}&"
f"file_size:eq:{file_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
base_occupancy = cache.get_io_class_statistics(io_class_id=1).usage_stats.occupancy
# Perform IO
for size in [file_size, file_size + Size(1, Unit.MebiByte), file_size - Size(1, Unit.MebiByte)]:
(Fio().create_command()
.io_engine(IoEngine.libaio)
.size(size)
.read_write(ReadWrite.write)
.target(f"{mountpoint}/test_file")
.run())
sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=1).usage_stats.occupancy
if new_occupancy != base_occupancy:
TestRun.fail("Unexpected occupancy increase!\n"
f"Expected: {base_occupancy}, actual: {new_occupancy}")
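# Illustrative sketch (not used by the test): the AND rule above is unsatisfiable by
# construction, because no file size can be both strictly greater and strictly less than
# the same value, which is why occupancy is expected to stay unchanged for every size.
def _and_rule_matches(size_bytes, ref_bytes):
    return (size_bytes > ref_bytes and size_bytes < ref_bytes
            and size_bytes >= ref_bytes and size_bytes <= ref_bytes and size_bytes == ref_bytes)
assert not any(_and_rule_matches(s, 100) for s in (99, 100, 101))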
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_effective_ioclass(filesystem):
"""
title: Effective IO class with multiple non-exclusive conditions
description: |
Test CAS ability to properly classify IO fulfilling multiple conditions based on
IO class ids and presence of '&done' annotation in IO class rules
pass_criteria:
- In every iteration first IO is classified to the last in order IO class
- In every iteration second IO is classified to the IO class with '&done' annotation
"""
with TestRun.LOGGER.step(f"Test prepare"):
cache, core = prepare()
Udev.disable()
file_size = Size(10, Unit.Blocks4096)
file_size_bytes = int(file_size.get_value(Unit.Byte))
test_dir = f"{mountpoint}/test"
rules = ["direct", # rule contradicting other rules
f"directory:{test_dir}",
f"file_size:le:{2 * file_size_bytes}",
f"file_size:ge:{file_size_bytes // 2}"]
with TestRun.LOGGER.step(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}"):
core.create_filesystem(filesystem)
core.mount(mountpoint)
fs_utils.create_directory(test_dir)
sync()
for i, permutation in TestRun.iteration(enumerate(permutations(range(1, 5)), start=1)):
with TestRun.LOGGER.step("Load IO classes in order specified by permutation"):
load_io_classes_in_permutation_order(rules, permutation, cache)
io_class_id = 3 if rules[permutation.index(4)] == "direct" else 4
with TestRun.LOGGER.step("Perform IO fulfilling the non-contradicting conditions"):
base_occupancy = cache.get_io_class_statistics(
io_class_id=io_class_id).usage_stats.occupancy
fio = (Fio().create_command()
.io_engine(IoEngine.libaio)
.size(file_size)
.read_write(ReadWrite.write)
.target(f"{test_dir}/test_file{i}"))
fio.run()
sync()
with TestRun.LOGGER.step("Check if IO was properly classified "
"(to the last non-contradicting IO class)"):
new_occupancy = cache.get_io_class_statistics(
io_class_id=io_class_id).usage_stats.occupancy
if new_occupancy != base_occupancy + file_size:
TestRun.LOGGER.error("Wrong IO classification!\n"
f"Expected: {base_occupancy + file_size}, "
f"actual: {new_occupancy}")
with TestRun.LOGGER.step("Add '&done' to the second in order non-contradicting condition"):
io_class_id = add_done_to_second_non_exclusive_condition(rules, permutation, cache)
with TestRun.LOGGER.step("Repeat IO"):
base_occupancy = cache.get_io_class_statistics(
io_class_id=io_class_id).usage_stats.occupancy
fio.run()
sync()
with TestRun.LOGGER.step("Check if IO was properly classified "
"(to the IO class with '&done' annotation)"):
new_occupancy = cache.get_io_class_statistics(
io_class_id=io_class_id).usage_stats.occupancy
if new_occupancy != base_occupancy + file_size:
TestRun.LOGGER.error("Wrong IO classification!\n"
f"Expected: {base_occupancy + file_size}, "
f"actual: {new_occupancy}")
def load_io_classes_in_permutation_order(rules, permutation, cache):
ioclass_config.remove_ioclass_config(ioclass_config_path=ioclass_config_path)
ioclass_config.create_ioclass_config(
add_default_rule=False, ioclass_config_path=ioclass_config_path
)
# To make the test more precise, all workloads except the tested io class should be
# put in pass-through mode
ioclass_list = [IoClass.default(allocation=False)]
for n in range(len(rules)):
ioclass_list.append(IoClass(class_id=permutation[n], rule=rules[n]))
IoClass.save_list_to_config_file(ioclass_list,
add_default_rule=False,
ioclass_config_path=ioclass_config_path)
casadm.load_io_classes(cache.cache_id, file=ioclass_config_path)
def add_done_to_second_non_exclusive_condition(rules, permutation, cache):
non_exclusive_conditions = 0
second_class_id = 1
while True:
idx = permutation.index(second_class_id)
if rules[idx] != "direct":
non_exclusive_conditions += 1
if non_exclusive_conditions == 2:
break
second_class_id += 1
fs_utils.replace_first_pattern_occurrence(ioclass_config_path,
rules[idx], f"{rules[idx]}&done")
sync()
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
return second_class_id
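# Worked example (comment only): with rules = ["direct", "directory:<test_dir>",
# "file_size:le:<2*size>", "file_size:ge:<size//2>"] and permutation = (3, 1, 4, 2), the
# rules get class ids 3, 1, 4 and 2 respectively. Walking class ids upwards, id 1 maps to
# the directory rule (first non-"direct" condition) and id 2 maps to the file_size:ge rule,
# so "&done" is appended to the file_size:ge rule and 2 is returned.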
|
#
# Copyright(c) 2019-2020 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import random
from itertools import permutations
import pytest
from api.cas.ioclass_config import IoClass
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
from test_tools import fs_utils
from test_tools.dd import Dd
from test_tools.disk_utils import Filesystem
from test_tools.fio.fio import Fio
from test_tools.fio.fio_param import ReadWrite, IoEngine
from test_utils.filesystem.file import File
from test_utils.os_utils import sync, Udev
from .io_class_common import *
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_lba():
"""Write data to random lba and check if it is cached according to range
defined in ioclass rule"""
cache, core = prepare()
ioclass_id = 1
min_cached_lba = 56
max_cached_lba = 200
iterations = 100
dd_size = Size(1, Unit.Blocks512)
dd_count = 1
# Prepare ioclass config
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"lba:ge:{min_cached_lba}&lba:le:{max_cached_lba}&done",
ioclass_config_path=ioclass_config_path,
)
# Prepare cache for test
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
cache.flush_cache()
# Check if lbas from defined range are cached
dirty_count = 0
# '8' step is set to prevent writing cache line more than once
TestRun.LOGGER.info(f"Writing to one sector in each cache line from range.")
for lba in range(min_cached_lba, max_cached_lba, 8):
dd = (
Dd()
.input("/dev/zero")
.output(f"{core.system_path}")
.count(dd_count)
.block_size(dd_size)
.seek(lba)
)
dd.run()
sync()
dirty_count += 1
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != dirty_count:
TestRun.LOGGER.error(f"LBA {lba} not cached")
cache.flush_cache()
# Check if lba outside of defined range are not cached
TestRun.LOGGER.info(f"Writing to random sectors outside of cached range.")
for i in range(iterations):
rand_lba = random.randrange(2000)
if min_cached_lba <= rand_lba <= max_cached_lba:
continue
dd = (
Dd()
.input("/dev/zero")
.output(f"{core.system_path}")
.count(dd_count)
.block_size(dd_size)
.seek(rand_lba)
)
dd.run()
sync()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != 0:
TestRun.LOGGER.error(f"Inappropriately cached lba: {rand_lba}")
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_ioclass_request_size():
cache, core = prepare()
ioclass_id = 1
iterations = 100
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule=f"request_size:ge:8192&request_size:le:16384&done",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
Udev.disable()
# Check if requests with appropriate size are cached
TestRun.LOGGER.info(
f"Check if requests with size within defined range are cached"
)
cached_req_sizes = [Size(2, Unit.Blocks4096), Size(4, Unit.Blocks4096)]
for i in range(iterations):
cache.flush_cache()
req_size = random.choice(cached_req_sizes)
dd = (
Dd()
.input("/dev/zero")
.output(core.system_path)
.count(1)
.block_size(req_size)
.oflag("direct")
)
dd.run()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != req_size.value / Unit.Blocks4096.value:
TestRun.fail("Incorrect number of dirty blocks!")
cache.flush_cache()
# Check if requests with inappropriate size are not cached
TestRun.LOGGER.info(
f"Check if requests with size outside defined range are not cached"
)
not_cached_req_sizes = [
Size(1, Unit.Blocks4096),
Size(8, Unit.Blocks4096),
Size(16, Unit.Blocks4096),
]
for i in range(iterations):
req_size = random.choice(not_cached_req_sizes)
dd = (
Dd()
.input("/dev/zero")
.output(core.system_path)
.count(1)
.block_size(req_size)
.oflag("direct")
)
dd.run()
dirty = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.dirty
if dirty.get_value(Unit.Blocks4096) != 0:
TestRun.fail("Dirty data present!")
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("filesystem", list(Filesystem) + [False])
def test_ioclass_direct(filesystem):
"""
Perform buffered/direct IO to/from files or raw block device.
Data from direct IO should be cached.
Data from buffered IO should not be cached and, if performed to/from already cached data,
should cause reclassification to the unclassified IO class.
"""
cache, core = prepare()
Udev.disable()
ioclass_id = 1
io_size = Size(random.randint(1000, 2000), Unit.Blocks4096)
# direct IO class
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule="direct",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
fio = (
Fio().create_command()
.io_engine(IoEngine.libaio)
.size(io_size)
.offset(io_size)
.read_write(ReadWrite.write)
.target(f"{mountpoint}/tmp_file" if filesystem else core.system_path)
)
if filesystem:
TestRun.LOGGER.info(
f"Preparing {filesystem.name} filesystem and mounting {core.system_path} at"
f" {mountpoint}"
)
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
else:
TestRun.LOGGER.info("Testing on raw exported object")
base_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
TestRun.LOGGER.info(f"Buffered writes to {'file' if filesystem else 'device'}")
fio.run()
sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
if new_occupancy != base_occupancy:
TestRun.fail("Buffered writes were cached!\n"
f"Expected: {base_occupancy}, actual: {new_occupancy}")
TestRun.LOGGER.info(f"Direct writes to {'file' if filesystem else 'device'}")
fio.direct()
fio.run()
sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
if new_occupancy != base_occupancy + io_size:
TestRun.fail("Wrong number of direct writes was cached!\n"
f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}")
TestRun.LOGGER.info(f"Buffered reads from {'file' if filesystem else 'device'}")
fio.remove_param("readwrite").remove_param("direct")
fio.read_write(ReadWrite.read)
fio.run()
sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
if new_occupancy != base_occupancy:
TestRun.fail("Buffered reads did not cause reclassification!"
f"Expected occupancy: {base_occupancy}, actual: {new_occupancy}")
TestRun.LOGGER.info(f"Direct reads from {'file' if filesystem else 'device'}")
fio.direct()
fio.run()
sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=ioclass_id).usage_stats.occupancy
if new_occupancy != base_occupancy + io_size:
TestRun.fail("Wrong number of direct reads was cached!\n"
f"Expected: {base_occupancy + io_size}, actual: {new_occupancy}")
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_metadata(filesystem):
"""
Perform operations on files that cause metadata update.
Determine if every such operation results in increased writes to cached metadata.
Exact values may not be tested as each file system has different metadata structure.
"""
cache, core = prepare()
Udev.disable()
ioclass_id = random.randint(1, ioclass_config.MAX_IO_CLASS_ID)
# metadata IO class
ioclass_config.add_ioclass(
ioclass_id=ioclass_id,
eviction_priority=1,
allocation=True,
rule="metadata&done",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
requests_to_metadata_before = cache.get_io_class_statistics(
io_class_id=ioclass_id).request_stats.write
TestRun.LOGGER.info("Creating 20 test files")
files = []
for i in range(1, 21):
file_path = f"{mountpoint}/test_file_{i}"
dd = (
Dd()
.input("/dev/urandom")
.output(file_path)
.count(random.randint(5, 50))
.block_size(Size(1, Unit.MebiByte))
.oflag("sync")
)
dd.run()
files.append(File(file_path))
TestRun.LOGGER.info("Checking requests to metadata")
requests_to_metadata_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).request_stats.write
if requests_to_metadata_after == requests_to_metadata_before:
TestRun.fail("No requests to metadata while creating files!")
requests_to_metadata_before = requests_to_metadata_after
TestRun.LOGGER.info("Renaming all test files")
for file in files:
file.move(f"{file.full_path}_renamed")
sync()
TestRun.LOGGER.info("Checking requests to metadata")
requests_to_metadata_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).request_stats.write
if requests_to_metadata_after == requests_to_metadata_before:
TestRun.fail("No requests to metadata while renaming files!")
requests_to_metadata_before = requests_to_metadata_after
test_dir_path = f"{mountpoint}/test_dir"
TestRun.LOGGER.info(f"Creating directory {test_dir_path}")
fs_utils.create_directory(path=test_dir_path)
TestRun.LOGGER.info(f"Moving test files into {test_dir_path}")
for file in files:
file.move(test_dir_path)
sync()
TestRun.LOGGER.info("Checking requests to metadata")
requests_to_metadata_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).request_stats.write
if requests_to_metadata_after == requests_to_metadata_before:
TestRun.fail("No requests to metadata while moving files!")
TestRun.LOGGER.info(f"Removing {test_dir_path}")
fs_utils.remove(path=test_dir_path, force=True, recursive=True)
TestRun.LOGGER.info("Checking requests to metadata")
requests_to_metadata_after = cache.get_io_class_statistics(
io_class_id=ioclass_id).request_stats.write
if requests_to_metadata_after == requests_to_metadata_before:
TestRun.fail("No requests to metadata while deleting directory with files!")
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_id_as_condition(filesystem):
"""
Load config in which IO class ids are used as conditions in other IO class definitions.
Check if performed IO is properly classified.
"""
cache, core = prepare()
Udev.disable()
base_dir_path = f"{mountpoint}/base_dir"
ioclass_file_size = Size(random.randint(25, 50), Unit.MebiByte)
ioclass_file_size_bytes = int(ioclass_file_size.get_value(Unit.Byte))
# directory condition
ioclass_config.add_ioclass(
ioclass_id=1,
eviction_priority=1,
allocation=True,
rule=f"directory:{base_dir_path}",
ioclass_config_path=ioclass_config_path,
)
# file size condition
ioclass_config.add_ioclass(
ioclass_id=2,
eviction_priority=1,
allocation=True,
rule=f"file_size:eq:{ioclass_file_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
# direct condition
ioclass_config.add_ioclass(
ioclass_id=3,
eviction_priority=1,
allocation=True,
rule="direct",
ioclass_config_path=ioclass_config_path,
)
# IO class 1 OR 2 condition
ioclass_config.add_ioclass(
ioclass_id=4,
eviction_priority=1,
allocation=True,
rule="io_class:1|io_class:2",
ioclass_config_path=ioclass_config_path,
)
# IO class 4 AND file size condition (same as IO class 2)
ioclass_config.add_ioclass(
ioclass_id=5,
eviction_priority=1,
allocation=True,
rule=f"io_class:4&file_size:eq:{ioclass_file_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
# IO class 3 condition
ioclass_config.add_ioclass(
ioclass_id=6,
eviction_priority=1,
allocation=True,
rule="io_class:3",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(filesystem)
core.mount(mountpoint)
fs_utils.create_directory(base_dir_path)
sync()
# IO fulfilling IO class 1 condition (and not IO class 2)
# Should be classified as IO class 4
base_occupancy = cache.get_io_class_statistics(io_class_id=4).usage_stats.occupancy
non_ioclass_file_size = Size(random.randrange(1, 25), Unit.MebiByte)
(Fio().create_command()
.io_engine(IoEngine.libaio)
.size(non_ioclass_file_size)
.read_write(ReadWrite.write)
.target(f"{base_dir_path}/test_file_1")
.run())
sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=4).usage_stats.occupancy
if new_occupancy != base_occupancy + non_ioclass_file_size:
TestRun.fail("Writes were not properly cached!\n"
f"Expected: {base_occupancy + non_ioclass_file_size}, actual: {new_occupancy}")
# IO fulfilling IO class 2 condition (and not IO class 1)
# Should be classified as IO class 5
base_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy
(Fio().create_command()
.io_engine(IoEngine.libaio)
.size(ioclass_file_size)
.read_write(ReadWrite.write)
.target(f"{mountpoint}/test_file_2")
.run())
sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy
if new_occupancy != base_occupancy + ioclass_file_size:
TestRun.fail("Writes were not properly cached!\n"
f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}")
# IO fulfilling IO class 1 and 2 conditions
# Should be classified as IO class 5
base_occupancy = new_occupancy
(Fio().create_command()
.io_engine(IoEngine.libaio)
.size(ioclass_file_size)
.read_write(ReadWrite.write)
.target(f"{base_dir_path}/test_file_3")
.run())
sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=5).usage_stats.occupancy
if new_occupancy != base_occupancy + ioclass_file_size:
TestRun.fail("Writes were not properly cached!\n"
f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}")
# Same IO but direct
# Should be classified as IO class 6
base_occupancy = cache.get_io_class_statistics(io_class_id=6).usage_stats.occupancy
(Fio().create_command()
.io_engine(IoEngine.libaio)
.size(ioclass_file_size)
.read_write(ReadWrite.write)
.target(f"{base_dir_path}/test_file_3")
.direct()
.run())
sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=6).usage_stats.occupancy
if new_occupancy != base_occupancy + ioclass_file_size:
TestRun.fail("Writes were not properly cached!\n"
f"Expected: {base_occupancy + ioclass_file_size}, actual: {new_occupancy}")
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_conditions_or(filesystem):
"""
Load config with IO class combining 5 contradicting conditions connected by OR operator.
Check if every IO fulfilling one condition is classified properly.
"""
cache, core = prepare()
Udev.disable()
# directories OR condition
ioclass_config.add_ioclass(
ioclass_id=1,
eviction_priority=1,
allocation=True,
rule=f"directory:{mountpoint}/dir1|directory:{mountpoint}/dir2|directory:"
f"{mountpoint}/dir3|directory:{mountpoint}/dir4|directory:{mountpoint}/dir5",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(filesystem)
core.mount(mountpoint)
for i in range(1, 6):
fs_utils.create_directory(f"{mountpoint}/dir{i}")
sync()
# Perform IO fulfilling each condition and check if occupancy raises
for i in range(1, 6):
file_size = Size(random.randint(25, 50), Unit.MebiByte)
base_occupancy = cache.get_io_class_statistics(io_class_id=1).usage_stats.occupancy
(Fio().create_command()
.io_engine(IoEngine.libaio)
.size(file_size)
.read_write(ReadWrite.write)
.target(f"{mountpoint}/dir{i}/test_file")
.run())
sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=1).usage_stats.occupancy
if new_occupancy != base_occupancy + file_size:
TestRun.fail("Occupancy has not increased correctly!\n"
f"Expected: {base_occupancy + file_size}, actual: {new_occupancy}")
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_conditions_and(filesystem):
"""
Load config with IO class combining 5 conditions contradicting at least one other condition
connected by AND operator.
Check if every IO fulfilling one of the conditions is not classified.
"""
cache, core = prepare()
Udev.disable()
file_size = Size(random.randint(25, 50), Unit.MebiByte)
file_size_bytes = int(file_size.get_value(Unit.Byte))
# directories OR condition
ioclass_config.add_ioclass(
ioclass_id=1,
eviction_priority=1,
allocation=True,
rule=f"file_size:gt:{file_size_bytes}&file_size:lt:{file_size_bytes}&"
f"file_size:ge:{file_size_bytes}&file_size:le:{file_size_bytes}&"
f"file_size:eq:{file_size_bytes}",
ioclass_config_path=ioclass_config_path,
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
base_occupancy = cache.get_io_class_statistics(io_class_id=1).usage_stats.occupancy
# Perform IO
for size in [file_size, file_size + Size(1, Unit.MebiByte), file_size - Size(1, Unit.MebiByte)]:
(Fio().create_command()
.io_engine(IoEngine.libaio)
.size(size)
.read_write(ReadWrite.write)
.target(f"{mountpoint}/test_file")
.run())
sync()
new_occupancy = cache.get_io_class_statistics(io_class_id=1).usage_stats.occupancy
if new_occupancy != base_occupancy:
TestRun.fail("Unexpected occupancy increase!\n"
f"Expected: {base_occupancy}, actual: {new_occupancy}")
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
@pytest.mark.parametrizex("filesystem", Filesystem)
def test_ioclass_effective_ioclass(filesystem):
"""
title: Effective IO class with multiple non-exclusive conditions
description: |
Test CAS ability to properly classify IO fulfilling multiple conditions based on
IO class ids and presence of '&done' annotation in IO class rules
pass_criteria:
- In every iteration first IO is classified to the last in order IO class
- In every iteration second IO is classified to the IO class with '&done' annotation
"""
with TestRun.LOGGER.step(f"Test prepare"):
cache, core = prepare()
Udev.disable()
file_size = Size(10, Unit.Blocks4096)
file_size_bytes = int(file_size.get_value(Unit.Byte))
test_dir = f"{mountpoint}/test"
rules = ["direct", # rule contradicting other rules
f"directory:{test_dir}",
f"file_size:le:{2 * file_size_bytes}",
f"file_size:ge:{file_size_bytes // 2}"]
with TestRun.LOGGER.step(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}"):
core.create_filesystem(filesystem)
core.mount(mountpoint)
fs_utils.create_directory(test_dir)
sync()
for i, permutation in TestRun.iteration(enumerate(permutations(range(1, 5)), start=1)):
with TestRun.LOGGER.step("Load IO classes in order specified by permutation"):
load_io_classes_in_permutation_order(rules, permutation, cache)
io_class_id = 3 if rules[permutation.index(4)] == "direct" else 4
with TestRun.LOGGER.step("Perform IO fulfilling the non-contradicting conditions"):
base_occupancy = cache.get_io_class_statistics(
io_class_id=io_class_id).usage_stats.occupancy
fio = (Fio().create_command()
.io_engine(IoEngine.libaio)
.size(file_size)
.read_write(ReadWrite.write)
.target(f"{test_dir}/test_file{i}"))
fio.run()
sync()
with TestRun.LOGGER.step("Check if IO was properly classified "
"(to the last non-contradicting IO class)"):
new_occupancy = cache.get_io_class_statistics(
io_class_id=io_class_id).usage_stats.occupancy
if new_occupancy != base_occupancy + file_size:
TestRun.LOGGER.error("Wrong IO classification!\n"
f"Expected: {base_occupancy + file_size}, "
f"actual: {new_occupancy}")
with TestRun.LOGGER.step("Add '&done' to the second in order non-contradicting condition"):
io_class_id = add_done_to_second_non_exclusive_condition(rules, permutation, cache)
with TestRun.LOGGER.step("Repeat IO"):
base_occupancy = cache.get_io_class_statistics(
io_class_id=io_class_id).usage_stats.occupancy
fio.run()
sync()
with TestRun.LOGGER.step("Check if IO was properly classified "
"(to the IO class with '&done' annotation)"):
new_occupancy = cache.get_io_class_statistics(
io_class_id=io_class_id).usage_stats.occupancy
if new_occupancy != base_occupancy + file_size:
TestRun.LOGGER.error("Wrong IO classification!\n"
f"Expected: {base_occupancy + file_size}, "
f"actual: {new_occupancy}")
def load_io_classes_in_permutation_order(rules, permutation, cache):
ioclass_config.remove_ioclass_config(ioclass_config_path=ioclass_config_path)
ioclass_config.create_ioclass_config(
add_default_rule=False, ioclass_config_path=ioclass_config_path
)
# To make the test more precise, all workloads except the tested io class should be
# put in pass-through mode
ioclass_list = [IoClass.default(allocation=False)]
for n in range(len(rules)):
ioclass_list.append(IoClass(class_id=permutation[n], rule=rules[n]))
IoClass.save_list_to_config_file(ioclass_list,
add_default_rule=False,
ioclass_config_path=ioclass_config_path)
casadm.load_io_classes(cache.cache_id, file=ioclass_config_path)
def add_done_to_second_non_exclusive_condition(rules, permutation, cache):
non_exclusive_conditions = 0
second_class_id = 1
while True:
idx = permutation.index(second_class_id)
if rules[idx] != "direct":
non_exclusive_conditions += 1
if non_exclusive_conditions == 2:
break
second_class_id += 1
fs_utils.replace_first_pattern_occurrence(ioclass_config_path,
rules[idx], f"{rules[idx]}&done")
sync()
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
return second_class_id
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from dataclasses import dataclass
from typing import Tuple
from pants.backend.python.lint.pylint.subsystem import (
Pylint,
PylintFieldSet,
PylintFirstPartyPlugins,
)
from pants.backend.python.subsystems.setup import PythonSetup
from pants.backend.python.util_rules import partition, pex_from_targets
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.pex import (
Pex,
PexRequest,
VenvPex,
VenvPexProcess,
VenvPexRequest,
)
from pants.backend.python.util_rules.pex_from_targets import RequirementsPexRequest
from pants.backend.python.util_rules.python_sources import (
PythonSourceFiles,
PythonSourceFilesRequest,
)
from pants.core.goals.lint import REPORT_DIR, LintResult, LintResults, LintTargetsRequest
from pants.core.util_rules.config_files import ConfigFiles, ConfigFilesRequest
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.collection import Collection
from pants.engine.fs import CreateDigest, Digest, Directory, MergeDigests, RemovePrefix
from pants.engine.process import FallibleProcessResult
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import CoarsenedTargets, Target
from pants.engine.unions import UnionRule
from pants.util.logging import LogLevel
from pants.util.ordered_set import FrozenOrderedSet
from pants.util.strutil import pluralize
@dataclass(frozen=True)
class PylintPartition:
root_field_sets: FrozenOrderedSet[PylintFieldSet]
closure: FrozenOrderedSet[Target]
resolve_description: str | None
interpreter_constraints: InterpreterConstraints
def description(self) -> str:
ics = str(sorted(str(c) for c in self.interpreter_constraints))
return f"{self.resolve_description}, {ics}" if self.resolve_description else ics
class PylintPartitions(Collection[PylintPartition]):
pass
class PylintRequest(LintTargetsRequest):
field_set_type = PylintFieldSet
name = Pylint.options_scope
def generate_argv(source_files: SourceFiles, pylint: Pylint) -> Tuple[str, ...]:
args = []
if pylint.config is not None:
args.append(f"--rcfile={pylint.config}")
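# NB: "{pants_concurrency}" below is deliberately not an f-string placeholder; to the best
# of our understanding it is a literal template that the Pants process runner substitutes
# at runtime based on the `concurrency_available` value passed to the process further down.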
args.append("--jobs={pants_concurrency}")
args.extend(pylint.args)
args.extend(source_files.files)
return tuple(args)
@rule(level=LogLevel.DEBUG)
async def pylint_lint_partition(
partition: PylintPartition, pylint: Pylint, first_party_plugins: PylintFirstPartyPlugins
) -> LintResult:
requirements_pex_get = Get(
Pex,
RequirementsPexRequest(
(fs.address for fs in partition.root_field_sets),
# NB: These constraints must be identical to the other PEXes. Otherwise, we risk using
# a different version for the requirements than the other two PEXes, which can result
# in a PEX runtime error about missing dependencies.
hardcoded_interpreter_constraints=partition.interpreter_constraints,
),
)
pylint_pex_get = Get(
Pex,
PexRequest,
pylint.to_pex_request(
interpreter_constraints=partition.interpreter_constraints,
extra_requirements=first_party_plugins.requirement_strings,
),
)
prepare_python_sources_get = Get(PythonSourceFiles, PythonSourceFilesRequest(partition.closure))
field_set_sources_get = Get(
SourceFiles, SourceFilesRequest(fs.source for fs in partition.root_field_sets)
)
# Ensure that the empty report dir exists.
report_directory_digest_get = Get(Digest, CreateDigest([Directory(REPORT_DIR)]))
(
pylint_pex,
requirements_pex,
prepared_python_sources,
field_set_sources,
report_directory,
) = await MultiGet(
pylint_pex_get,
requirements_pex_get,
prepare_python_sources_get,
field_set_sources_get,
report_directory_digest_get,
)
pylint_runner_pex, config_files = await MultiGet(
Get(
VenvPex,
VenvPexRequest(
PexRequest(
output_filename="pylint_runner.pex",
interpreter_constraints=partition.interpreter_constraints,
main=pylint.main,
internal_only=True,
pex_path=[pylint_pex, requirements_pex],
),
# TODO(John Sirois): Remove this (change to the default of symlinks) when we can
# upgrade to a version of Pylint with https://github.com/PyCQA/pylint/issues/1470
# resolved.
site_packages_copies=True,
),
),
Get(
ConfigFiles, ConfigFilesRequest, pylint.config_request(field_set_sources.snapshot.dirs)
),
)
pythonpath = list(prepared_python_sources.source_roots)
if first_party_plugins:
pythonpath.append(first_party_plugins.PREFIX)
input_digest = await Get(
Digest,
MergeDigests(
(
config_files.snapshot.digest,
first_party_plugins.sources_digest,
prepared_python_sources.source_files.snapshot.digest,
report_directory,
)
),
)
result = await Get(
FallibleProcessResult,
VenvPexProcess(
pylint_runner_pex,
argv=generate_argv(field_set_sources, pylint),
input_digest=input_digest,
output_directories=(REPORT_DIR,),
extra_env={"PEX_EXTRA_SYS_PATH": ":".join(pythonpath)},
concurrency_available=len(partition.root_field_sets),
description=f"Run Pylint on {pluralize(len(partition.root_field_sets), 'file')}.",
level=LogLevel.DEBUG,
),
)
report = await Get(Digest, RemovePrefix(result.output_digest, REPORT_DIR))
return LintResult.from_fallible_process_result(
result,
partition_description=partition.description(),
report=report,
)
@rule(desc="Determine if necessary to partition Pylint input", level=LogLevel.DEBUG)
async def pylint_determine_partitions(
request: PylintRequest, python_setup: PythonSetup, first_party_plugins: PylintFirstPartyPlugins
) -> PylintPartitions:
resolve_and_interpreter_constraints_to_coarsened_targets = (
await partition._by_interpreter_constraints_and_resolve(request.field_sets, python_setup)
)
first_party_ics = InterpreterConstraints.create_from_compatibility_fields(
first_party_plugins.interpreter_constraints_fields, python_setup
)
return PylintPartitions(
PylintPartition(
FrozenOrderedSet(roots),
FrozenOrderedSet(CoarsenedTargets(root_cts).closure()),
resolve if len(python_setup.resolves) > 1 else None,
InterpreterConstraints.merge((interpreter_constraints, first_party_ics)),
)
for (resolve, interpreter_constraints), (roots, root_cts) in sorted(
resolve_and_interpreter_constraints_to_coarsened_targets.items()
)
)
@rule(desc="Lint using Pylint", level=LogLevel.DEBUG)
async def pylint_lint(request: PylintRequest, pylint: Pylint) -> LintResults:
if pylint.skip:
return LintResults([], linter_name=request.name)
partitions = await Get(PylintPartitions, PylintRequest, request)
partitioned_results = await MultiGet(
Get(LintResult, PylintPartition, partition) for partition in partitions
)
return LintResults(partitioned_results, linter_name=request.name)
def rules():
return [
*collect_rules(),
UnionRule(LintTargetsRequest, PylintRequest),
*pex_from_targets.rules(),
]
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from dataclasses import dataclass
from typing import Tuple
from pants.backend.python.lint.pylint.subsystem import (
Pylint,
PylintFieldSet,
PylintFirstPartyPlugins,
)
from pants.backend.python.subsystems.setup import PythonSetup
from pants.backend.python.util_rules import partition, pex_from_targets
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.pex import (
Pex,
PexRequest,
VenvPex,
VenvPexProcess,
VenvPexRequest,
)
from pants.backend.python.util_rules.pex_from_targets import RequirementsPexRequest
from pants.backend.python.util_rules.python_sources import (
PythonSourceFiles,
PythonSourceFilesRequest,
)
from pants.core.goals.lint import REPORT_DIR, LintResult, LintResults, LintTargetsRequest
from pants.core.util_rules.config_files import ConfigFiles, ConfigFilesRequest
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.collection import Collection
from pants.engine.fs import CreateDigest, Digest, Directory, MergeDigests, RemovePrefix
from pants.engine.process import FallibleProcessResult
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import CoarsenedTargets, Target
from pants.engine.unions import UnionRule
from pants.util.logging import LogLevel
from pants.util.ordered_set import FrozenOrderedSet
from pants.util.strutil import pluralize
@dataclass(frozen=True)
class PylintPartition:
root_field_sets: FrozenOrderedSet[PylintFieldSet]
closure: FrozenOrderedSet[Target]
resolve_description: str | None
interpreter_constraints: InterpreterConstraints
def description(self) -> str:
ics = str(sorted(str(c) for c in self.interpreter_constraints))
return f"{self.resolve_description}, {ics}" if self.resolve_description else ics
class PylintPartitions(Collection[PylintPartition]):
pass
class PylintRequest(LintTargetsRequest):
field_set_type = PylintFieldSet
name = Pylint.options_scope
def generate_argv(source_files: SourceFiles, pylint: Pylint) -> Tuple[str, ...]:
args = []
if pylint.config is not None:
args.append(f"--rcfile={pylint.config}")
args.append("--jobs={pants_concurrency}")
args.extend(pylint.args)
args.extend(source_files.files)
return tuple(args)
@rule(level=LogLevel.DEBUG)
async def pylint_lint_partition(
partition: PylintPartition, pylint: Pylint, first_party_plugins: PylintFirstPartyPlugins
) -> LintResult:
requirements_pex_get = Get(
Pex,
RequirementsPexRequest(
(fs.address for fs in partition.root_field_sets),
# NB: These constraints must be identical to the other PEXes. Otherwise, we risk using
# a different version for the requirements than the other two PEXes, which can result
# in a PEX runtime error about missing dependencies.
hardcoded_interpreter_constraints=partition.interpreter_constraints,
),
)
pylint_pex_get = Get(
Pex,
PexRequest,
pylint.to_pex_request(
interpreter_constraints=partition.interpreter_constraints,
extra_requirements=first_party_plugins.requirement_strings,
),
)
prepare_python_sources_get = Get(PythonSourceFiles, PythonSourceFilesRequest(partition.closure))
field_set_sources_get = Get(
SourceFiles, SourceFilesRequest(fs.source for fs in partition.root_field_sets)
)
# Ensure that the empty report dir exists.
report_directory_digest_get = Get(Digest, CreateDigest([Directory(REPORT_DIR)]))
(
pylint_pex,
requirements_pex,
prepared_python_sources,
field_set_sources,
report_directory,
) = await MultiGet(
pylint_pex_get,
requirements_pex_get,
prepare_python_sources_get,
field_set_sources_get,
report_directory_digest_get,
)
pylint_runner_pex, config_files = await MultiGet(
Get(
VenvPex,
VenvPexRequest(
PexRequest(
output_filename="pylint_runner.pex",
interpreter_constraints=partition.interpreter_constraints,
main=pylint.main,
internal_only=True,
pex_path=[pylint_pex, requirements_pex],
),
# TODO(John Sirois): Remove this (change to the default of symlinks) when we can
# upgrade to a version of Pylint with https://github.com/PyCQA/pylint/issues/1470
# resolved.
site_packages_copies=True,
),
),
Get(
ConfigFiles, ConfigFilesRequest, pylint.config_request(field_set_sources.snapshot.dirs)
),
)
pythonpath = list(prepared_python_sources.source_roots)
if first_party_plugins:
pythonpath.append(first_party_plugins.PREFIX)
input_digest = await Get(
Digest,
MergeDigests(
(
config_files.snapshot.digest,
first_party_plugins.sources_digest,
prepared_python_sources.source_files.snapshot.digest,
report_directory,
)
),
)
result = await Get(
FallibleProcessResult,
VenvPexProcess(
pylint_runner_pex,
argv=generate_argv(field_set_sources, pylint),
input_digest=input_digest,
output_directories=(REPORT_DIR,),
extra_env={"PEX_EXTRA_SYS_PATH": ":".join(pythonpath)},
concurrency_available=len(partition.root_field_sets),
description=f"Run Pylint on {pluralize(len(partition.root_field_sets), 'file')}.",
level=LogLevel.DEBUG,
),
)
report = await Get(Digest, RemovePrefix(result.output_digest, REPORT_DIR))
return LintResult.from_fallible_process_result(
result,
partition_description=partition.description(),
report=report,
)
@rule(desc="Determine if necessary to partition Pylint input", level=LogLevel.DEBUG)
async def pylint_determine_partitions(
request: PylintRequest, python_setup: PythonSetup, first_party_plugins: PylintFirstPartyPlugins
) -> PylintPartitions:
resolve_and_interpreter_constraints_to_coarsened_targets = (
await partition._by_interpreter_constraints_and_resolve(request.field_sets, python_setup)
)
first_party_ics = InterpreterConstraints.create_from_compatibility_fields(
first_party_plugins.interpreter_constraints_fields, python_setup
)
return PylintPartitions(
PylintPartition(
FrozenOrderedSet(roots),
FrozenOrderedSet(CoarsenedTargets(root_cts).closure()),
resolve if len(python_setup.resolves) > 1 else None,
InterpreterConstraints.merge((interpreter_constraints, first_party_ics)),
)
for (resolve, interpreter_constraints), (roots, root_cts) in sorted(
resolve_and_interpreter_constraints_to_coarsened_targets.items()
)
)
@rule(desc="Lint using Pylint", level=LogLevel.DEBUG)
async def pylint_lint(request: PylintRequest, pylint: Pylint) -> LintResults:
if pylint.skip:
return LintResults([], linter_name=request.name)
partitions = await Get(PylintPartitions, PylintRequest, request)
partitioned_results = await MultiGet(
Get(LintResult, PylintPartition, partition) for partition in partitions
)
return LintResults(partitioned_results, linter_name=request.name)
def rules():
return [
*collect_rules(),
UnionRule(LintTargetsRequest, PylintRequest),
*pex_from_targets.rules(),
]
|
import os
import sys
import importlib
import argparse
import csv
import numpy as np
import time
import pickle
import pathlib
import gzip
import tensorflow as tf
import tensorflow.contrib.eager as tfe
import svmrank
import utilities
from utilities_tf import load_batch_gcnn
def load_batch_flat(sample_files, feats_type, augment_feats, normalize_feats):
cand_features = []
cand_choices = []
cand_scoress = []
for i, filename in enumerate(sample_files):
cand_states, cand_scores, cand_choice = utilities.load_flat_samples(filename, feats_type, 'scores', augment_feats, normalize_feats)
cand_features.append(cand_states)
cand_choices.append(cand_choice)
cand_scoress.append(cand_scores)
n_cands_per_sample = [v.shape[0] for v in cand_features]
cand_features = np.concatenate(cand_features, axis=0).astype(np.float32, copy=False)
cand_choices = np.asarray(cand_choices).astype(np.int32, copy=False)
cand_scoress = np.concatenate(cand_scoress, axis=0).astype(np.float32, copy=False)
n_cands_per_sample = np.asarray(n_cands_per_sample).astype(np.int32, copy=False)
return cand_features, n_cands_per_sample, cand_choices, cand_scoress
def padding(output, n_vars_per_sample, fill=-1e8):
n_vars_max = tf.reduce_max(n_vars_per_sample)
output = tf.split(
value=output,
num_or_size_splits=n_vars_per_sample,
axis=1,
)
output = tf.concat([
tf.pad(
x,
paddings=[[0, 0], [0, n_vars_max - tf.shape(x)[1]]],
mode='CONSTANT',
constant_values=fill)
for x in output
], axis=0)
return output
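# Illustrative numpy sketch (not used by the script) mirroring what padding() does for the
# prediction / ground-truth scores: split one flat row of scores into per-sample chunks and
# right-pad each chunk with `fill` up to the largest candidate count.
def _padding_sketch(flat_scores, n_per_sample, fill=-1e8):
    chunks = np.split(np.asarray(flat_scores, dtype=float), np.cumsum(n_per_sample)[:-1])
    width = max(n_per_sample)
    return np.stack([np.pad(c, (0, width - len(c)), mode='constant', constant_values=fill)
                     for c in chunks])
# _padding_sketch([1., 2., 3., 4., 5.], [2, 3]) -> [[1., 2., -1e8], [3., 4., 5.]]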
def process(policy, dataloader, top_k):
mean_kacc = np.zeros(len(top_k))
n_samples_processed = 0
for batch in dataloader:
if policy['type'] == 'gcnn':
c, ei, ev, v, n_cs, n_vs, n_cands, cands, best_cands, cand_scores = batch
pred_scores = policy['model']((c, ei, ev, v, tf.reduce_sum(n_cs, keepdims=True), tf.reduce_sum(n_vs, keepdims=True)), tf.convert_to_tensor(False))
# filter candidate variables
pred_scores = tf.expand_dims(tf.gather(tf.squeeze(pred_scores, 0), cands), 0)
elif policy['type'] == 'ml-competitor':
cand_feats, n_cands, best_cands, cand_scores = batch
# move to numpy
cand_feats = cand_feats.numpy()
n_cands = n_cands.numpy()
# feature normalization
cand_feats = (cand_feats - policy['feat_shift']) / policy['feat_scale']
pred_scores = policy['model'].predict(cand_feats)
# move back to TF
pred_scores = tf.convert_to_tensor(pred_scores.reshape((1, -1)), dtype=tf.float32)
# padding
pred_scores = padding(pred_scores, n_cands)
true_scores = padding(tf.reshape(cand_scores, (1, -1)), n_cands)
true_bestscore = tf.reduce_max(true_scores, axis=-1, keepdims=True)
assert all(true_bestscore.numpy() == np.take_along_axis(true_scores.numpy(), best_cands.numpy().reshape((-1, 1)), axis=1))
kacc = []
for k in top_k:
pred_top_k = tf.nn.top_k(pred_scores, k=k)[1].numpy()
pred_top_k_true_scores = np.take_along_axis(true_scores.numpy(), pred_top_k, axis=1)
kacc.append(np.mean(np.any(pred_top_k_true_scores == true_bestscore.numpy(), axis=1)))
kacc = np.asarray(kacc)
batch_size = int(n_cands.shape[0])
mean_kacc += kacc * batch_size
n_samples_processed += batch_size
mean_kacc /= n_samples_processed
return mean_kacc
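# Note: the returned vector holds one mean top-k accuracy per k in `top_k`:
# the fraction of samples whose truly best candidate (highest true score)
# appears among the k candidates ranked highest by the policy, with each
# batch weighted by its number of samples.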
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'problem',
help='MILP instance type to process.',
choices=['setcover', 'cauctions', 'facilities', 'indset'],
)
parser.add_argument(
'-g', '--gpu',
help='CUDA GPU id (-1 for CPU).',
type=int,
default=0,
)
args = parser.parse_args()
print(f"problem: {args.problem}")
print(f"gpu: {args.gpu}")
os.makedirs("results", exist_ok=True)
    result_file = f"results/{args.problem}_validation_{time.strftime('%Y%m%d-%H%M%S')}.csv"
seeds = [0, 1, 2, 3, 4]
gcnn_models = ['baseline']
other_models = ['extratrees_gcnn_agg', 'lambdamart_khalil', 'svmrank_khalil']
test_batch_size = 128
top_k = [1, 3, 5, 10]
problem_folders = {
'setcover': 'setcover/500r_1000c_0.05d',
'cauctions': 'cauctions/100_500',
'facilities': 'facilities/100_100_5',
'indset': 'indset/500_4',
}
problem_folder = problem_folders[args.problem]
if args.problem == 'setcover':
gcnn_models += ['mean_convolution', 'no_prenorm']
    result_file = f"results/{args.problem}_test_{time.strftime('%Y%m%d-%H%M%S')}"
result_file = result_file + '.csv'
os.makedirs('results', exist_ok=True)
### TENSORFLOW SETUP ###
if args.gpu == -1:
os.environ['CUDA_VISIBLE_DEVICES'] = ''
else:
os.environ['CUDA_VISIBLE_DEVICES'] = f'{args.gpu}'
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
tf.enable_eager_execution(config)
tf.executing_eagerly()
test_files = list(pathlib.Path(f"data/samples/{problem_folder}/test").glob('sample_*.pkl'))
test_files = [str(x) for x in test_files]
print(f"{len(test_files)} test samples")
evaluated_policies = [['gcnn', model] for model in gcnn_models] + \
[['ml-competitor', model] for model in other_models]
fieldnames = [
'policy',
'seed',
] + [
f'acc@{k}' for k in top_k
]
with open(result_file, 'w', newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for policy_type, policy_name in evaluated_policies:
print(f"{policy_type}:{policy_name}...")
for seed in seeds:
rng = np.random.RandomState(seed)
tf.set_random_seed(rng.randint(np.iinfo(int).max))
policy = {}
policy['name'] = policy_name
policy['type'] = policy_type
if policy['type'] == 'gcnn':
# load model
                    sys.path.insert(0, os.path.abspath(f"models/{policy['name']}"))
import model
importlib.reload(model)
del sys.path[0]
policy['model'] = model.GCNPolicy()
                    policy['model'].restore_state(f"trained_models/{args.problem}/{policy['name']}/{seed}/best_params.pkl")
policy['model'].call = tfe.defun(policy['model'].call, input_signature=policy['model'].input_signature)
policy['batch_datatypes'] = [tf.float32, tf.int32, tf.float32,
tf.float32, tf.int32, tf.int32, tf.int32, tf.int32, tf.int32, tf.float32]
policy['batch_fun'] = load_batch_gcnn
else:
# load feature normalization parameters
try:
                        with open(f"trained_models/{args.problem}/{policy['name']}/{seed}/normalization.pkl", 'rb') as f:
policy['feat_shift'], policy['feat_scale'] = pickle.load(f)
except:
policy['feat_shift'], policy['feat_scale'] = 0, 1
# load model
if policy_name.startswith('svmrank'):
                        policy['model'] = svmrank.Model().read(f"trained_models/{args.problem}/{policy['name']}/{seed}/model.txt")
else:
                        with open(f"trained_models/{args.problem}/{policy['name']}/{seed}/model.pkl", 'rb') as f:
policy['model'] = pickle.load(f)
# load feature specifications
                    with open(f"trained_models/{args.problem}/{policy['name']}/{seed}/feat_specs.pkl", 'rb') as f:
feat_specs = pickle.load(f)
policy['batch_datatypes'] = [tf.float32, tf.int32, tf.int32, tf.float32]
policy['batch_fun'] = lambda x: load_batch_flat(x, feat_specs['type'], feat_specs['augment'], feat_specs['qbnorm'])
test_data = tf.data.Dataset.from_tensor_slices(test_files)
test_data = test_data.batch(test_batch_size)
test_data = test_data.map(lambda x: tf.py_func(
policy['batch_fun'], [x], policy['batch_datatypes']))
test_data = test_data.prefetch(2)
test_kacc = process(policy, test_data, top_k)
print(f" {seed} " + " ".join([f"acc@{k}: {100*acc:4.1f}" for k, acc in zip(top_k, test_kacc)]))
writer.writerow({
**{
                        'policy': f"{policy['type']}:{policy['name']}",
'seed': seed,
},
**{
f'acc@{k}': test_kacc[i] for i, k in enumerate(top_k)
},
})
csvfile.flush()
|
import os
import sys
import importlib
import argparse
import csv
import numpy as np
import time
import pickle
import pathlib
import gzip
import tensorflow as tf
import tensorflow.contrib.eager as tfe
import svmrank
import utilities
from utilities_tf import load_batch_gcnn
def load_batch_flat(sample_files, feats_type, augment_feats, normalize_feats):
cand_features = []
cand_choices = []
cand_scoress = []
for i, filename in enumerate(sample_files):
cand_states, cand_scores, cand_choice = utilities.load_flat_samples(filename, feats_type, 'scores', augment_feats, normalize_feats)
cand_features.append(cand_states)
cand_choices.append(cand_choice)
cand_scoress.append(cand_scores)
n_cands_per_sample = [v.shape[0] for v in cand_features]
cand_features = np.concatenate(cand_features, axis=0).astype(np.float32, copy=False)
cand_choices = np.asarray(cand_choices).astype(np.int32, copy=False)
cand_scoress = np.concatenate(cand_scoress, axis=0).astype(np.float32, copy=False)
n_cands_per_sample = np.asarray(n_cands_per_sample).astype(np.int32, copy=False)
return cand_features, n_cands_per_sample, cand_choices, cand_scoress
def padding(output, n_vars_per_sample, fill=-1e8):
n_vars_max = tf.reduce_max(n_vars_per_sample)
output = tf.split(
value=output,
num_or_size_splits=n_vars_per_sample,
axis=1,
)
output = tf.concat([
tf.pad(
x,
paddings=[[0, 0], [0, n_vars_max - tf.shape(x)[1]]],
mode='CONSTANT',
constant_values=fill)
for x in output
], axis=0)
return output
def process(policy, dataloader, top_k):
mean_kacc = np.zeros(len(top_k))
n_samples_processed = 0
for batch in dataloader:
if policy['type'] == 'gcnn':
c, ei, ev, v, n_cs, n_vs, n_cands, cands, best_cands, cand_scores = batch
pred_scores = policy['model']((c, ei, ev, v, tf.reduce_sum(n_cs, keepdims=True), tf.reduce_sum(n_vs, keepdims=True)), tf.convert_to_tensor(False))
# filter candidate variables
pred_scores = tf.expand_dims(tf.gather(tf.squeeze(pred_scores, 0), cands), 0)
elif policy['type'] == 'ml-competitor':
cand_feats, n_cands, best_cands, cand_scores = batch
# move to numpy
cand_feats = cand_feats.numpy()
n_cands = n_cands.numpy()
# feature normalization
cand_feats = (cand_feats - policy['feat_shift']) / policy['feat_scale']
pred_scores = policy['model'].predict(cand_feats)
# move back to TF
pred_scores = tf.convert_to_tensor(pred_scores.reshape((1, -1)), dtype=tf.float32)
# padding
pred_scores = padding(pred_scores, n_cands)
true_scores = padding(tf.reshape(cand_scores, (1, -1)), n_cands)
true_bestscore = tf.reduce_max(true_scores, axis=-1, keepdims=True)
assert all(true_bestscore.numpy() == np.take_along_axis(true_scores.numpy(), best_cands.numpy().reshape((-1, 1)), axis=1))
kacc = []
for k in top_k:
pred_top_k = tf.nn.top_k(pred_scores, k=k)[1].numpy()
pred_top_k_true_scores = np.take_along_axis(true_scores.numpy(), pred_top_k, axis=1)
kacc.append(np.mean(np.any(pred_top_k_true_scores == true_bestscore.numpy(), axis=1)))
kacc = np.asarray(kacc)
batch_size = int(n_cands.shape[0])
mean_kacc += kacc * batch_size
n_samples_processed += batch_size
mean_kacc /= n_samples_processed
return mean_kacc
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'problem',
help='MILP instance type to process.',
choices=['setcover', 'cauctions', 'facilities', 'indset'],
)
parser.add_argument(
'-g', '--gpu',
help='CUDA GPU id (-1 for CPU).',
type=int,
default=0,
)
args = parser.parse_args()
print(f"problem: {args.problem}")
print(f"gpu: {args.gpu}")
os.makedirs("results", exist_ok=True)
result_file = f"results/{args.problem}_validation_{time.strftime('%Y%m%d-%H%M%S')}.csv"
seeds = [0, 1, 2, 3, 4]
gcnn_models = ['baseline']
other_models = ['extratrees_gcnn_agg', 'lambdamart_khalil', 'svmrank_khalil']
test_batch_size = 128
top_k = [1, 3, 5, 10]
problem_folders = {
'setcover': 'setcover/500r_1000c_0.05d',
'cauctions': 'cauctions/100_500',
'facilities': 'facilities/100_100_5',
'indset': 'indset/500_4',
}
problem_folder = problem_folders[args.problem]
if args.problem == 'setcover':
gcnn_models += ['mean_convolution', 'no_prenorm']
result_file = f"results/{args.problem}_test_{time.strftime('%Y%m%d-%H%M%S')}"
result_file = result_file + '.csv'
os.makedirs('results', exist_ok=True)
### TENSORFLOW SETUP ###
if args.gpu == -1:
os.environ['CUDA_VISIBLE_DEVICES'] = ''
else:
os.environ['CUDA_VISIBLE_DEVICES'] = f'{args.gpu}'
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
tf.enable_eager_execution(config)
tf.executing_eagerly()
test_files = list(pathlib.Path(f"data/samples/{problem_folder}/test").glob('sample_*.pkl'))
test_files = [str(x) for x in test_files]
print(f"{len(test_files)} test samples")
evaluated_policies = [['gcnn', model] for model in gcnn_models] + \
[['ml-competitor', model] for model in other_models]
fieldnames = [
'policy',
'seed',
] + [
f'acc@{k}' for k in top_k
]
with open(result_file, 'w', newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for policy_type, policy_name in evaluated_policies:
print(f"{policy_type}:{policy_name}...")
for seed in seeds:
rng = np.random.RandomState(seed)
tf.set_random_seed(rng.randint(np.iinfo(int).max))
policy = {}
policy['name'] = policy_name
policy['type'] = policy_type
if policy['type'] == 'gcnn':
# load model
sys.path.insert(0, os.path.abspath(f"models/{policy['name']}"))
import model
importlib.reload(model)
del sys.path[0]
policy['model'] = model.GCNPolicy()
policy['model'].restore_state(f"trained_models/{args.problem}/{policy['name']}/{seed}/best_params.pkl")
policy['model'].call = tfe.defun(policy['model'].call, input_signature=policy['model'].input_signature)
policy['batch_datatypes'] = [tf.float32, tf.int32, tf.float32,
tf.float32, tf.int32, tf.int32, tf.int32, tf.int32, tf.int32, tf.float32]
policy['batch_fun'] = load_batch_gcnn
else:
# load feature normalization parameters
try:
with open(f"trained_models/{args.problem}/{policy['name']}/{seed}/normalization.pkl", 'rb') as f:
policy['feat_shift'], policy['feat_scale'] = pickle.load(f)
except:
policy['feat_shift'], policy['feat_scale'] = 0, 1
# load model
if policy_name.startswith('svmrank'):
policy['model'] = svmrank.Model().read(f"trained_models/{args.problem}/{policy['name']}/{seed}/model.txt")
else:
with open(f"trained_models/{args.problem}/{policy['name']}/{seed}/model.pkl", 'rb') as f:
policy['model'] = pickle.load(f)
# load feature specifications
with open(f"trained_models/{args.problem}/{policy['name']}/{seed}/feat_specs.pkl", 'rb') as f:
feat_specs = pickle.load(f)
policy['batch_datatypes'] = [tf.float32, tf.int32, tf.int32, tf.float32]
policy['batch_fun'] = lambda x: load_batch_flat(x, feat_specs['type'], feat_specs['augment'], feat_specs['qbnorm'])
test_data = tf.data.Dataset.from_tensor_slices(test_files)
test_data = test_data.batch(test_batch_size)
test_data = test_data.map(lambda x: tf.py_func(
policy['batch_fun'], [x], policy['batch_datatypes']))
test_data = test_data.prefetch(2)
test_kacc = process(policy, test_data, top_k)
print(f" {seed} " + " ".join([f"acc@{k}: {100*acc:4.1f}" for k, acc in zip(top_k, test_kacc)]))
writer.writerow({
**{
'policy': f"{policy['type']}:{policy['name']}",
'seed': seed,
},
**{
f'acc@{k}': test_kacc[i] for i, k in enumerate(top_k)
},
})
csvfile.flush()
|
import urllib.request
from datetime import datetime
import string
from argparse import ArgumentParser
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from bs4 import BeautifulSoup
from sortedcontainers import SortedDict
class StockPriceScraper:
def __init__(self, base_url, stock_codes, google_sheet, client_secret, test):
self.stock_codes = stock_codes
self.base_url = base_url
if not test:
self.sheet = client(client_secret).open(google_sheet)
def insert_prices(self):
        worksheet = self.sheet.add_worksheet(title=f'{datetime.today().strftime("%Y-%m-%d")}', rows='2', cols=f'{len(self.stock_codes)}')
for i, (stock_code, stock_price) in enumerate(self.stock_prices().items()):
self.update_sheet(worksheet, i, [stock_code, stock_price])
def stock_prices(self):
stock_prices = {}
for stock_code in self.stock_codes:
stock_prices[stock_code] = price(url(self.base_url, stock_code))
return SortedDict(stock_prices)
def update_sheet(self, worksheet, i, contents):
for j, content in enumerate(contents):
update_cell(worksheet, cell(string.ascii_uppercase[i], j), content)
def cell(letter, number):
return f'{letter}{number}'
def update_cell(worksheet, cell, info):
worksheet.update_acell(cell, info)
def client(client_secret):
scope = ['https://spreadsheets.google.com/feeds']
creds = ServiceAccountCredentials.from_json_keyfile_name(client_secret, scope)
return gspread.authorize(creds)
def price(url):
page = urllib.request.urlopen(url)
soup = BeautifulSoup(page, 'html.parser')
return soup.find('h2', attrs={'class':'page-content entry-content'}).text.strip()
def url(base_url, stock_code):
return f'{base_url}{stock_code.upper()}'
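# Illustrative sketch (not executed); the base URL and stock code below are
# placeholders, the real values come from the command-line arguments:
#   price(url('https://example.com/stocks/', 'abc'))
# builds 'https://example.com/stocks/ABC', fetches the page and returns the
# stripped text of the first matching <h2> element.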
if __name__ == '__main__':
parser = ArgumentParser(description='Takes stock codes, scrapes prices from website and inserts into a given google sheet')
    parser.add_argument('-b', '--base-url', action='store', help='the base url to scrape prices from', type=str, dest='base_url', required=True)
parser.add_argument('-c', '--client-secret', action='store', help='the client', type=str, dest='client_secret', required=True)
parser.add_argument('-g', '--google-sheet', action='store', help='the google sheet to insert prices into', type=str, dest='google_sheet', required=True)
parser.add_argument('-s', '--stock-codes', action='store', help='the stock codes to get price for', type=str, dest='stock_codes', nargs='+', required=True)
parser.add_argument('-t', '--test', action='store_true', help='Perform test', dest='test' )
args = parser.parse_args().__dict__
StockPriceScraper(**args).insert_prices()
|
import urllib.request
from datetime import datetime
import string
from argparse import ArgumentParser
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from bs4 import BeautifulSoup
from sortedcontainers import SortedDict
class StockPriceScraper:
def __init__(self, base_url, stock_codes, google_sheet, client_secret, test):
self.stock_codes = stock_codes
self.base_url = base_url
if not test:
self.sheet = client(client_secret).open(google_sheet)
def insert_prices(self):
worksheet = self.sheet.add_worksheet(title=f'{datetime.today().strftime("%Y-%m-%d")}', rows='2', cols=f'{len(self.stock_codes)}')
for i, (stock_code, stock_price) in enumerate(self.stock_prices().items()):
self.update_sheet(worksheet, i, [stock_code, stock_price])
def stock_prices(self):
stock_prices = {}
for stock_code in self.stock_codes:
stock_prices[stock_code] = price(url(self.base_url, stock_code))
return SortedDict(stock_prices)
def update_sheet(self, worksheet, i, contents):
for j, content in enumerate(contents):
update_cell(worksheet, cell(string.ascii_uppercase[i], j), content)
def cell(letter, number):
return f'{letter}{number}'
def update_cell(worksheet, cell, info):
worksheet.update_acell(cell, info)
def client(client_secret):
scope = ['https://spreadsheets.google.com/feeds']
creds = ServiceAccountCredentials.from_json_keyfile_name(client_secret, scope)
return gspread.authorize(creds)
def price(url):
page = urllib.request.urlopen(url)
soup = BeautifulSoup(page, 'html.parser')
return soup.find('h2', attrs={'class':'page-content entry-content'}).text.strip()
def url(base_url, stock_code):
return f'{base_url}{stock_code.upper()}'
if __name__ == '__main__':
parser = ArgumentParser(description='Takes stock codes, scrapes prices from website and inserts into a given google sheet')
    parser.add_argument('-b', '--base-url', action='store', help='the base url to scrape prices from', type=str, dest='base_url', required=True)
parser.add_argument('-c', '--client-secret', action='store', help='the client', type=str, dest='client_secret', required=True)
parser.add_argument('-g', '--google-sheet', action='store', help='the google sheet to insert prices into', type=str, dest='google_sheet', required=True)
parser.add_argument('-s', '--stock-codes', action='store', help='the stock codes to get price for', type=str, dest='stock_codes', nargs='+', required=True)
parser.add_argument('-t', '--test', action='store_true', help='Perform test', dest='test' )
args = parser.parse_args().__dict__
StockPriceScraper(**args).insert_prices()
|
import json
import os
import re
from collections import OrderedDict
import pytest
from allennlp.common.checks import ConfigurationError
from allennlp.common.params import infer_and_cast, Params, parse_overrides, unflatten, with_fallback
from allennlp.common.testing import AllenNlpTestCase
class TestParams(AllenNlpTestCase):
def test_load_from_file(self):
filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
params = Params.from_file(filename)
assert "dataset_reader" in params
assert "trainer" in params
model_params = params.pop("model")
assert model_params.pop("type") == "simple_tagger"
def test_replace_none(self):
params = Params({"a": "None", "b": [1.0, "None", 2], "c": {"d": "None"}})
assert params["a"] is None
assert params["b"][1] is None
assert params["c"]["d"] is None
def test_bad_unicode_environment_variables(self):
filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
os.environ["BAD_ENVIRONMENT_VARIABLE"] = "\udce2"
Params.from_file(filename)
del os.environ["BAD_ENVIRONMENT_VARIABLE"]
def test_overrides(self):
filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
overrides = (
'{ "train_data_path": "FOO", "model": { "type": "BAR" },'
'"model.text_field_embedder.tokens.type": "BAZ",'
'"iterator.sorting_keys.0.0": "question"}'
)
params = Params.from_file(filename, overrides)
assert "dataset_reader" in params
assert "trainer" in params
assert params["train_data_path"] == "FOO"
assert params["iterator"]["sorting_keys"][0][0] == "question"
model_params = params.pop("model")
assert model_params.pop("type") == "BAR"
assert model_params["text_field_embedder"]["tokens"]["type"] == "BAZ"
def test_unflatten(self):
flattened = {"a.b.c": 1, "a.b.d": 0, "a.e.f.g.h": 2, "b": 3}
unflattened = unflatten(flattened)
assert unflattened == {"a": {"b": {"c": 1, "d": 0}, "e": {"f": {"g": {"h": 2}}}}, "b": 3}
# should do nothing to a non-flat dictionary
assert unflatten(unflattened) == unflattened
def test_with_fallback(self):
preferred = {"a": 1}
fallback = {"a": 0, "b": 2}
merged = with_fallback(preferred=preferred, fallback=fallback)
assert merged == {"a": 1, "b": 2}
# incompatibility is ok
preferred = {"a": {"c": 3}}
fallback = {"a": 0, "b": 2}
merged = with_fallback(preferred=preferred, fallback=fallback)
assert merged == {"a": {"c": 3}, "b": 2}
# goes deep
preferred = {"deep": {"a": 1}}
fallback = {"deep": {"a": 0, "b": 2}}
merged = with_fallback(preferred=preferred, fallback=fallback)
assert merged == {"deep": {"a": 1, "b": 2}}
def test_parse_overrides(self):
assert parse_overrides("") == {}
assert parse_overrides("{}") == {}
override_dict = parse_overrides('{"train_data": "/train", "trainer.num_epochs": 10}')
assert override_dict == {"train_data": "/train", "trainer": {"num_epochs": 10}}
params = with_fallback(
preferred=override_dict,
fallback={
"train_data": "/test",
"model": "simple_tagger",
"trainer": {"num_epochs": 100, "optimizer": "sgd"},
},
)
assert params == {
"train_data": "/train",
"model": "simple_tagger",
"trainer": {"num_epochs": 10, "optimizer": "sgd"},
}
def test_as_flat_dict(self):
params = Params({"a": 10, "b": {"c": 20, "d": "stuff"}}).as_flat_dict()
assert params == {"a": 10, "b.c": 20, "b.d": "stuff"}
def test_jsonnet_features(self):
config_file = self.TEST_DIR / "config.jsonnet"
with open(config_file, "w") as f:
f.write(
"""{
// This example is copied straight from the jsonnet docs
person1: {
name: "Alice",
welcome: "Hello " + self.name + "!",
},
person2: self.person1 { name: "Bob" },
}"""
)
params = Params.from_file(config_file)
alice = params.pop("person1")
bob = params.pop("person2")
assert alice.as_dict() == {"name": "Alice", "welcome": "Hello Alice!"}
assert bob.as_dict() == {"name": "Bob", "welcome": "Hello Bob!"}
params.assert_empty("TestParams")
def test_regexes_with_backslashes(self):
bad_regex = self.TEST_DIR / "bad_regex.jsonnet"
good_regex = self.TEST_DIR / "good_regex.jsonnet"
with open(bad_regex, "w") as f:
f.write(r'{"myRegex": "a\.b"}')
with open(good_regex, "w") as f:
f.write(r'{"myRegex": "a\\.b"}')
with pytest.raises(RuntimeError):
Params.from_file(bad_regex)
params = Params.from_file(good_regex)
regex = params["myRegex"]
assert re.match(regex, "a.b")
assert not re.match(regex, "a-b")
# Check roundtripping
good_regex2 = self.TEST_DIR / "good_regex2.jsonnet"
with open(good_regex2, "w") as f:
f.write(json.dumps(params.as_dict()))
params2 = Params.from_file(good_regex2)
assert params.as_dict() == params2.as_dict()
def test_env_var_substitution(self):
substitutor = self.TEST_DIR / "substitutor.jsonnet"
key = "TEST_ENV_VAR_SUBSTITUTION"
assert os.environ.get(key) is None
with open(substitutor, "w") as f:
            f.write(f'{{"path": std.extVar("{key}")}}')
# raises without environment variable set
with pytest.raises(RuntimeError):
Params.from_file(substitutor)
os.environ[key] = "PERFECT"
params = Params.from_file(substitutor)
assert params["path"] == "PERFECT"
del os.environ[key]
@pytest.mark.xfail(
not os.path.exists(AllenNlpTestCase.PROJECT_ROOT / "training_config"),
reason="Training configs not installed with pip",
)
def test_known_configs(self):
configs = os.listdir(self.PROJECT_ROOT / "training_config")
# Our configs use environment variable substitution, and the _jsonnet parser
# will fail if we don't pass it correct environment variables.
forced_variables = [
# constituency parser
"PTB_TRAIN_PATH",
"PTB_DEV_PATH",
"PTB_TEST_PATH",
# dependency parser
"PTB_DEPENDENCIES_TRAIN",
"PTB_DEPENDENCIES_VAL",
# multilingual dependency parser
"TRAIN_PATHNAME",
"DEV_PATHNAME",
"TEST_PATHNAME",
# srl_elmo_5.5B
"SRL_TRAIN_DATA_PATH",
"SRL_VALIDATION_DATA_PATH",
# coref
"COREF_TRAIN_DATA_PATH",
"COREF_DEV_DATA_PATH",
"COREF_TEST_DATA_PATH",
# ner
"NER_TRAIN_DATA_PATH",
"NER_TEST_A_PATH",
"NER_TEST_B_PATH",
# bidirectional lm
"BIDIRECTIONAL_LM_TRAIN_PATH",
"BIDIRECTIONAL_LM_VOCAB_PATH",
"BIDIRECTIONAL_LM_ARCHIVE_PATH",
]
for var in forced_variables:
os.environ[var] = os.environ.get(var) or str(self.TEST_DIR)
for config in configs:
try:
Params.from_file(self.PROJECT_ROOT / "training_config" / config)
except Exception as e:
raise AssertionError(f"unable to load params for {config}, because {e}")
for var in forced_variables:
if os.environ[var] == str(self.TEST_DIR):
del os.environ[var]
def test_as_ordered_dict(self):
# keyD > keyC > keyE; keyDA > keyDB; Next all other keys alphabetically
preference_orders = [["keyD", "keyC", "keyE"], ["keyDA", "keyDB"]]
params = Params(
{
"keyC": "valC",
"keyB": "valB",
"keyA": "valA",
"keyE": "valE",
"keyD": {"keyDB": "valDB", "keyDA": "valDA"},
}
)
ordered_params_dict = params.as_ordered_dict(preference_orders)
expected_ordered_params_dict = OrderedDict(
{
"keyD": {"keyDA": "valDA", "keyDB": "valDB"},
"keyC": "valC",
"keyE": "valE",
"keyA": "valA",
"keyB": "valB",
}
)
assert json.dumps(ordered_params_dict) == json.dumps(expected_ordered_params_dict)
def test_to_file(self):
# Test to_file works with or without preference orders
params_dict = {"keyA": "valA", "keyB": "valB"}
expected_ordered_params_dict = OrderedDict({"keyB": "valB", "keyA": "valA"})
params = Params(params_dict)
file_path = self.TEST_DIR / "config.jsonnet"
# check with preference orders
params.to_file(file_path, [["keyB", "keyA"]])
with open(file_path, "r") as handle:
ordered_params_dict = OrderedDict(json.load(handle))
assert json.dumps(expected_ordered_params_dict) == json.dumps(ordered_params_dict)
# check without preference orders doesn't give error
params.to_file(file_path)
def test_infer_and_cast(self):
lots_of_strings = {
"a": ["10", "1.3", "true"],
"b": {"x": 10, "y": "20.1", "z": "other things"},
"c": "just a string",
}
casted = {
"a": [10, 1.3, True],
"b": {"x": 10, "y": 20.1, "z": "other things"},
"c": "just a string",
}
assert infer_and_cast(lots_of_strings) == casted
contains_bad_data = {"x": 10, "y": int}
with pytest.raises(ValueError, match="cannot infer type"):
infer_and_cast(contains_bad_data)
params = Params(lots_of_strings)
assert params.as_dict() == lots_of_strings
assert params.as_dict(infer_type_and_cast=True) == casted
def test_pop_choice(self):
choices = ["my_model", "other_model"]
params = Params({"model": "my_model"})
assert params.pop_choice("model", choices) == "my_model"
params = Params({"model": "non_existent_model"})
with pytest.raises(ConfigurationError):
params.pop_choice("model", choices)
params = Params({"model": "module.submodule.ModelName"})
        assert params.pop_choice("model", choices) == "module.submodule.ModelName"
params = Params({"model": "module.submodule.ModelName"})
with pytest.raises(ConfigurationError):
params.pop_choice("model", choices, allow_class_names=False)
|
import json
import os
import re
from collections import OrderedDict
import pytest
from allennlp.common.checks import ConfigurationError
from allennlp.common.params import infer_and_cast, Params, parse_overrides, unflatten, with_fallback
from allennlp.common.testing import AllenNlpTestCase
class TestParams(AllenNlpTestCase):
def test_load_from_file(self):
filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
params = Params.from_file(filename)
assert "dataset_reader" in params
assert "trainer" in params
model_params = params.pop("model")
assert model_params.pop("type") == "simple_tagger"
def test_replace_none(self):
params = Params({"a": "None", "b": [1.0, "None", 2], "c": {"d": "None"}})
assert params["a"] is None
assert params["b"][1] is None
assert params["c"]["d"] is None
def test_bad_unicode_environment_variables(self):
filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
os.environ["BAD_ENVIRONMENT_VARIABLE"] = "\udce2"
Params.from_file(filename)
del os.environ["BAD_ENVIRONMENT_VARIABLE"]
def test_overrides(self):
filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
overrides = (
'{ "train_data_path": "FOO", "model": { "type": "BAR" },'
'"model.text_field_embedder.tokens.type": "BAZ",'
'"iterator.sorting_keys.0.0": "question"}'
)
params = Params.from_file(filename, overrides)
assert "dataset_reader" in params
assert "trainer" in params
assert params["train_data_path"] == "FOO"
assert params["iterator"]["sorting_keys"][0][0] == "question"
model_params = params.pop("model")
assert model_params.pop("type") == "BAR"
assert model_params["text_field_embedder"]["tokens"]["type"] == "BAZ"
def test_unflatten(self):
flattened = {"a.b.c": 1, "a.b.d": 0, "a.e.f.g.h": 2, "b": 3}
unflattened = unflatten(flattened)
assert unflattened == {"a": {"b": {"c": 1, "d": 0}, "e": {"f": {"g": {"h": 2}}}}, "b": 3}
# should do nothing to a non-flat dictionary
assert unflatten(unflattened) == unflattened
def test_with_fallback(self):
preferred = {"a": 1}
fallback = {"a": 0, "b": 2}
merged = with_fallback(preferred=preferred, fallback=fallback)
assert merged == {"a": 1, "b": 2}
# incompatibility is ok
preferred = {"a": {"c": 3}}
fallback = {"a": 0, "b": 2}
merged = with_fallback(preferred=preferred, fallback=fallback)
assert merged == {"a": {"c": 3}, "b": 2}
# goes deep
preferred = {"deep": {"a": 1}}
fallback = {"deep": {"a": 0, "b": 2}}
merged = with_fallback(preferred=preferred, fallback=fallback)
assert merged == {"deep": {"a": 1, "b": 2}}
def test_parse_overrides(self):
assert parse_overrides("") == {}
assert parse_overrides("{}") == {}
override_dict = parse_overrides('{"train_data": "/train", "trainer.num_epochs": 10}')
assert override_dict == {"train_data": "/train", "trainer": {"num_epochs": 10}}
params = with_fallback(
preferred=override_dict,
fallback={
"train_data": "/test",
"model": "simple_tagger",
"trainer": {"num_epochs": 100, "optimizer": "sgd"},
},
)
assert params == {
"train_data": "/train",
"model": "simple_tagger",
"trainer": {"num_epochs": 10, "optimizer": "sgd"},
}
def test_as_flat_dict(self):
params = Params({"a": 10, "b": {"c": 20, "d": "stuff"}}).as_flat_dict()
assert params == {"a": 10, "b.c": 20, "b.d": "stuff"}
def test_jsonnet_features(self):
config_file = self.TEST_DIR / "config.jsonnet"
with open(config_file, "w") as f:
f.write(
"""{
// This example is copied straight from the jsonnet docs
person1: {
name: "Alice",
welcome: "Hello " + self.name + "!",
},
person2: self.person1 { name: "Bob" },
}"""
)
params = Params.from_file(config_file)
alice = params.pop("person1")
bob = params.pop("person2")
assert alice.as_dict() == {"name": "Alice", "welcome": "Hello Alice!"}
assert bob.as_dict() == {"name": "Bob", "welcome": "Hello Bob!"}
params.assert_empty("TestParams")
def test_regexes_with_backslashes(self):
bad_regex = self.TEST_DIR / "bad_regex.jsonnet"
good_regex = self.TEST_DIR / "good_regex.jsonnet"
with open(bad_regex, "w") as f:
f.write(r'{"myRegex": "a\.b"}')
with open(good_regex, "w") as f:
f.write(r'{"myRegex": "a\\.b"}')
with pytest.raises(RuntimeError):
Params.from_file(bad_regex)
params = Params.from_file(good_regex)
regex = params["myRegex"]
assert re.match(regex, "a.b")
assert not re.match(regex, "a-b")
# Check roundtripping
good_regex2 = self.TEST_DIR / "good_regex2.jsonnet"
with open(good_regex2, "w") as f:
f.write(json.dumps(params.as_dict()))
params2 = Params.from_file(good_regex2)
assert params.as_dict() == params2.as_dict()
def test_env_var_substitution(self):
substitutor = self.TEST_DIR / "substitutor.jsonnet"
key = "TEST_ENV_VAR_SUBSTITUTION"
assert os.environ.get(key) is None
with open(substitutor, "w") as f:
f.write(f'{{"path": std.extVar("{key}")}}')
# raises without environment variable set
with pytest.raises(RuntimeError):
Params.from_file(substitutor)
os.environ[key] = "PERFECT"
params = Params.from_file(substitutor)
assert params["path"] == "PERFECT"
del os.environ[key]
@pytest.mark.xfail(
not os.path.exists(AllenNlpTestCase.PROJECT_ROOT / "training_config"),
reason="Training configs not installed with pip",
)
def test_known_configs(self):
configs = os.listdir(self.PROJECT_ROOT / "training_config")
# Our configs use environment variable substitution, and the _jsonnet parser
# will fail if we don't pass it correct environment variables.
forced_variables = [
# constituency parser
"PTB_TRAIN_PATH",
"PTB_DEV_PATH",
"PTB_TEST_PATH",
# dependency parser
"PTB_DEPENDENCIES_TRAIN",
"PTB_DEPENDENCIES_VAL",
# multilingual dependency parser
"TRAIN_PATHNAME",
"DEV_PATHNAME",
"TEST_PATHNAME",
# srl_elmo_5.5B
"SRL_TRAIN_DATA_PATH",
"SRL_VALIDATION_DATA_PATH",
# coref
"COREF_TRAIN_DATA_PATH",
"COREF_DEV_DATA_PATH",
"COREF_TEST_DATA_PATH",
# ner
"NER_TRAIN_DATA_PATH",
"NER_TEST_A_PATH",
"NER_TEST_B_PATH",
# bidirectional lm
"BIDIRECTIONAL_LM_TRAIN_PATH",
"BIDIRECTIONAL_LM_VOCAB_PATH",
"BIDIRECTIONAL_LM_ARCHIVE_PATH",
]
for var in forced_variables:
os.environ[var] = os.environ.get(var) or str(self.TEST_DIR)
for config in configs:
try:
Params.from_file(self.PROJECT_ROOT / "training_config" / config)
except Exception as e:
raise AssertionError(f"unable to load params for {config}, because {e}")
for var in forced_variables:
if os.environ[var] == str(self.TEST_DIR):
del os.environ[var]
def test_as_ordered_dict(self):
# keyD > keyC > keyE; keyDA > keyDB; Next all other keys alphabetically
preference_orders = [["keyD", "keyC", "keyE"], ["keyDA", "keyDB"]]
params = Params(
{
"keyC": "valC",
"keyB": "valB",
"keyA": "valA",
"keyE": "valE",
"keyD": {"keyDB": "valDB", "keyDA": "valDA"},
}
)
ordered_params_dict = params.as_ordered_dict(preference_orders)
expected_ordered_params_dict = OrderedDict(
{
"keyD": {"keyDA": "valDA", "keyDB": "valDB"},
"keyC": "valC",
"keyE": "valE",
"keyA": "valA",
"keyB": "valB",
}
)
assert json.dumps(ordered_params_dict) == json.dumps(expected_ordered_params_dict)
def test_to_file(self):
# Test to_file works with or without preference orders
params_dict = {"keyA": "valA", "keyB": "valB"}
expected_ordered_params_dict = OrderedDict({"keyB": "valB", "keyA": "valA"})
params = Params(params_dict)
file_path = self.TEST_DIR / "config.jsonnet"
# check with preference orders
params.to_file(file_path, [["keyB", "keyA"]])
with open(file_path, "r") as handle:
ordered_params_dict = OrderedDict(json.load(handle))
assert json.dumps(expected_ordered_params_dict) == json.dumps(ordered_params_dict)
# check without preference orders doesn't give error
params.to_file(file_path)
def test_infer_and_cast(self):
lots_of_strings = {
"a": ["10", "1.3", "true"],
"b": {"x": 10, "y": "20.1", "z": "other things"},
"c": "just a string",
}
casted = {
"a": [10, 1.3, True],
"b": {"x": 10, "y": 20.1, "z": "other things"},
"c": "just a string",
}
assert infer_and_cast(lots_of_strings) == casted
contains_bad_data = {"x": 10, "y": int}
with pytest.raises(ValueError, match="cannot infer type"):
infer_and_cast(contains_bad_data)
params = Params(lots_of_strings)
assert params.as_dict() == lots_of_strings
assert params.as_dict(infer_type_and_cast=True) == casted
def test_pop_choice(self):
choices = ["my_model", "other_model"]
params = Params({"model": "my_model"})
assert params.pop_choice("model", choices) == "my_model"
params = Params({"model": "non_existent_model"})
with pytest.raises(ConfigurationError):
params.pop_choice("model", choices)
params = Params({"model": "module.submodule.ModelName"})
        assert params.pop_choice("model", choices) == "module.submodule.ModelName"
params = Params({"model": "module.submodule.ModelName"})
with pytest.raises(ConfigurationError):
params.pop_choice("model", choices, allow_class_names=False)
|
"""RemoteJIT client/server config functions
"""
__all__ = ['RemoteJIT', 'Signature', 'Caller']
import os
import inspect
import warnings
import ctypes
from contextlib import nullcontext
from . import irtools
from .typesystem import Type, get_signature
from .thrift import Server, Dispatcher, dispatchermethod, Data, Client
from .utils import get_local_ip
from .targetinfo import TargetInfo
from .rbclib import tracing_allocator
# XXX WIP: the OmnisciCompilerPipeline is no longer omnisci-specific because
# we support Arrays even without omnisci, so it must be renamed and moved
# somewhere else
from .omnisci_backend import OmnisciCompilerPipeline
def isfunctionlike(obj):
"""Return True if object is function alike.
"""
if obj is None or isinstance(obj, (Signature, list, tuple, str, Caller)):
return False
return True
def extract_templates(options):
"""Extract templates mapping data from options dictionary.
If options does not contain "templates", it will be constructed
from all unknown options that have list values. Otherwise, the
corresponding value is returned with no further processing of
options content.
Parameters
----------
options : dict
Returns
-------
options : dict
A copy of input without templates mapping data.
templates : dict
Templates mapping which is a collections of pairs of template
name and a list of concrete types. Template name cannot
correspond to a concrete type.
"""
known_options = ['devices', 'local']
new_options = {}
templates = options.get('templates')
if templates is not None:
new_options.update(options)
del new_options['templates']
else:
templates = {}
for k, v in options.items():
if (isinstance(k, str) and isinstance(v, list) and k not in known_options):
templates[k] = v
else:
new_options[k] = v
return new_options, templates
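# Illustrative sketch (not executed); 'T' is a hypothetical template name:
#   extract_templates({'T': ['int32', 'float64'], 'devices': ['CPU']})
# returns ({'devices': ['CPU']}, {'T': ['int32', 'float64']}) -- unknown
# list-valued options become the templates mapping, known options are kept.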
class Signature(object):
"""Signature decorator for Python functions.
A Signature decorator may contain many signature objects
representing the prototypes of functions.
    Signature decorators are reusable and composable. For example:
.. highlight:: python
.. code-block:: python
        rjit = RemoteJIT(host='localhost', port=6274)
# remotebinaryfunc is Signature instance
remotebinaryfunc = rjit('int32(int32, int32)',
'float32(float32, float32)', ...)
# add will be Caller instance
@remotebinaryfunc
def add(a, b):
return a + b
# sub will be Caller instance
@remotebinaryfunc
def sub(a, b):
return a - b
add(1, 2) # returns 3
sub(1.0, 2.0) # returns -1.0
"""
def __init__(self, remotejit):
assert isinstance(remotejit, RemoteJIT), type(remotejit)
self.remotejit = remotejit
self.signatures = []
self.signature_devices = {}
self.signature_templates = {}
@property
def debug(self):
return self.remotejit.debug
@property
def local(self):
sig = Signature(self.remotejit.local)
sig.signatures.extend(self.signatures)
assert not self.signature_devices
assert not self.signature_templates
return sig
def __str__(self):
lst = ["'%s'" % (s,) for s in self.signatures]
return '%s(%s)' % (self.__class__.__name__, ', '.join(lst))
def __call__(self, obj, **options):
"""Decorate signatures or a function.
Parameters
----------
obj : {str, Signature, function, ...}
Specify object that represents a function type.
Keyword parameters
------------------
devices : list
Specify device names for the given set of signatures.
templates : dict
Specify template types mapping.
Returns
-------
result : {Signature, Caller}
If obj is a function, return Caller. Otherwise return self
that is extended with new signatures from obj.
Note
----
The validity of the input argument is not checked here. This
is because the bit-size of certain C types (e.g. size_t, long,
etc) depend on the target device which information will be
available at the compile stage. The target dependent
signatures can be retrieved using
`signature.get_signatures()`.
"""
if obj is None:
return self
options, templates = extract_templates(options)
devices = options.get('devices')
if isinstance(obj, Signature):
self.signatures.extend(obj.signatures)
self.signature_devices.update(obj.signature_devices)
self.remotejit.discard_last_compile()
if devices is not None:
for s in obj.signatures:
self.signature_devices[s] = devices
assert not templates
for s in obj.signatures:
t = obj.signature_templates.get(s)
if t is not None:
self.signature_templates[s] = t
return self
if isinstance(obj, Caller):
# return new Caller with extended signatures set
assert obj.remotejit is self.remotejit
final = Signature(self.remotejit)
final(self) # copies the signatures from self to final
final(obj.signature) # copies the signatures from obj to final
assert devices is None
assert not templates
return Caller(obj.func, final)
if isfunctionlike(obj):
final = Signature(self.remotejit)
final(self) # copies the signatures from self to final
assert devices is None
assert not templates
return Caller(obj, final)
self.signatures.append(obj)
self.remotejit.discard_last_compile()
if devices is not None:
self.signature_devices[obj] = devices
if templates:
self.signature_templates[obj] = templates
return self
def best_match(self, func, atypes: tuple) -> Type:
"""Return function type from signatures that matches best with given
argument types.
If no match is found, raise TypeError.
Parameters
----------
atypes : Type-tuple
Specify a tuple of argument types.
Returns
-------
ftype : Type
Function type that arguments match best with given argument
types.
"""
ftype = None
match_penalty = None
available_types = self.normalized(func).signatures
for typ in available_types:
penalty = typ.match(atypes)
if penalty is not None:
if ftype is None or penalty < match_penalty:
ftype = typ
match_penalty = penalty
if ftype is None:
satypes = ', '.join(map(str, atypes))
available = '; '.join(map(str, available_types))
raise TypeError(
f'found no matching function type to given argument types'
f' `{satypes}`. Available function types: {available}')
return ftype
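    # Note: `best_match` evaluates `typ.match(atypes)` for every normalized
    # signature and keeps the candidate with the smallest penalty; a penalty of
    # None means that signature cannot accept the given argument types.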
def normalized(self, func=None):
"""Return a copy of Signature object where all signatures are
normalized to Type instances using the current target device
information.
Parameters
----------
func : {None, callable}
Python function that annotations are attached to signature.
Returns
-------
signature : Signature
"""
signature = Signature(self.remotejit)
fsig = Type.fromcallable(func) if func is not None else None
nargs = fsig.arity if func is not None else None
target_info = TargetInfo()
for sig in self.signatures:
devices = self.signature_devices.get(sig)
if not target_info.check_enabled(devices):
if self.debug:
print(f'{type(self).__name__}.normalized: skipping {sig} as'
f' not supported by devices: {devices}')
continue
templates = self.signature_templates.get(sig, {})
sig = Type.fromobject(sig)
if not sig.is_complete:
warnings.warn(f'Incomplete signature {sig} will be ignored')
continue
if not sig.is_function:
raise ValueError(
'expected signature representing function type,'
f' got `{sig}`')
if nargs is None:
nargs = sig.arity
elif sig.arity != nargs:
raise ValueError(f'signature `{sig}` must have arity {nargs}'
f' but got {len(sig[1])}')
if fsig is not None:
sig.inherit_annotations(fsig)
if not sig.is_concrete:
for csig in sig.apply_templates(templates):
assert isinstance(csig, Type), (sig, csig, type(csig))
if csig not in signature.signatures:
signature.signatures.append(csig)
else:
if sig not in signature.signatures:
signature.signatures.append(sig)
if fsig is not None and fsig.is_complete:
if fsig not in signature.signatures:
signature.signatures.append(fsig)
return signature
class Caller(object):
"""Remote JIT caller, holds the decorated function that can be
executed remotely.
"""
def __init__(self, func, signature: Signature):
"""Construct remote JIT caller instance.
Parameters
----------
func : callable
Specify a Python function that is used as a template to
remotely JIT compiled functions.
signature : Signature
Specify a collection of signatures.
local : bool
When True, local process will be interpreted as
remote. Useful for debugging.
"""
self.remotejit = signature.remotejit
self.signature = signature
func = self.remotejit.preprocess_callable(func)
self.func = func
self.nargs = len(get_signature(func).parameters)
# Attributes used in RBC user-interface
self._is_compiled = set() # items are (fname, ftype)
self._client = None
self.remotejit.add_caller(self)
@property
def local(self):
"""Return Caller instance that executes function calls on the local
host. Useful for debugging.
"""
return Caller(self.func, self.signature.local)
def __repr__(self):
return '%s(%s, %s, local=%s)' % (type(self).__name__, self.func,
self.signature, self.local)
def __str__(self):
return self.describe()
def describe(self):
"""Return LLVM IRs of all target devices.
"""
lst = ['']
fid = 0
for device, target_info in self.remotejit.targets.items():
with Type.alias(**self.remotejit.typesystem_aliases):
with target_info:
lst.append(f'{device:-^80}')
signatures = self.get_signatures()
signatures_map = {}
for sig in signatures:
fid += 1
signatures_map[fid] = sig
llvm_module, succesful_fids = irtools.compile_to_LLVM(
[(self.func, signatures_map)],
target_info,
pipeline_class=OmnisciCompilerPipeline,
debug=self.remotejit.debug)
lst.append(str(llvm_module))
                    lst.append(f'{"":-^80}')
return '\n'.join(lst)
def get_signatures(self):
"""Return a list of normalized signatures for given target device.
"""
return self.signature.normalized(self.func).signatures
# RBC user-interface
def __call__(self, *arguments, **options):
"""Return the result of a remote JIT compiled function call.
"""
device = options.get('device')
targets = self.remotejit.targets
if device is None:
if len(targets) > 1:
raise TypeError(
f'specifying device is required when target has more than'
                    f' one device. Available devices: {", ".join(targets)}')
device = tuple(targets)[0]
target_info = targets[device]
with target_info:
atypes = tuple(map(Type.fromvalue, arguments))
ftype = self.signature.best_match(self.func, atypes)
key = self.func.__name__, ftype
if key not in self._is_compiled:
self.remotejit.remote_compile(self.func, ftype, target_info)
self._is_compiled.add(key)
return self.remotejit.remote_call(self.func, ftype, arguments)
class RemoteJIT(object):
"""RemoteJIT is a decorator generator for user functions to be
remotely JIT compiled.
To use, define
.. highlight:: python
.. code-block:: python
rjit = RemoteJIT(host='localhost', port=6274)
@rjit
def foo(a: int, b: int) -> int:
return a + b
@rjit('double(double, double)',
'int64(int64, int64)')
def bar(a, b):
return a + b
# Finally, call
c = foo(1, 2) # c = 3
b = bar(7.0, 1.0) # b = 8.0
The sum will be evaluated in the remote host.
"""
multiplexed = True
thrift_content = None
typesystem_aliases = dict()
def __init__(self, host='localhost', port=11532,
local=False, debug=False, use_tracing_allocator=False):
"""Construct remote JIT function decorator.
The decorator is re-usable for different functions.
Parameters
----------
host : str
Specify the host name of IP of JIT server
port : {int, str}
Specify the service port of the JIT server
local : bool
When True, use local client. Useful for debugging.
debug : bool
When True, output debug messages.
use_tracing_allocator : bool
When True, enable the automatic detection of memory leaks.
"""
if host == 'localhost':
host = get_local_ip()
if use_tracing_allocator and not local:
raise ValueError('use_tracing_allocator=True can be used only with local=True')
self.debug = debug
self.use_tracing_allocator = use_tracing_allocator
self.host = host
self.port = int(port)
self.server_process = None
# A collection of Caller instances. Each represents a function
        # that has many argument-type-dependent implementations.
self._callers = []
self._last_compile = None
self._targets = None
if local:
self._client = LocalClient(debug=debug,
use_tracing_allocator=use_tracing_allocator)
else:
self._client = None
@property
def local(self):
localjit = type(self)(local=True, debug=self.debug)
localjit._callers.extend(self._callers)
return localjit
def add_caller(self, caller):
self._callers.append(caller)
self.discard_last_compile()
def get_callers(self):
return self._callers
def reset(self):
"""Drop all callers definitions and compilation results.
"""
self._callers.clear()
self.discard_last_compile()
@property
def have_last_compile(self):
"""Check if compile data exists.
See `set_last_compile` method for more information.
"""
return self._last_compile is not None
def discard_last_compile(self):
"""Discard compile data.
See `set_last_compile` method for more information.
"""
self._last_compile = None
def set_last_compile(self, compile_data):
"""Save compile data.
The caller is responsible for discarding previous compiler
data by calling `discard_last_compile` method.
Parameters
----------
compile_data : object
Compile data can be any Python object. When None, it is
interpreted as no compile data is available.
Notes
-----
The have/discard/set_last_compile methods provide a way to
avoid unnecessary compilations when the remote server supports
registration of compiled functions. The corresponding
`register` method is expected to use the following pattern:
.. code-block:: python
def register(self):
if self.have_last_compile:
return
<compile defined functions>
self.set_last_compile(<compilation results>)
The `discard_last_compile()` method is called when the compile
data becomes obsolete or needs to be discarded. For instance,
the compile data will be discarded when calling the following
methods: `reset`, `add_caller`. Note that the `add_caller`
call is triggered when applying the remotejit decorator to a
Python function to be compiled.
"""
assert self._last_compile is None
self._last_compile = compile_data
def get_pending_names(self):
"""Return the names of functions that have not been registered to the
remote server.
"""
names = set()
if not self.have_last_compile:
for caller in reversed(self.get_callers()):
names.add(caller.func.__name__)
return names
def retrieve_targets(self):
"""Retrieve target device information from remote client.
Redefine this method if remote client is not native.
Returns
-------
targets : dict
          Map of target device names and information.
"""
# TODO: rename thrift API targets to get_device_parameters?
response = self.client(remotejit=dict(targets=()))
targets = {}
for device, data in response['remotejit']['targets'].items():
targets[device] = TargetInfo.fromjson(data)
return targets
@property
def targets(self):
"""Return device-target_info mapping of the remote server.
"""
if self._targets is None:
self._targets = self.retrieve_targets()
return self._targets
def __call__(self, *signatures, **options):
"""Define a remote JIT function signatures and template.
Parameters
----------
signatures : tuple
Specify signatures of a remote JIT function, or a Python
function as a template from which the remote JIT function
will be compiled.
Keyword parameters
------------------
local : bool
devices : list
Specify device names for the given set of signatures.
templates : dict
Specify template types mapping.
Returns
-------
sig: {Signature, Caller}
Signature decorator or Caller
Notes
-----
The signatures can be strings in the following form:
"<return type>(<argument type 1>, <argument type 2>, ...)"
or any other object that can be converted to function type,
see `Type.fromobject` for more information.
"""
if options.get('local'):
s = Signature(self.local)
else:
s = Signature(self)
devices = options.get('devices')
options, templates = extract_templates(options)
for sig in signatures:
s = s(sig, devices=devices, templates=templates)
return s
def start_server(self, background=False):
"""Start remotejit server from client.
"""
thrift_file = os.path.join(os.path.dirname(__file__),
'remotejit.thrift')
        print('starting rpc.thrift server: %s' % (thrift_file), end='',
flush=True)
if self.debug:
print(flush=True)
dispatcher = DebugDispatcherRJIT
else:
dispatcher = DispatcherRJIT
if background:
ps = Server.run_bg(dispatcher, thrift_file,
dict(host=self.host, port=self.port,
debug=self.debug))
self.server_process = ps
else:
Server.run(dispatcher, thrift_file,
dict(host=self.host, port=self.port,
debug=self.debug))
print('... rpc.thrift server stopped', flush=True)
def stop_server(self):
"""Stop remotejit server from client.
"""
if self.server_process is not None and self.server_process.is_alive():
print('... stopping rpc.thrift server')
self.server_process.terminate()
self.server_process = None
@property
def client(self):
"""Return remote host connection as Client instance.
"""
if self._client is None:
self._client = Client(
host=self.host,
port=self.port,
multiplexed=self.multiplexed,
thrift_content=self.thrift_content,
socket_timeout=60000)
return self._client
def remote_compile(self, func, ftype: Type, target_info: TargetInfo):
"""Remote compile function and signatures to machine code.
The input function `func` is compiled to LLVM IR module, the
LLVM IR module is sent to remote host where the remote host is
expected to complete the compilation process.
Return the corresponding LLVM IR module instance which may be
useful for debugging.
"""
if self.debug:
print(f'remote_compile({func}, {ftype})')
llvm_module, succesful_fids = irtools.compile_to_LLVM(
[(func, {0: ftype})],
target_info,
pipeline_class=OmnisciCompilerPipeline,
debug=self.debug)
ir = str(llvm_module)
mangled_signatures = ';'.join([s.mangle() for s in [ftype]])
response = self.client(remotejit=dict(
compile=(func.__name__, mangled_signatures, ir)))
assert response['remotejit']['compile'], response
return llvm_module
def remote_call(self, func, ftype: Type, arguments: tuple):
"""Call function remotely on given arguments.
The input function `func` is called remotely by sending the
arguments data to remote host where the previously compiled
function (see `remote_compile` method) is applied to the
arguments, and the result is returned to local process.
"""
if self.debug:
print(f'remote_call({func}, {ftype}, {arguments})')
fullname = func.__name__ + ftype.mangle()
response = self.client(remotejit=dict(call=(fullname, arguments)))
return response['remotejit']['call']
def python(self, statement):
"""Execute Python statement remotely.
"""
response = self.client(remotejit=dict(python=(statement,)))
return response['remotejit']['python']
def preprocess_callable(self, func):
"""Preprocess func to be used as a remotejit function definition.
Parameters
----------
func : callable
Returns
-------
func : callable
Preprocessed func.
"""
return func
class DispatcherRJIT(Dispatcher):
"""Implements remotejit service methods.
"""
def __init__(self, server, debug=False, use_tracing_allocator=False):
super().__init__(server, debug=debug)
self.use_tracing_allocator = use_tracing_allocator
self.compiled_functions = dict()
self.engines = dict()
self.python_globals = dict()
self.python_locals = dict()
@dispatchermethod
def targets(self) -> dict:
"""Retrieve target device information.
Returns
-------
info : dict
Map of target devices and their properties.
"""
if self.use_tracing_allocator:
target_info = TargetInfo.host(name='host_cpu_tracing_allocator',
use_tracing_allocator=True)
else:
target_info = TargetInfo.host()
target_info.set('has_numba', True)
target_info.set('has_cpython', True)
return dict(cpu=target_info.tojson())
@dispatchermethod
def compile(self, name: str, signatures: str, ir: str) -> int:
"""JIT compile function.
Parameters
----------
name : str
Specify the function name.
signatures : str
Specify semi-colon separated list of mangled signatures.
ir : str
Specify LLVM IR representation of the function.
"""
engine = irtools.compile_IR(ir)
for msig in signatures.split(';'):
sig = Type.demangle(msig)
ctypes_sig = sig.toctypes()
assert sig.is_function
if sig[0].is_aggregate:
raise RuntimeError(
f'Functions with aggregate return type values are not supported,'
f' got function `{name}` with `{sig}` signature')
fullname = name + msig
addr = engine.get_function_address(fullname)
if self.debug:
print(f'compile({name}, {sig}) -> {hex(addr)}')
# storing engine as the owner of function addresses
if addr:
self.compiled_functions[fullname] = engine, ctypes_sig(addr), sig, ctypes_sig
else:
                warnings.warn(f'No compilation result for {name}|{sig=}')
return True
@dispatchermethod
def call(self, fullname: str, arguments: tuple) -> Data:
"""Call JIT compiled function
Parameters
----------
fullname : str
Specify the full name of the function that is in form
"<name><mangled signature>"
arguments : tuple
Specify the arguments to the function.
"""
# if we are using a tracing allocator, automatically detect memory leaks
# at each call.
if self.use_tracing_allocator:
leak_detector = tracing_allocator.new_leak_detector()
else:
leak_detector = nullcontext()
with leak_detector:
return self._do_call(fullname, arguments)
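    # Internal helper for `call`: looks up the compiled entry point, converts
    # the Python arguments to ctypes (flattening struct values into their
    # members and casting pointer-like values), invokes the function and
    # converts the result back to a Python value.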
def _do_call(self, fullname, arguments):
if self.debug:
print(f'call({fullname}, {arguments})')
ef = self.compiled_functions.get(fullname)
if ef is None:
raise RuntimeError(
f'no such compiled function `{fullname}`. Available functions:\n'
                f' {"; ".join(list(self.compiled_functions))}\n.')
sig = ef[2]
ctypes_sig = ef[3]
if len(arguments) == 0:
assert sig.arity == 1 and sig[1][0].is_void, sig
else:
assert len(arguments) == sig.arity, (len(arguments), sig.arity)
ctypes_arguments = []
for typ, ctypes_typ, value in zip(sig[1], ctypes_sig._argtypes_, arguments):
if typ.is_custom:
typ = typ.get_struct_type()
if typ.is_struct:
if isinstance(value, tuple):
member_values = [t.toctypes()(value[i]) for i, t in enumerate(typ)]
else:
member_values = [t.toctypes()(getattr(value, t.name)) for t in typ]
ctypes_arguments.extend(member_values)
elif typ.is_pointer:
if isinstance(value, ctypes.c_void_p):
value = ctypes.cast(value, ctypes_typ)
else:
value = ctypes.cast(value, ctypes_typ)
ctypes_arguments.append(value)
else:
ctypes_arguments.append(value)
r = ef[1](*ctypes_arguments)
if sig[0].is_pointer and sig[0][0].is_void and isinstance(r, int):
r = ctypes.c_void_p(r)
if self.debug:
print(f'-> {r}')
if hasattr(r, 'topython'):
return r.topython()
return r
@dispatchermethod
def python(self, statement: str) -> int:
"""Execute Python statement.
"""
if self.debug:
print(f'python({statement!r})')
exec(statement, self.python_globals, self.python_locals)
return True
class DebugDispatcherRJIT(DispatcherRJIT):
"""
Enables debug messages.
"""
debug = True
class LocalClient(object):
"""Pretender of thrift.Client.
All calls will be made in a local process. Useful for debugging.
"""
def __init__(self, debug=False, use_tracing_allocator=False):
self.dispatcher = DispatcherRJIT(None, debug=debug,
use_tracing_allocator=use_tracing_allocator)
def __call__(self, **services):
results = {}
for service_name, query_dict in services.items():
results[service_name] = {}
for mthname, args in query_dict.items():
mth = getattr(self.dispatcher, mthname)
mth = inspect.unwrap(mth)
results[service_name][mthname] = mth(self.dispatcher, *args)
return results
|
"""RemoteJIT client/server config functions
"""
__all__ = ['RemoteJIT', 'Signature', 'Caller']
import os
import inspect
import warnings
import ctypes
from contextlib import nullcontext
from . import irtools
from .typesystem import Type, get_signature
from .thrift import Server, Dispatcher, dispatchermethod, Data, Client
from .utils import get_local_ip
from .targetinfo import TargetInfo
from .rbclib import tracing_allocator
# XXX WIP: the OmnisciCompilerPipeline is no longer omnisci-specific because
# we support Arrays even without omnisci, so it must be renamed and moved
# somewhere else
from .omnisci_backend import OmnisciCompilerPipeline
def isfunctionlike(obj):
"""Return True if object is function alike.
"""
if obj is None or isinstance(obj, (Signature, list, tuple, str, Caller)):
return False
return True
def extract_templates(options):
"""Extract templates mapping data from options dictionary.
If options does not contain "templates", it will be constructed
from all unknown options that have list values. Otherwise, the
corresponding value is returned with no further processing of
options content.
Parameters
----------
options : dict
Returns
-------
options : dict
A copy of input without templates mapping data.
templates : dict
Templates mapping, which is a collection of pairs of template
name and a list of concrete types. A template name cannot
correspond to a concrete type.
"""
known_options = ['devices', 'local']
new_options = {}
templates = options.get('templates')
if templates is not None:
new_options.update(options)
del new_options['templates']
else:
templates = {}
for k, v in options.items():
if (isinstance(k, str) and isinstance(v, list) and k not in known_options):
templates[k] = v
else:
new_options[k] = v
return new_options, templates
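# Editor's note: a minimal illustrative sketch of extract_templates (the values
# below are hypothetical, not taken from any test suite). Any unknown keyword
# whose value is a list is treated as a template mapping:
#
#     new_options, templates = extract_templates(
#         {'devices': ['cpu'], 'T': ['int32', 'float64']})
#     # new_options -> {'devices': ['cpu']}
#     # templates   -> {'T': ['int32', 'float64']}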
class Signature(object):
"""Signature decorator for Python functions.
A Signature decorator may contain many signature objects
representing the prototypes of functions.
Signature decorators are reusable and composable. For example:
.. highlight:: python
.. code-block:: python
rjit = RemoteJIT(host='localhost', port=6274)
# remotebinaryfunc is Signature instance
remotebinaryfunc = rjit('int32(int32, int32)',
'float32(float32, float32)', ...)
# add will be Caller instance
@remotebinaryfunc
def add(a, b):
return a + b
# sub will be Caller instance
@remotebinaryfunc
def sub(a, b):
return a - b
add(1, 2) # returns 3
sub(1.0, 2.0) # returns -1.0
"""
def __init__(self, remotejit):
assert isinstance(remotejit, RemoteJIT), type(remotejit)
self.remotejit = remotejit
self.signatures = []
self.signature_devices = {}
self.signature_templates = {}
@property
def debug(self):
return self.remotejit.debug
@property
def local(self):
sig = Signature(self.remotejit.local)
sig.signatures.extend(self.signatures)
assert not self.signature_devices
assert not self.signature_templates
return sig
def __str__(self):
lst = ["'%s'" % (s,) for s in self.signatures]
return '%s(%s)' % (self.__class__.__name__, ', '.join(lst))
def __call__(self, obj, **options):
"""Decorate signatures or a function.
Parameters
----------
obj : {str, Signature, function, ...}
Specify object that represents a function type.
Keyword parameters
------------------
devices : list
Specify device names for the given set of signatures.
templates : dict
Specify template types mapping.
Returns
-------
result : {Signature, Caller}
If obj is a function, return Caller. Otherwise return self
that is extended with new signatures from obj.
Note
----
The validity of the input argument is not checked here. This
is because the bit-size of certain C types (e.g. size_t, long,
etc.) depends on the target device, and that information only
becomes available at the compile stage. The target dependent
signatures can be retrieved using
`signature.get_signatures()`.
"""
if obj is None:
return self
options, templates = extract_templates(options)
devices = options.get('devices')
if isinstance(obj, Signature):
self.signatures.extend(obj.signatures)
self.signature_devices.update(obj.signature_devices)
self.remotejit.discard_last_compile()
if devices is not None:
for s in obj.signatures:
self.signature_devices[s] = devices
assert not templates
for s in obj.signatures:
t = obj.signature_templates.get(s)
if t is not None:
self.signature_templates[s] = t
return self
if isinstance(obj, Caller):
# return new Caller with extended signatures set
assert obj.remotejit is self.remotejit
final = Signature(self.remotejit)
final(self) # copies the signatures from self to final
final(obj.signature) # copies the signatures from obj to final
assert devices is None
assert not templates
return Caller(obj.func, final)
if isfunctionlike(obj):
final = Signature(self.remotejit)
final(self) # copies the signatures from self to final
assert devices is None
assert not templates
return Caller(obj, final)
self.signatures.append(obj)
self.remotejit.discard_last_compile()
if devices is not None:
self.signature_devices[obj] = devices
if templates:
self.signature_templates[obj] = templates
return self
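# Editor's note: a behaviour sketch of __call__ above (illustrative; assumes
# `rjit` is a RemoteJIT instance and `fn` is a plain Python function):
#
#     sig = rjit('int32(int32, int32)')        # Signature with one prototype
#     sig = sig('float32(float32, float32)')   # same Signature, extended
#     caller = sig(fn)                         # a function yields a Caller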
def best_match(self, func, atypes: tuple) -> Type:
"""Return function type from signatures that matches best with given
argument types.
If no match is found, raise TypeError.
Parameters
----------
atypes : Type-tuple
Specify a tuple of argument types.
Returns
-------
ftype : Type
Function type from the signatures that best matches the given
argument types.
"""
ftype = None
match_penalty = None
available_types = self.normalized(func).signatures
for typ in available_types:
penalty = typ.match(atypes)
if penalty is not None:
if ftype is None or penalty < match_penalty:
ftype = typ
match_penalty = penalty
if ftype is None:
satypes = ', '.join(map(str, atypes))
available = '; '.join(map(str, available_types))
raise TypeError(
f'found no matching function type to given argument types'
f' `{satypes}`. Available function types: {available}')
return ftype
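# Editor's note: best_match is a lowest-penalty search over the normalized
# signatures. Hypothetical sketch (`sig` is a Signature, `fn` a Python function):
#
#     atypes = tuple(map(Type.fromvalue, (1, 2)))
#     ftype = sig.best_match(fn, atypes)   # returns the cheapest matching
#                                          # prototype, or raises TypeError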
def normalized(self, func=None):
"""Return a copy of Signature object where all signatures are
normalized to Type instances using the current target device
information.
Parameters
----------
func : {None, callable}
Python function whose annotations are attached to the signature.
Returns
-------
signature : Signature
"""
signature = Signature(self.remotejit)
fsig = Type.fromcallable(func) if func is not None else None
nargs = fsig.arity if func is not None else None
target_info = TargetInfo()
for sig in self.signatures:
devices = self.signature_devices.get(sig)
if not target_info.check_enabled(devices):
if self.debug:
print(f'{type(self).__name__}.normalized: skipping {sig} as'
f' not supported by devices: {devices}')
continue
templates = self.signature_templates.get(sig, {})
sig = Type.fromobject(sig)
if not sig.is_complete:
warnings.warn(f'Incomplete signature {sig} will be ignored')
continue
if not sig.is_function:
raise ValueError(
'expected signature representing function type,'
f' got `{sig}`')
if nargs is None:
nargs = sig.arity
elif sig.arity != nargs:
raise ValueError(f'signature `{sig}` must have arity {nargs}'
f' but got {len(sig[1])}')
if fsig is not None:
sig.inherit_annotations(fsig)
if not sig.is_concrete:
for csig in sig.apply_templates(templates):
assert isinstance(csig, Type), (sig, csig, type(csig))
if csig not in signature.signatures:
signature.signatures.append(csig)
else:
if sig not in signature.signatures:
signature.signatures.append(sig)
if fsig is not None and fsig.is_complete:
if fsig not in signature.signatures:
signature.signatures.append(fsig)
return signature
class Caller(object):
"""Remote JIT caller, holds the decorated function that can be
executed remotely.
"""
def __init__(self, func, signature: Signature):
"""Construct remote JIT caller instance.
Parameters
----------
func : callable
Specify a Python function that is used as a template for
remotely JIT compiled functions.
signature : Signature
Specify a collection of signatures.
"""
self.remotejit = signature.remotejit
self.signature = signature
func = self.remotejit.preprocess_callable(func)
self.func = func
self.nargs = len(get_signature(func).parameters)
# Attributes used in RBC user-interface
self._is_compiled = set() # items are (fname, ftype)
self._client = None
self.remotejit.add_caller(self)
@property
def local(self):
"""Return Caller instance that executes function calls on the local
host. Useful for debugging.
"""
return Caller(self.func, self.signature.local)
def __repr__(self):
return '%s(%s, %s, local=%s)' % (type(self).__name__, self.func,
self.signature, self.local)
def __str__(self):
return self.describe()
def describe(self):
"""Return LLVM IRs of all target devices.
"""
lst = ['']
fid = 0
for device, target_info in self.remotejit.targets.items():
with Type.alias(**self.remotejit.typesystem_aliases):
with target_info:
lst.append(f'{device:-^80}')
signatures = self.get_signatures()
signatures_map = {}
for sig in signatures:
fid += 1
signatures_map[fid] = sig
llvm_module, succesful_fids = irtools.compile_to_LLVM(
[(self.func, signatures_map)],
target_info,
pipeline_class=OmnisciCompilerPipeline,
debug=self.remotejit.debug)
lst.append(str(llvm_module))
lst.append(f'{"":-^80}')
return '\n'.join(lst)
def get_signatures(self):
"""Return a list of normalized signatures for given target device.
"""
return self.signature.normalized(self.func).signatures
# RBC user-interface
def __call__(self, *arguments, **options):
"""Return the result of a remote JIT compiled function call.
"""
device = options.get('device')
targets = self.remotejit.targets
if device is None:
if len(targets) > 1:
raise TypeError(
f'specifying device is required when target has more than'
f' one device. Available devices: {", ".join(targets)}')
device = tuple(targets)[0]
target_info = targets[device]
with target_info:
atypes = tuple(map(Type.fromvalue, arguments))
ftype = self.signature.best_match(self.func, atypes)
key = self.func.__name__, ftype
if key not in self._is_compiled:
self.remotejit.remote_compile(self.func, ftype, target_info)
self._is_compiled.add(key)
return self.remotejit.remote_call(self.func, ftype, arguments)
class RemoteJIT(object):
"""RemoteJIT is a decorator generator for user functions to be
remotely JIT compiled.
To use, define
.. highlight:: python
.. code-block:: python
rjit = RemoteJIT(host='localhost', port=6274)
@rjit
def foo(a: int, b: int) -> int:
return a + b
@rjit('double(double, double)',
'int64(int64, int64)')
def bar(a, b):
return a + b
# Finally, call
c = foo(1, 2) # c = 3
b = bar(7.0, 1.0) # b = 8.0
The sums will be evaluated on the remote host.
"""
multiplexed = True
thrift_content = None
typesystem_aliases = dict()
def __init__(self, host='localhost', port=11532,
local=False, debug=False, use_tracing_allocator=False):
"""Construct remote JIT function decorator.
The decorator is re-usable for different functions.
Parameters
----------
host : str
Specify the host name or IP address of the JIT server
port : {int, str}
Specify the service port of the JIT server
local : bool
When True, use local client. Useful for debugging.
debug : bool
When True, output debug messages.
use_tracing_allocator : bool
When True, enable the automatic detection of memory leaks.
"""
if host == 'localhost':
host = get_local_ip()
if use_tracing_allocator and not local:
raise ValueError('use_tracing_allocator=True can be used only with local=True')
self.debug = debug
self.use_tracing_allocator = use_tracing_allocator
self.host = host
self.port = int(port)
self.server_process = None
# A collection of Caller instances. Each represents a function
# that may have many argument-type-dependent implementations.
self._callers = []
self._last_compile = None
self._targets = None
if local:
self._client = LocalClient(debug=debug,
use_tracing_allocator=use_tracing_allocator)
else:
self._client = None
@property
def local(self):
localjit = type(self)(local=True, debug=self.debug)
localjit._callers.extend(self._callers)
return localjit
def add_caller(self, caller):
self._callers.append(caller)
self.discard_last_compile()
def get_callers(self):
return self._callers
def reset(self):
"""Drop all callers definitions and compilation results.
"""
self._callers.clear()
self.discard_last_compile()
@property
def have_last_compile(self):
"""Check if compile data exists.
See `set_last_compile` method for more information.
"""
return self._last_compile is not None
def discard_last_compile(self):
"""Discard compile data.
See `set_last_compile` method for more information.
"""
self._last_compile = None
def set_last_compile(self, compile_data):
"""Save compile data.
The caller is responsible for discarding any previous compile
data by calling the `discard_last_compile` method.
Parameters
----------
compile_data : object
Compile data can be any Python object. When None, it is
interpreted as no compile data is available.
Notes
-----
The have/discard/set_last_compile methods provide a way to
avoid unnecessary compilations when the remote server supports
registration of compiled functions. The corresponding
`register` method is expected to use the following pattern:
.. code-block:: python
def register(self):
if self.have_last_compile:
return
<compile defined functions>
self.set_last_compile(<compilation results>)
The `discard_last_compile()` method is called when the compile
data becomes obsolete or needs to be discarded. For instance,
the compile data will be discarded when calling the following
methods: `reset`, `add_caller`. Note that the `add_caller`
call is triggered when applying the remotejit decorator to a
Python function to be compiled.
"""
assert self._last_compile is None
self._last_compile = compile_data
def get_pending_names(self):
"""Return the names of functions that have not been registered to the
remote server.
"""
names = set()
if not self.have_last_compile:
for caller in reversed(self.get_callers()):
names.add(caller.func.__name__)
return names
def retrieve_targets(self):
"""Retrieve target device information from remote client.
Redefine this method if remote client is not native.
Returns
-------
targets : dict
Map of target device names and their information.
"""
# TODO: rename thrift API targets to get_device_parameters?
response = self.client(remotejit=dict(targets=()))
targets = {}
for device, data in response['remotejit']['targets'].items():
targets[device] = TargetInfo.fromjson(data)
return targets
@property
def targets(self):
"""Return device-target_info mapping of the remote server.
"""
if self._targets is None:
self._targets = self.retrieve_targets()
return self._targets
def __call__(self, *signatures, **options):
"""Define a remote JIT function signatures and template.
Parameters
----------
signatures : tuple
Specify signatures of a remote JIT function, or a Python
function as a template from which the remote JIT function
will be compiled.
Keyword parameters
------------------
local : bool
devices : list
Specify device names for the given set of signatures.
templates : dict
Specify template types mapping.
Returns
-------
sig: {Signature, Caller}
Signature decorator or Caller
Notes
-----
The signatures can be strings in the following form:
"<return type>(<argument type 1>, <argument type 2>, ...)"
or any other object that can be converted to function type,
see `Type.fromobject` for more information.
"""
if options.get('local'):
s = Signature(self.local)
else:
s = Signature(self)
devices = options.get('devices')
options, templates = extract_templates(options)
for sig in signatures:
s = s(sig, devices=devices, templates=templates)
return s
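# Editor's note: a hedged usage sketch of the decorator call defined above
# (mirrors the class docstring; `add` is a hypothetical user function):
#
#     rjit = RemoteJIT(local=True)
#     @rjit('int64(int64, int64)')
#     def add(a, b):
#         return a + b
#     # `add` is now a Caller; add(1, 2) triggers remote_compile + remote_call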
def start_server(self, background=False):
"""Start remotejit server from client.
"""
thrift_file = os.path.join(os.path.dirname(__file__),
'remotejit.thrift')
print('starting rpc.thrift server: %s' % (thrift_file), end='',
flush=True)
if self.debug:
print(flush=True)
dispatcher = DebugDispatcherRJIT
else:
dispatcher = DispatcherRJIT
if background:
ps = Server.run_bg(dispatcher, thrift_file,
dict(host=self.host, port=self.port,
debug=self.debug))
self.server_process = ps
else:
Server.run(dispatcher, thrift_file,
dict(host=self.host, port=self.port,
debug=self.debug))
print('... rpc.thrift server stopped', flush=True)
def stop_server(self):
"""Stop remotejit server from client.
"""
if self.server_process is not None and self.server_process.is_alive():
print('... stopping rpc.thrift server')
self.server_process.terminate()
self.server_process = None
@property
def client(self):
"""Return remote host connection as Client instance.
"""
if self._client is None:
self._client = Client(
host=self.host,
port=self.port,
multiplexed=self.multiplexed,
thrift_content=self.thrift_content,
socket_timeout=60000)
return self._client
def remote_compile(self, func, ftype: Type, target_info: TargetInfo):
"""Remote compile function and signatures to machine code.
The input function `func` is compiled to LLVM IR module, the
LLVM IR module is sent to remote host where the remote host is
expected to complete the compilation process.
Return the corresponding LLVM IR module instance which may be
useful for debugging.
"""
if self.debug:
print(f'remote_compile({func}, {ftype})')
llvm_module, succesful_fids = irtools.compile_to_LLVM(
[(func, {0: ftype})],
target_info,
pipeline_class=OmnisciCompilerPipeline,
debug=self.debug)
ir = str(llvm_module)
mangled_signatures = ';'.join([s.mangle() for s in [ftype]])
response = self.client(remotejit=dict(
compile=(func.__name__, mangled_signatures, ir)))
assert response['remotejit']['compile'], response
return llvm_module
def remote_call(self, func, ftype: Type, arguments: tuple):
"""Call function remotely on given arguments.
The input function `func` is called remotely by sending the
arguments data to remote host where the previously compiled
function (see `remote_compile` method) is applied to the
arguments, and the result is returned to local process.
"""
if self.debug:
print(f'remote_call({func}, {ftype}, {arguments})')
fullname = func.__name__ + ftype.mangle()
response = self.client(remotejit=dict(call=(fullname, arguments)))
return response['remotejit']['call']
def python(self, statement):
"""Execute Python statement remotely.
"""
response = self.client(remotejit=dict(python=(statement,)))
return response['remotejit']['python']
def preprocess_callable(self, func):
"""Preprocess func to be used as a remotejit function definition.
Parameters
----------
func : callable
Returns
-------
func : callable
Preprocessed func.
"""
return func
class DispatcherRJIT(Dispatcher):
"""Implements remotejit service methods.
"""
def __init__(self, server, debug=False, use_tracing_allocator=False):
super().__init__(server, debug=debug)
self.use_tracing_allocator = use_tracing_allocator
self.compiled_functions = dict()
self.engines = dict()
self.python_globals = dict()
self.python_locals = dict()
@dispatchermethod
def targets(self) -> dict:
"""Retrieve target device information.
Returns
-------
info : dict
Map of target devices and their properties.
"""
if self.use_tracing_allocator:
target_info = TargetInfo.host(name='host_cpu_tracing_allocator',
use_tracing_allocator=True)
else:
target_info = TargetInfo.host()
target_info.set('has_numba', True)
target_info.set('has_cpython', True)
return dict(cpu=target_info.tojson())
@dispatchermethod
def compile(self, name: str, signatures: str, ir: str) -> int:
"""JIT compile function.
Parameters
----------
name : str
Specify the function name.
signatures : str
Specify a semicolon-separated list of mangled signatures.
ir : str
Specify LLVM IR representation of the function.
"""
engine = irtools.compile_IR(ir)
for msig in signatures.split(';'):
sig = Type.demangle(msig)
ctypes_sig = sig.toctypes()
assert sig.is_function
if sig[0].is_aggregate:
raise RuntimeError(
f'Functions with aggregate return type values are not supported,'
f' got function `{name}` with `{sig}` signature')
fullname = name + msig
addr = engine.get_function_address(fullname)
if self.debug:
print(f'compile({name}, {sig}) -> {hex(addr)}')
# storing engine as the owner of function addresses
if addr:
self.compiled_functions[fullname] = engine, ctypes_sig(addr), sig, ctypes_sig
else:
warnings.warn(f'No compilation result for {name}|{sig=}')
return True
@dispatchermethod
def call(self, fullname: str, arguments: tuple) -> Data:
"""Call JIT compiled function
Parameters
----------
fullname : str
Specify the full name of the function that is in form
"<name><mangled signature>"
arguments : tuple
Specify the arguments to the function.
"""
# if we are using a tracing allocator, automatically detect memory leaks
# at each call.
if self.use_tracing_allocator:
leak_detector = tracing_allocator.new_leak_detector()
else:
leak_detector = nullcontext()
with leak_detector:
return self._do_call(fullname, arguments)
def _do_call(self, fullname, arguments):
if self.debug:
print(f'call({fullname}, {arguments})')
ef = self.compiled_functions.get(fullname)
if ef is None:
raise RuntimeError(
f'no such compiled function `{fullname}`. Available functions:\n'
f' {"; ".join(list(self.compiled_functions))}\n.')
sig = ef[2]
ctypes_sig = ef[3]
if len(arguments) == 0:
assert sig.arity == 1 and sig[1][0].is_void, sig
else:
assert len(arguments) == sig.arity, (len(arguments), sig.arity)
ctypes_arguments = []
for typ, ctypes_typ, value in zip(sig[1], ctypes_sig._argtypes_, arguments):
if typ.is_custom:
typ = typ.get_struct_type()
if typ.is_struct:
if isinstance(value, tuple):
member_values = [t.toctypes()(value[i]) for i, t in enumerate(typ)]
else:
member_values = [t.toctypes()(getattr(value, t.name)) for t in typ]
ctypes_arguments.extend(member_values)
elif typ.is_pointer:
if isinstance(value, ctypes.c_void_p):
value = ctypes.cast(value, ctypes_typ)
else:
value = ctypes.cast(value, ctypes_typ)
ctypes_arguments.append(value)
else:
ctypes_arguments.append(value)
r = ef[1](*ctypes_arguments)
if sig[0].is_pointer and sig[0][0].is_void and isinstance(r, int):
r = ctypes.c_void_p(r)
if self.debug:
print(f'-> {r}')
if hasattr(r, 'topython'):
return r.topython()
return r
@dispatchermethod
def python(self, statement: str) -> int:
"""Execute Python statement.
"""
if self.debug:
print(f'python({statement!r})')
exec(statement, self.python_globals, self.python_locals)
return True
class DebugDispatcherRJIT(DispatcherRJIT):
"""
Enables debug messages.
"""
debug = True
class LocalClient(object):
"""Pretender of thrift.Client.
All calls will be made in a local process. Useful for debugging.
"""
def __init__(self, debug=False, use_tracing_allocator=False):
self.dispatcher = DispatcherRJIT(None, debug=debug,
use_tracing_allocator=use_tracing_allocator)
def __call__(self, **services):
results = {}
for service_name, query_dict in services.items():
results[service_name] = {}
for mthname, args in query_dict.items():
mth = getattr(self.dispatcher, mthname)
mth = inspect.unwrap(mth)
results[service_name][mthname] = mth(self.dispatcher, *args)
return results
|
#Import Libraries
#Web Scraping tools
from bs4 import BeautifulSoup as bs
from selenium import webdriver
#from splinter import Browser
#DataFrame tools
import pandas as pd
#Misc tools for web scraping
import time
import requests
#Function to initialize the browser.
def init_browser():
#Settings for headless mode.
options = webdriver.ChromeOptions()
options.add_argument('headless')
#path to the driver and load the options.
browser = webdriver.Chrome("/usr/local/bin/chromedriver",chrome_options = options)
#returns the browser.
return browser
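#Editor's note (sketch, assuming Selenium 4+ is available): the positional driver
#path and the chrome_options keyword used above are deprecated there; an
#equivalent setup would look roughly like:
# from selenium.webdriver.chrome.service import Service
# browser = webdriver.Chrome(service=Service("/usr/local/bin/chromedriver"), options=options)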
def scrapper():
#Call browser function
browser = init_browser()
#Dictionary to store all the results.
marsInfo_dict = {}
#Code to get NASA Mars News ----------------------------------------------------------------------------------------------
try:
url = "https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&year=2020%3Apublish_date&category=19%2C165%2C184%2C204&blank_scope=Latest"
#splinter option - open url
#browser.visit(url)
#Open url.
browser.get(url)
#Time to let the website load all the elements
time.sleep(4)
#splinter option - save HTML
#html = browser.html
#save the html source.
html = browser.page_source
#Use bs4 to parse the html response.
soup = bs(html, "html.parser")
#Collect the latest news title
news_title = soup.find_all('li', class_="slide")[0].find(class_="content_title").text
news_p = soup.find_all('li', class_="slide")[0].text
marsInfo_dict['news_title'] = news_title
marsInfo_dict['news_p'] = news_p
except :
print(f"Problem at website {url}")
#Code to get JPL Mars Space Images - Featured Image ---------------------------------------------------------------------------------
try:
url = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
#splinter option - open url
#browser.visit(url)
#Opens the url.
browser.get(url)
#splinter option - FULL IMAGE BUTTON
#browser.click_link_by_id("full_image")
#Interact with the FULL IMAGE BUTTON
browser.find_element_by_id("full_image").click()
time.sleep(4)
#splinter option - save HTML
#html = browser.html
#save the html source.
html = browser.page_source
#Use bs4 to parse the html response.
soup = bs(html, "html.parser")
featured_image_url = "https://www.jpl.nasa.gov/" + soup.find_all('img', class_="fancybox-image")[0]['src']
marsInfo_dict['featured_image_url'] = featured_image_url
except :
print(f"Problem at website {url}")
#Mars Weather ------------------------------------------------------------------------------------------------------------------------
try:
url = "https://twitter.com/marswxreport?lang=en"
#splinter option - open url
#browser.visit(url)
#Open the url.
browser.get(url)
#Time to let the website load all the elements
time.sleep(4)
#splinter option - save HTML
#html = browser.html
#save the html source.
html = browser.page_source
#Use bs4 to parse the html response.
soup = bs(html, "html.parser")
mars_weather = soup.find_all('article', class_="css-1dbjc4n r-1loqt21 r-18u37iz r-1ny4l3l r-o7ynqc r-6416eg")[0].text.strip().replace('Mars Weather@MarsWxReport·19hInSight ','')
marsInfo_dict['mars_weather'] = mars_weather
except :
#Note: do not print mars_weather here; it is unassigned if the scrape above failed.
print(f"Problem at website {url}")
# Mars Facts--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
try:
url = 'http://space-facts.com/mars/'
#Load url to pandas read html.
tables = pd.read_html(url)
#Tables
marsFacts_df = tables[0]
earthMars_df = tables[1]
#Rename columns
marsFacts_df.columns = ['Facts', 'Values']
#Output
html_outputFacts = marsFacts_df.to_html(index = False)
html_outputFacts = html_outputFacts.replace('\n', '')
html_outputMarsEarth = earthMars_df.to_html(index = False)
html_outputMarsEarth = html_outputMarsEarth.replace('\n', '')
marsInfo_dict['html_outputFacts'] = html_outputFacts
marsInfo_dict['html_outputMarsEarth'] = html_outputMarsEarth
except :
print(f"Problem at website {url}")
#hemisphereImages ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
try:
temp_list = []
url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
#splinter option - open url
#browser.visit(url)
#Opens the url.
browser.get(url)
time.sleep(4)
#splinter option - save HTML
#html = browser.html
#save the html source.
html = browser.page_source
# close web browser
browser.close()
#Use bs4 to parse the html response.
soup = bs(html, "html.parser")
links = soup.find_all('div', class_="description")
for link in links:
highDef_url = f"https://astrogeology.usgs.gov{link.find("a")["href"]}"
responseHighDef = requests.get(highDef_url)
soupHighDef = bs(responseHighDef.text, 'html.parser')
highDef_url = soupHighDef.find_all("div", class_="downloads")[0].find('a')['href']
title = link.find('h3').text
temp_list.append({"title" : title, "img_url" : highDef_url})
marsInfo_dict['hemisphere_image_urls'] = temp_list
except :
print(f"Problem at website {url}")
return marsInfo_dict
|
#Import Libraries
#Web Scraping tools
from bs4 import BeautifulSoup as bs
from selenium import webdriver
#from splinter import Browser
#DataFrame tools
import pandas as pd
#Misc tools for web scraping
import time
import requests
#Function to initialize the browser.
def init_browser():
#Settings for headless mode.
options = webdriver.ChromeOptions()
options.add_argument('headless')
#path to the driver and load the options.
browser = webdriver.Chrome("/usr/local/bin/chromedriver",chrome_options = options)
#returns the browser.
return browser
def scrapper():
#Call browser function
browser = init_browser()
#Dictionary to store all the results.
marsInfo_dict = {}
#Code to get NASA Mars News ----------------------------------------------------------------------------------------------
try:
url = "https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&year=2020%3Apublish_date&category=19%2C165%2C184%2C204&blank_scope=Latest"
#splinter option - open url
#browser.visit(url)
#Open url.
browser.get(url)
#Time to let the website load all the elements
time.sleep(4)
#splinter option - save HTML
#html = browser.html
#save the html source.
html = browser.page_source
#Use bs4 to parse the html response.
soup = bs(html, "html.parser")
#Collect the latest news title
news_title = soup.find_all('li', class_="slide")[0].find(class_="content_title").text
news_p = soup.find_all('li', class_="slide")[0].text
marsInfo_dict['news_title'] = news_title
marsInfo_dict['news_p'] = news_p
except :
print(f"Problem at website {url}")
#Code to get JPL Mars Space Images - Featured Image ---------------------------------------------------------------------------------
try:
url = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
#splinter option - open url
#browser.visit(url)
#Opens the url.
browser.get(url)
#splinter option - FULL IMAGE BUTTON
#browser.click_link_by_id("full_image")
#Interact with the FULL IMAGE BUTTON
browser.find_element_by_id("full_image").click()
time.sleep(4)
#splinter option - save HTML
#html = browser.html
#save the html source.
html = browser.page_source
#Use bs4 to parse the html response.
soup = bs(html, "html.parser")
featured_image_url = "https://www.jpl.nasa.gov/" + soup.find_all('img', class_="fancybox-image")[0]['src']
marsInfo_dict['featured_image_url'] = featured_image_url
except :
print(f"Problem at website {url}")
#Mars Weather ------------------------------------------------------------------------------------------------------------------------
try:
url = "https://twitter.com/marswxreport?lang=en"
#splinter option - open url
#browser.visit(url)
#Open the url.
browser.get(url)
#Time to let the website load all the elements
time.sleep(4)
#splinter option - save HTML
#html = browser.html
#save the html source.
html = browser.page_source
#Use bs4 to parse the html response.
soup = bs(html, "html.parser")
mars_weather = soup.find_all('article', class_="css-1dbjc4n r-1loqt21 r-18u37iz r-1ny4l3l r-o7ynqc r-6416eg")[0].text.strip().replace('Mars Weather@MarsWxReport·19hInSight ','')
marsInfo_dict['mars_weather'] = mars_weather
except :
#Note: do not print mars_weather here; it is unassigned if the scrape above failed.
print(f"Problem at website {url}")
# Mars Facts--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
try:
url = 'http://space-facts.com/mars/'
#Load url to pandas read html.
tables = pd.read_html(url)
#Tables
marsFacts_df = tables[0]
earthMars_df = tables[1]
#Rename columns
marsFacts_df.columns = ['Facts', 'Values']
#Output
html_outputFacts = marsFacts_df.to_html(index = False)
html_outputFacts = html_outputFacts.replace('\n', '')
html_outputMarsEarth = earthMars_df.to_html(index = False)
html_outputMarsEarth = html_outputMarsEarth.replace('\n', '')
marsInfo_dict['html_outputFacts'] = html_outputFacts
marsInfo_dict['html_outputMarsEarth'] = html_outputMarsEarth
except :
print(f"Problem at website {url}")
#hemisphereImages ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
try:
temp_list = []
url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
#splinter option - open url
#browser.visit(url)
#Opens the url.
browser.get(url)
time.sleep(4)
#splinter option - save HTML
#html = browser.html
#save the html source.
html = browser.page_source
# close web browser
browser.close()
#Use bs4 to parse the html response.
soup = bs(html, "html.parser")
links = soup.find_all('div', class_="description")
for link in links:
highDef_url = f"https://astrogeology.usgs.gov{link.find('a')['href']}"
responseHighDef = requests.get(highDef_url)
soupHighDef = bs(responseHighDef.text, 'html.parser')
highDef_url = soupHighDef.find_all("div", class_="downloads")[0].find('a')['href']
title = link.find('h3').text
temp_list.append({"title" : title, "img_url" : highDef_url})
marsInfo_dict['hemisphere_image_urls'] = temp_list
except :
print(f"Problem at website {url}")
return marsInfo_dict
|
import asyncio
import datetime
import logging
import secrets
from main import game
class GameError(Exception):
pass
class ForbiddenMoveError(GameError):
pass
class MoveIsNotPossible(GameError):
pass
class Game:
def __init__(self):
self._game = game
self._is_started = False
self._is_finished = False
self._available_move_time = 2.2 # 2 s per move plus a 200 ms allowance for network latency
self._available_current_move_time = self._available_move_time
self._players = {}
self._lost_time_player = None
self._last_move = None
self._colors_table = {
1: 'RED',
2: 'BLACK',
None: 'None'
}
def _whose_turn(self):
return self._colors_table[self._game.whose_turn()]
def _status(self):
if not self._is_started:
return 'Not yet started'
if self._lost_time_player:
return f'Player {self._colors_table[self._lost_time_player]} reached time limit'
return 'Game is over' if self._game.is_over() else 'Game is playing'
def _winner(self):
if self._lost_time_player:
return self._colors_table[1] \
if self._lost_time_player == 2 \
else self._colors_table[2]
return self._colors_table[self._game.get_winner()] if self._game.get_winner() else None
def _board(self):
output = []
for piece in self._game.board.pieces:
if not piece.captured:
output.append({
'color': 'RED' if piece.player == 1 else 'BLACK',
'row': piece.get_row(),
'column': piece.get_column(),
'king': piece.king,
'position': piece.position
})
return output
def add_player(self, team_name):
if self._is_started:
return
player_num = 2 if 1 in self._players else 1
token = secrets.token_hex(16)
self._players[player_num] = {
'token': token,
'team_name': team_name
}
if 1 in self._players and 2 in self._players:
asyncio.ensure_future(self.start())
return {
'color': self._colors_table[player_num],
'token': token
}
async def start(self):
logging.info(f'...GAME IS STARTED at {datetime.datetime.now().isoformat()}...')
logging.info(
f'1 player, color: {self._colors_table[1]}, team name: {self._players[1]['team_name']}'
)
logging.info(
f'2 player, color: {self._colors_table[2]}, team name: {self._players[2]['team_name']}'
)
self._is_started = True
while True:
logging.info(
f'Available time for player "{self._colors_table[self._game.whose_turn()]}" '
f'move: {self._available_current_move_time}'
)
await asyncio.sleep(0.05)
self._available_current_move_time -= 0.05
if self._available_current_move_time < 0:
self._lost_time_player = self._game.whose_turn()
self._is_finished = True
break
if self._game.is_over():
self._is_finished = True
break
if self._lost_time_player == 1:
winner = 2
elif self._lost_time_player == 2:
winner = 1
else:
winner = self._game.get_winner()
self._game.set_winner({
'color': self._colors_table[winner],
'team_name': self._players[winner]['team_name']
})
logging.info(
f'...GAME WAS FINISHED at {datetime.datetime.now().isoformat()}, winner: {self._game.get_board_winner()}'
)
def move(self, token, move):
player = self._players[self._game.whose_turn()]
if player['token'] != token:
raise ForbiddenMoveError
try:
if self._last_move is not None and self._last_move['player'] == self._whose_turn():
self._last_move['last_moves'].append(move)
else:
self._last_move = {
'player': self._whose_turn(),
'last_moves': [move]
}
self._game.move(move)
logging.info(
f'{player['team_name']} made move ({move}) at {datetime.datetime.now().isoformat()}'
)
self._available_current_move_time = self._available_move_time
except ValueError as e:
raise MoveIsNotPossible(str(e))
def is_started(self):
return self._is_started
def is_finished(self):
return self._is_finished
@property
def json(self):
return {
'status': self._status(),
'whose_turn': self._whose_turn(),
'winner': self._winner(),
'board': self._board(),
'available_time': self._available_current_move_time,
'last_move': self._last_move,
'is_started': self.is_started(),
'is_finished': self.is_finished()
}
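# Editor's note: a minimal usage sketch (team names are hypothetical; the move
# payload format is whatever the checkers library behind `main.game` expects):
#
#     g = Game()
#     red = g.add_player('team-red')       # -> {'color': 'RED', 'token': ...}
#     black = g.add_player('team-black')   # second join schedules start()
#     g.move(red['token'], move)           # wrong token raises ForbiddenMoveError
#     state = g.json                       # serializable snapshot for the API layer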
|
import asyncio
import datetime
import logging
import secrets
from main import game
class GameError(Exception):
pass
class ForbiddenMoveError(GameError):
pass
class MoveIsNotPossible(GameError):
pass
class Game:
def __init__(self):
self._game = game
self._is_started = False
self._is_finished = False
self._available_move_time = 2.2 # 2 s per move plus a 200 ms allowance for network latency
self._available_current_move_time = self._available_move_time
self._players = {}
self._lost_time_player = None
self._last_move = None
self._colors_table = {
1: 'RED',
2: 'BLACK',
None: 'None'
}
def _whose_turn(self):
return self._colors_table[self._game.whose_turn()]
def _status(self):
if not self._is_started:
return 'Not yet started'
if self._lost_time_player:
return f'Player {self._colors_table[self._lost_time_player]} reached time limit'
return 'Game is over' if self._game.is_over() else 'Game is playing'
def _winner(self):
if self._lost_time_player:
return self._colors_table[1] \
if self._lost_time_player == 2 \
else self._colors_table[2]
return self._colors_table[self._game.get_winner()] if self._game.get_winner() else None
def _board(self):
output = []
for piece in self._game.board.pieces:
if not piece.captured:
output.append({
'color': 'RED' if piece.player == 1 else 'BLACK',
'row': piece.get_row(),
'column': piece.get_column(),
'king': piece.king,
'position': piece.position
})
return output
def add_player(self, team_name):
if self._is_started:
return
player_num = 2 if 1 in self._players else 1
token = secrets.token_hex(16)
self._players[player_num] = {
'token': token,
'team_name': team_name
}
if 1 in self._players and 2 in self._players:
asyncio.ensure_future(self.start())
return {
'color': self._colors_table[player_num],
'token': token
}
async def start(self):
logging.info(f'...GAME IS STARTED at {datetime.datetime.now().isoformat()}...')
logging.info(
f'1 player, color: {self._colors_table[1]}, team name: {self._players[1]["team_name"]}'
)
logging.info(
f'2 player, color: {self._colors_table[2]}, team name: {self._players[2]["team_name"]}'
)
self._is_started = True
while True:
logging.info(
f'Available time for player "{self._colors_table[self._game.whose_turn()]}" '
f'move: {self._available_current_move_time}'
)
await asyncio.sleep(0.05)
self._available_current_move_time -= 0.05
if self._available_current_move_time < 0:
self._lost_time_player = self._game.whose_turn()
self._is_finished = True
break
if self._game.is_over():
self._is_finished = True
break
if self._lost_time_player == 1:
winner = 2
elif self._lost_time_player == 2:
winner = 1
else:
winner = self._game.get_winner()
self._game.set_winner({
'color': self._colors_table[winner],
'team_name': self._players[winner]['team_name']
})
logging.info(
f'...GAME WAS FINISHED at {datetime.datetime.now().isoformat()}, winner: {self._game.get_board_winner()}'
)
def move(self, token, move):
player = self._players[self._game.whose_turn()]
if player['token'] != token:
raise ForbiddenMoveError
try:
if self._last_move is not None and self._last_move['player'] == self._whose_turn():
self._last_move['last_moves'].append(move)
else:
self._last_move = {
'player': self._whose_turn(),
'last_moves': [move]
}
self._game.move(move)
logging.info(
f'{player["team_name"]} made move ({move}) at {datetime.datetime.now().isoformat()}'
)
self._available_current_move_time = self._available_move_time
except ValueError as e:
raise MoveIsNotPossible(str(e))
def is_started(self):
return self._is_started
def is_finished(self):
return self._is_finished
@property
def json(self):
return {
'status': self._status(),
'whose_turn': self._whose_turn(),
'winner': self._winner(),
'board': self._board(),
'available_time': self._available_current_move_time,
'last_move': self._last_move,
'is_started': self.is_started(),
'is_finished': self.is_finished()
}
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Helper tool to check file types that are allowed to checkin."""
import os
import sys
import subprocess
# List of file types we allow
ALLOW_EXTENSION = {
# source code
"cc",
"c",
"h",
"s",
"rs",
"m",
"mm",
"g4",
"gradle",
"js",
"tcl",
"scala",
"java",
"go",
"ts",
"sh",
"py",
"pyi",
"pxi",
"pyd",
"pyx",
"cu",
"bat",
# relay text format
"rly",
# configurations
"mk",
"in",
"cmake",
"xml",
"toml",
"yml",
"yaml",
"json",
# docs
"txt",
"md",
"rst",
# sgx
"edl",
"lds",
# ios
"pbxproj",
"plist",
"xcworkspacedata",
"storyboard",
# hw/chisel
"sbt",
"properties",
"v",
"sdc",
# generated parser
"interp",
"tokens",
# interface definition
"idl",
# opencl file
"cl",
# zephyr config file
"conf",
# linker scripts
"ld",
}
# List of file names allowed
ALLOW_FILE_NAME = {
".gitignore",
".eslintignore",
".gitattributes",
"README",
"Makefile",
"Doxyfile",
"pylintrc",
"rat-excludes",
"log4j.properties",
".clang-format",
".gitmodules",
"CODEOWNERS",
".scalafmt.conf",
"Cargo.lock",
"with_the_same_user",
}
# List of specific files allowed in relpath to <proj_root>
ALLOW_SPECIFIC_FILE = {
"LICENSE",
"NOTICE",
"KEYS",
"DISCLAIMER",
"Jenkinsfile",
"mypy.ini",
# cargo config
"rust/runtime/tests/test_wasm32/.cargo/config",
"rust/tvm-graph-rt/tests/test_wasm32/.cargo/config",
"apps/sgx/.cargo/config",
"apps/wasm-standalone/wasm-graph/.cargo/config",
# html for demo purposes
"web/apps/browser/rpc_server.html",
# images are normally not allowed
# discuss with committers before add more images
"apps/android_rpc/app/src/main/res/mipmap-hdpi/ic_launcher.png",
"apps/android_rpc/app/src/main/res/mipmap-mdpi/ic_launcher.png",
# documentation related files
"docs/_static/css/tvm_theme.css",
"docs/_static/img/tvm-logo-small.png",
"docs/_static/img/tvm-logo-square.png",
# pytest config
"pytest.ini",
# microTVM tests
"tests/micro/zephyr/testdata/digit-2.jpg",
"tests/micro/zephyr/testdata/digit-9.jpg",
"tests/micro/zephyr/testdata/mnist-8.onnx",
"tests/micro/zephyr/testdata/ic_sample_fp32_8.npy",
# microTVM Zephyr runtime
"apps/microtvm/zephyr/template_project/CMakeLists.txt.template",
"apps/microtvm/zephyr/template_project/qemu-hack/qemu-system-arm",
"apps/microtvm/zephyr/template_project/qemu-hack/qemu-system-xilinx-aarch64",
"apps/microtvm/zephyr/template_project/qemu-hack/qemu-system-i386",
"apps/microtvm/zephyr/template_project/qemu-hack/qemu-system-riscv32",
"apps/microtvm/zephyr/template_project/qemu-hack/qemu-system-riscv64",
# microTVM Virtual Machines
"apps/microtvm/reference-vm/zephyr/Vagrantfile",
"apps/microtvm/reference-vm/zephyr/base-box/Vagrantfile.packer-template",
}
def filename_allowed(name):
"""Check if name is allowed by the current policy.
Parameters
----------
name : str
Input name
Returns
-------
allowed : bool
Whether the filename is allowed.
"""
arr = name.rsplit(".", 1)
if arr[-1] in ALLOW_EXTENSION:
return True
if os.path.basename(name) in ALLOW_FILE_NAME:
return True
if os.path.basename(name).startswith("Dockerfile"):
return True
if name.startswith("3rdparty"):
return True
if name in ALLOW_SPECIFIC_FILE:
return True
return False
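# Editor's note: illustrative checks of the policy above (paths are hypothetical):
#   filename_allowed("src/relay/op/nn.cc")   -> True  ("cc" is an allowed extension)
#   filename_allowed("docs/model.bin")       -> False (extension is not in any allow list)
#   filename_allowed("3rdparty/foo/bar.bin") -> True  (everything under 3rdparty passes)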
def copyright_line(line):
# The following two items are intentionally broken apart
# so that the copyright detector won't detect the file itself.
if line.find("Copyright " + "(c)") != -1:
return True
# break pattern into two lines to avoid false-negative check
spattern1 = "Copyright"
if line.find(spattern1) != -1 and line.find("by") != -1:
return True
return False
def check_asf_copyright(fname):
if fname.endswith(".png"):
return True
if not os.path.isfile(fname):
return True
has_asf_header = False
has_copyright = False
try:
for line in open(fname):
if line.find("Licensed to the Apache Software Foundation") != -1:
has_asf_header = True
if copyright_line(line):
has_copyright = True
if has_asf_header and has_copyright:
return False
except UnicodeDecodeError:
pass
return True
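# Editor's note: check_asf_copyright returns False only when a file carries BOTH
# the ASF header and a "Copyright" line, i.e. False flags a violation; .png files
# and non-existent paths are skipped and treated as passing.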
def main():
cmd = ["git", "ls-files"]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
assert proc.returncode == 0, f'{' '.join(cmd)} errored: {out}'
res = out.decode("utf-8")
flist = res.split()
error_list = []
for fname in flist:
if not filename_allowed(fname):
error_list.append(fname)
if error_list:
report = "------File type check report----\n"
report += "\n".join(error_list)
report += "\nFound %d files that are now allowed\n" % len(error_list)
report += (
"We do not check in binary files into the repo.\n"
"If necessary, please discuss with committers and"
"modify tests/lint/check_file_type.py to enable the file you need.\n"
)
sys.stderr.write(report)
sys.stderr.flush()
sys.exit(-1)
asf_copyright_list = []
for fname in res.split():
if not check_asf_copyright(fname):
asf_copyright_list.append(fname)
if asf_copyright_list:
report = "------File type check report----\n"
report += "\n".join(asf_copyright_list) + "\n"
report += "------Found %d files that has ASF header with copyright message----\n" % len(
asf_copyright_list
)
report += "--- Files with ASF header do not need Copyright lines.\n"
report += "--- Contributors retain copyright to their contribution by default.\n"
report += "--- If a file comes with a different license, consider put it under the 3rdparty folder instead.\n"
report += "---\n"
report += "--- You can use the following steps to remove the copyright lines\n"
report += "--- Create file_list.txt in your text editor\n"
report += "--- Copy paste the above content in file-list into file_list.txt\n"
report += "--- python3 tests/lint/add_asf_header.py file_list.txt\n"
sys.stderr.write(report)
sys.stderr.flush()
sys.exit(-1)
print("check_file_type.py: all checks passed..")
if __name__ == "__main__":
main()
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Helper tool to check file types that are allowed to checkin."""
import os
import sys
import subprocess
# List of file types we allow
ALLOW_EXTENSION = {
# source code
"cc",
"c",
"h",
"s",
"rs",
"m",
"mm",
"g4",
"gradle",
"js",
"tcl",
"scala",
"java",
"go",
"ts",
"sh",
"py",
"pyi",
"pxi",
"pyd",
"pyx",
"cu",
"bat",
# relay text format
"rly",
# configurations
"mk",
"in",
"cmake",
"xml",
"toml",
"yml",
"yaml",
"json",
# docs
"txt",
"md",
"rst",
# sgx
"edl",
"lds",
# ios
"pbxproj",
"plist",
"xcworkspacedata",
"storyboard",
# hw/chisel
"sbt",
"properties",
"v",
"sdc",
# generated parser
"interp",
"tokens",
# interface definition
"idl",
# opencl file
"cl",
# zephyr config file
"conf",
# linker scripts
"ld",
}
# List of file names allowed
ALLOW_FILE_NAME = {
".gitignore",
".eslintignore",
".gitattributes",
"README",
"Makefile",
"Doxyfile",
"pylintrc",
"rat-excludes",
"log4j.properties",
".clang-format",
".gitmodules",
"CODEOWNERS",
".scalafmt.conf",
"Cargo.lock",
"with_the_same_user",
}
# List of specific files allowed in relpath to <proj_root>
ALLOW_SPECIFIC_FILE = {
"LICENSE",
"NOTICE",
"KEYS",
"DISCLAIMER",
"Jenkinsfile",
"mypy.ini",
# cargo config
"rust/runtime/tests/test_wasm32/.cargo/config",
"rust/tvm-graph-rt/tests/test_wasm32/.cargo/config",
"apps/sgx/.cargo/config",
"apps/wasm-standalone/wasm-graph/.cargo/config",
# html for demo purposes
"web/apps/browser/rpc_server.html",
# images are normally not allowed
# discuss with committers before add more images
"apps/android_rpc/app/src/main/res/mipmap-hdpi/ic_launcher.png",
"apps/android_rpc/app/src/main/res/mipmap-mdpi/ic_launcher.png",
# documentation related files
"docs/_static/css/tvm_theme.css",
"docs/_static/img/tvm-logo-small.png",
"docs/_static/img/tvm-logo-square.png",
# pytest config
"pytest.ini",
# microTVM tests
"tests/micro/zephyr/testdata/digit-2.jpg",
"tests/micro/zephyr/testdata/digit-9.jpg",
"tests/micro/zephyr/testdata/mnist-8.onnx",
"tests/micro/zephyr/testdata/ic_sample_fp32_8.npy",
# microTVM Zephyr runtime
"apps/microtvm/zephyr/template_project/CMakeLists.txt.template",
"apps/microtvm/zephyr/template_project/qemu-hack/qemu-system-arm",
"apps/microtvm/zephyr/template_project/qemu-hack/qemu-system-xilinx-aarch64",
"apps/microtvm/zephyr/template_project/qemu-hack/qemu-system-i386",
"apps/microtvm/zephyr/template_project/qemu-hack/qemu-system-riscv32",
"apps/microtvm/zephyr/template_project/qemu-hack/qemu-system-riscv64",
# microTVM Virtual Machines
"apps/microtvm/reference-vm/zephyr/Vagrantfile",
"apps/microtvm/reference-vm/zephyr/base-box/Vagrantfile.packer-template",
}
def filename_allowed(name):
"""Check if name is allowed by the current policy.
Parameters
----------
name : str
Input name
Returns
-------
allowed : bool
Whether the filename is allowed.
"""
arr = name.rsplit(".", 1)
if arr[-1] in ALLOW_EXTENSION:
return True
if os.path.basename(name) in ALLOW_FILE_NAME:
return True
if os.path.basename(name).startswith("Dockerfile"):
return True
if name.startswith("3rdparty"):
return True
if name in ALLOW_SPECIFIC_FILE:
return True
return False
def copyright_line(line):
# The following two items are intentionally broken apart
# so that the copyright detector won't detect the file itself.
if line.find("Copyright " + "(c)") != -1:
return True
# break pattern into two lines to avoid false-negative check
spattern1 = "Copyright"
if line.find(spattern1) != -1 and line.find("by") != -1:
return True
return False
def check_asf_copyright(fname):
if fname.endswith(".png"):
return True
if not os.path.isfile(fname):
return True
has_asf_header = False
has_copyright = False
try:
for line in open(fname):
if line.find("Licensed to the Apache Software Foundation") != -1:
has_asf_header = True
if copyright_line(line):
has_copyright = True
if has_asf_header and has_copyright:
return False
except UnicodeDecodeError:
pass
return True
def main():
cmd = ["git", "ls-files"]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
assert proc.returncode == 0, f'{" ".join(cmd)} errored: {out}'
res = out.decode("utf-8")
flist = res.split()
error_list = []
for fname in flist:
if not filename_allowed(fname):
error_list.append(fname)
if error_list:
report = "------File type check report----\n"
report += "\n".join(error_list)
report += "\nFound %d files that are now allowed\n" % len(error_list)
report += (
"We do not check in binary files into the repo.\n"
"If necessary, please discuss with committers and"
"modify tests/lint/check_file_type.py to enable the file you need.\n"
)
sys.stderr.write(report)
sys.stderr.flush()
sys.exit(-1)
asf_copyright_list = []
for fname in res.split():
if not check_asf_copyright(fname):
asf_copyright_list.append(fname)
if asf_copyright_list:
report = "------File type check report----\n"
report += "\n".join(asf_copyright_list) + "\n"
report += "------Found %d files that has ASF header with copyright message----\n" % len(
asf_copyright_list
)
report += "--- Files with ASF header do not need Copyright lines.\n"
report += "--- Contributors retain copyright to their contribution by default.\n"
report += "--- If a file comes with a different license, consider put it under the 3rdparty folder instead.\n"
report += "---\n"
report += "--- You can use the following steps to remove the copyright lines\n"
report += "--- Create file_list.txt in your text editor\n"
report += "--- Copy paste the above content in file-list into file_list.txt\n"
report += "--- python3 tests/lint/add_asf_header.py file_list.txt\n"
sys.stderr.write(report)
sys.stderr.flush()
sys.exit(-1)
print("check_file_type.py: all checks passed..")
if __name__ == "__main__":
main()
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import getpass
import time
import http
import logging
from random import seed
import infra.net
import infra.network
import infra.proc
import infra.remote
import infra.remote_client
import infra.rates
import cimetrics.upload
from loguru import logger as LOG
logging.getLogger("matplotlib").setLevel(logging.WARNING)
logging.getLogger("paramiko").setLevel(logging.WARNING)
def minimum_number_of_local_nodes(args):
"""
If we are using bft then we need to have 3 nodes. CFT will run with 1 node, unless it expects a backup
"""
if args.consensus == "bft":
return 3
if args.send_tx_to == "backups":
return 2
return 1
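# Editor's note: quick illustration of the node-count policy above (args values
# are hypothetical):
#   consensus == "bft"                          -> 3 nodes
#   consensus != "bft", send_tx_to == "backups" -> 2 nodes
#   otherwise                                   -> 1 node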
def get_command_args(args, get_command):
command_args = []
return get_command(*command_args)
def filter_nodes(primary, backups, filter_type):
if filter_type == "primary":
return [primary]
elif filter_type == "backups":
if not backups:
raise Exception("--send-tx-to backups but no backup was found")
return backups
else:
return [primary] + backups
def configure_remote_client(args, client_id, client_host, node, command_args):
if client_host == "localhost":
client_host = infra.net.expand_localhost()
remote_impl = infra.remote.LocalRemote
else:
remote_impl = infra.remote.SSHRemote
try:
remote_client = infra.remote_client.CCFRemoteClient(
"client_" + str(client_id),
client_host,
args.client,
node.host,
node.rpc_port,
args.workspace,
args.label,
args.config,
command_args,
remote_impl,
)
remote_client.setup()
return remote_client
except Exception:
LOG.exception("Failed to start client {}".format(client_host))
raise
def run(get_command, args):
if args.fixed_seed:
seed(getpass.getuser())
hosts = args.nodes
if not hosts:
hosts = ["local://localhost"] * minimum_number_of_local_nodes(args)
LOG.info("Starting nodes on {}".format(hosts))
with infra.network.network(
hosts, args.binary_dir, args.debug_nodes, args.perf_nodes, pdb=args.pdb
) as network:
network.start_and_join(args)
primary, backups = network.find_nodes()
command_args = get_command_args(args, get_command)
nodes_to_send_to = filter_nodes(primary, backups, args.send_tx_to)
clients = []
client_hosts = []
if args.one_client_per_backup:
if not backups:
raise Exception(
"--one-client-per-backup was set but no backup was found"
)
client_hosts = ["localhost"] * len(backups)
else:
if args.client_nodes:
client_hosts.extend(args.client_nodes)
if args.num_localhost_clients:
client_hosts.extend(["localhost"] * int(args.num_localhost_clients))
if not client_hosts:
client_hosts = ["localhost"]
for client_id, client_host in enumerate(client_hosts):
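            # Spread clients across the target nodes round-robin.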
node = nodes_to_send_to[client_id % len(nodes_to_send_to)]
remote_client = configure_remote_client(
args, client_id, client_host, node, command_args
)
clients.append(remote_client)
if args.network_only:
for remote_client in clients:
LOG.info(f"Client can be run with: {remote_client.remote.get_cmd()}")
while True:
time.sleep(60)
else:
for remote_client in clients:
remote_client.start()
hard_stop_timeout = 90
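            # Abort the run if any client is still going after this many seconds.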
try:
with cimetrics.upload.metrics(complete=False) as metrics:
tx_rates = infra.rates.TxRates(primary)
start_time = time.time()
while True:
stop_waiting = True
for i, remote_client in enumerate(clients):
done = remote_client.check_done()
# all the clients need to be done
LOG.info(
f"Client {i} has {"completed" if done else "not completed"} running ({time.time() - start_time:.2f}s / {hard_stop_timeout}s)"
)
stop_waiting = stop_waiting and done
if stop_waiting:
break
if time.time() > start_time + hard_stop_timeout:
raise TimeoutError(
f"Client still running after {hard_stop_timeout}s"
)
time.sleep(5)
tx_rates.get_metrics()
for remote_client in clients:
perf_result = remote_client.get_result()
LOG.success(f"{args.label}/{remote_client.name}: {perf_result}")
# TODO: Only results for first client are uploaded
# https://github.com/microsoft/CCF/issues/1046
if remote_client == clients[0]:
LOG.success(f"Uploading results for {remote_client.name}")
metrics.put(args.label, perf_result)
else:
LOG.warning(f"Skipping upload for {remote_client.name}")
primary, _ = network.find_primary()
with primary.client() as nc:
r = nc.get("/node/memory")
assert r.status_code == http.HTTPStatus.OK.value
results = r.body.json()
tx_rates.insert_metrics(**results)
# Construct name for heap metric, removing ^ suffix if present
heap_peak_metric = f"Mem_{args.label}"
if heap_peak_metric.endswith("^"):
heap_peak_metric = heap_peak_metric[:-1]
peak_value = results["peak_allocated_heap_size"]
metrics.put(heap_peak_metric, peak_value)
LOG.info(f"Rates:\n{tx_rates}")
tx_rates.save_results(args.metrics_file)
for remote_client in clients:
remote_client.stop()
except Exception:
LOG.error("Stopping clients due to exception")
for remote_client in clients:
remote_client.stop()
raise
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import getpass
import time
import http
import logging
from random import seed
import infra.network
import infra.proc
import infra.remote_client
import infra.rates
import cimetrics.upload
from loguru import logger as LOG
logging.getLogger("matplotlib").setLevel(logging.WARNING)
logging.getLogger("paramiko").setLevel(logging.WARNING)
def minimum_number_of_local_nodes(args):
"""
    If we are using bft then we need to have 3 nodes. CFT will run with 1 node, unless it expects a backup.
"""
if args.consensus == "bft":
return 3
if args.send_tx_to == "backups":
return 2
return 1
def get_command_args(args, get_command):
command_args = []
return get_command(*command_args)
def filter_nodes(primary, backups, filter_type):
if filter_type == "primary":
return [primary]
elif filter_type == "backups":
if not backups:
raise Exception("--send-tx-to backups but no backup was found")
return backups
else:
return [primary] + backups
def configure_remote_client(args, client_id, client_host, node, command_args):
if client_host == "localhost":
client_host = infra.net.expand_localhost()
remote_impl = infra.remote.LocalRemote
else:
remote_impl = infra.remote.SSHRemote
try:
remote_client = infra.remote_client.CCFRemoteClient(
"client_" + str(client_id),
client_host,
args.client,
node.host,
node.rpc_port,
args.workspace,
args.label,
args.config,
command_args,
remote_impl,
)
remote_client.setup()
return remote_client
except Exception:
LOG.exception("Failed to start client {}".format(client_host))
raise
def run(get_command, args):
if args.fixed_seed:
seed(getpass.getuser())
hosts = args.nodes
if not hosts:
hosts = ["local://localhost"] * minimum_number_of_local_nodes(args)
LOG.info("Starting nodes on {}".format(hosts))
with infra.network.network(
hosts, args.binary_dir, args.debug_nodes, args.perf_nodes, pdb=args.pdb
) as network:
network.start_and_join(args)
primary, backups = network.find_nodes()
command_args = get_command_args(args, get_command)
nodes_to_send_to = filter_nodes(primary, backups, args.send_tx_to)
clients = []
client_hosts = []
if args.one_client_per_backup:
if not backups:
raise Exception(
"--one-client-per-backup was set but no backup was found"
)
client_hosts = ["localhost"] * len(backups)
else:
if args.client_nodes:
client_hosts.extend(args.client_nodes)
if args.num_localhost_clients:
client_hosts.extend(["localhost"] * int(args.num_localhost_clients))
if not client_hosts:
client_hosts = ["localhost"]
for client_id, client_host in enumerate(client_hosts):
node = nodes_to_send_to[client_id % len(nodes_to_send_to)]
remote_client = configure_remote_client(
args, client_id, client_host, node, command_args
)
clients.append(remote_client)
if args.network_only:
for remote_client in clients:
LOG.info(f"Client can be run with: {remote_client.remote.get_cmd()}")
while True:
time.sleep(60)
else:
for remote_client in clients:
remote_client.start()
hard_stop_timeout = 90
try:
with cimetrics.upload.metrics(complete=False) as metrics:
tx_rates = infra.rates.TxRates(primary)
start_time = time.time()
while True:
stop_waiting = True
for i, remote_client in enumerate(clients):
done = remote_client.check_done()
# all the clients need to be done
LOG.info(
f"Client {i} has {'completed' if done else 'not completed'} running ({time.time() - start_time:.2f}s / {hard_stop_timeout}s)"
)
stop_waiting = stop_waiting and done
if stop_waiting:
break
if time.time() > start_time + hard_stop_timeout:
raise TimeoutError(
f"Client still running after {hard_stop_timeout}s"
)
time.sleep(5)
tx_rates.get_metrics()
for remote_client in clients:
perf_result = remote_client.get_result()
LOG.success(f"{args.label}/{remote_client.name}: {perf_result}")
# TODO: Only results for first client are uploaded
# https://github.com/microsoft/CCF/issues/1046
if remote_client == clients[0]:
LOG.success(f"Uploading results for {remote_client.name}")
metrics.put(args.label, perf_result)
else:
LOG.warning(f"Skipping upload for {remote_client.name}")
primary, _ = network.find_primary()
with primary.client() as nc:
r = nc.get("/node/memory")
assert r.status_code == http.HTTPStatus.OK.value
results = r.body.json()
tx_rates.insert_metrics(**results)
# Construct name for heap metric, removing ^ suffix if present
heap_peak_metric = f"Mem_{args.label}"
if heap_peak_metric.endswith("^"):
heap_peak_metric = heap_peak_metric[:-1]
peak_value = results["peak_allocated_heap_size"]
metrics.put(heap_peak_metric, peak_value)
LOG.info(f"Rates:\n{tx_rates}")
tx_rates.save_results(args.metrics_file)
for remote_client in clients:
remote_client.stop()
except Exception:
LOG.error("Stopping clients due to exception")
for remote_client in clients:
remote_client.stop()
raise
|
from getpass import getuser
from shlex import quote
from typing import Dict
import click
import hashlib
import json
import logging
import os
import subprocess
import sys
import time
import warnings
from ray.autoscaler.command_runner import CommandRunnerInterface
from ray.autoscaler._private.docker import check_bind_mounts_cmd, \
check_docker_running_cmd, \
check_docker_image, \
docker_start_cmds, \
DOCKER_MOUNT_PREFIX, \
with_docker_exec
from ray.autoscaler._private.log_timer import LogTimer
from ray.autoscaler._private.subprocess_output_util import (
run_cmd_redirected, ProcessRunnerError, is_output_redirected)
from ray.autoscaler._private.cli_logger import cli_logger
import colorful as cf
logger = logging.getLogger(__name__)
# How long to wait for a node to start, in seconds
NODE_START_WAIT_S = 300
HASH_MAX_LENGTH = 10
KUBECTL_RSYNC = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "kubernetes/kubectl-rsync.sh")
_config = {"use_login_shells": True, "silent_rsync": True}
def is_rsync_silent():
return _config["silent_rsync"]
def set_rsync_silent(val):
"""Choose whether to silence rsync output.
Most commands will want to list rsync'd files themselves rather than
print the default rsync spew.
"""
_config["silent_rsync"] = val
def is_using_login_shells():
return _config["use_login_shells"]
def set_using_login_shells(val):
"""Choose between login and non-interactive shells.
Non-interactive shells have the benefit of receiving less output from
subcommands (since progress bars and TTY control codes are not printed).
Sometimes this can be significant since e.g. `pip install` prints
hundreds of progress bar lines when downloading.
Login shells have the benefit of working very close to how a proper bash
session does, regarding how scripts execute and how the environment is
    set up. This is also how all commands were run in the past. The only reason
to use login shells over non-interactive shells is if you need some weird
and non-robust tool to work.
Args:
val (bool): If true, login shells will be used to run all commands.
"""
_config["use_login_shells"] = val
def _with_environment_variables(cmd: str,
environment_variables: Dict[str, object]):
"""Prepend environment variables to a shell command.
Args:
cmd (str): The base command.
environment_variables (Dict[str, object]): The set of environment
variables. If an environment variable value is a dict, it will
automatically be converted to a one line yaml string.
"""
as_strings = []
for key, val in environment_variables.items():
val = json.dumps(val, separators=(",", ":"))
s = "export {}={};".format(key, quote(val))
as_strings.append(s)
all_vars = "".join(as_strings)
return all_vars + cmd
def _with_interactive(cmd):
force_interactive = (
f"true && source ~/.bashrc && "
f"export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && ({cmd})")
return ["bash", "--login", "-c", "-i", quote(force_interactive)]
class KubernetesCommandRunner(CommandRunnerInterface):
def __init__(self, log_prefix, namespace, node_id, auth_config,
process_runner):
self.log_prefix = log_prefix
self.process_runner = process_runner
self.node_id = str(node_id)
self.namespace = namespace
self.kubectl = ["kubectl", "-n", self.namespace]
def run(
self,
cmd=None,
timeout=120,
exit_on_fail=False,
port_forward=None,
with_output=False,
environment_variables: Dict[str, object] = None,
run_env="auto", # Unused argument.
ssh_options_override_ssh_key="", # Unused argument.
shutdown_after_run=False,
):
if shutdown_after_run:
cmd += "; sudo shutdown -h now"
if cmd and port_forward:
raise Exception(
"exec with Kubernetes can't forward ports and execute"
"commands together.")
if port_forward:
if not isinstance(port_forward, list):
port_forward = [port_forward]
port_forward_cmd = self.kubectl + [
"port-forward",
self.node_id,
] + [
"{}:{}".format(local, remote) for local, remote in port_forward
]
logger.info("Port forwarding with: {}".format(
" ".join(port_forward_cmd)))
port_forward_process = subprocess.Popen(port_forward_cmd)
port_forward_process.wait()
# We should never get here, this indicates that port forwarding
# failed, likely because we couldn't bind to a port.
pout, perr = port_forward_process.communicate()
exception_str = " ".join(
port_forward_cmd) + " failed with error: " + perr
raise Exception(exception_str)
else:
final_cmd = self.kubectl + ["exec", "-it"]
final_cmd += [
self.node_id,
"--",
]
if environment_variables:
cmd = _with_environment_variables(cmd, environment_variables)
cmd = _with_interactive(cmd)
cmd_prefix = " ".join(final_cmd)
final_cmd += cmd
# `kubectl exec` + subprocess w/ list of args has unexpected
# side-effects.
final_cmd = " ".join(final_cmd)
logger.info(self.log_prefix + "Running {}".format(final_cmd))
try:
if with_output:
return self.process_runner.check_output(
final_cmd, shell=True)
else:
self.process_runner.check_call(final_cmd, shell=True)
except subprocess.CalledProcessError:
if exit_on_fail:
quoted_cmd = cmd_prefix + quote(" ".join(cmd))
logger.error(
self.log_prefix +
"Command failed: \n\n {}\n".format(quoted_cmd))
sys.exit(1)
else:
raise
def run_rsync_up(self, source, target, options=None):
if target.startswith("~"):
target = "/root" + target[1:]
try:
self.process_runner.check_call([
KUBECTL_RSYNC,
"-avz",
source,
"{}@{}:{}".format(self.node_id, self.namespace, target),
])
except Exception as e:
warnings.warn(
self.log_prefix +
"rsync failed: '{}'. Falling back to 'kubectl cp'".format(e),
UserWarning)
if target.startswith("~"):
target = "/root" + target[1:]
self.process_runner.check_call(self.kubectl + [
"cp", source, "{}/{}:{}".format(self.namespace, self.node_id,
target)
])
def run_rsync_down(self, source, target, options=None):
if target.startswith("~"):
target = "/root" + target[1:]
try:
self.process_runner.check_call([
KUBECTL_RSYNC,
"-avz",
"{}@{}:{}".format(self.node_id, self.namespace, source),
target,
])
except Exception as e:
warnings.warn(
self.log_prefix +
"rsync failed: '{}'. Falling back to 'kubectl cp'".format(e),
UserWarning)
if target.startswith("~"):
target = "/root" + target[1:]
self.process_runner.check_call(self.kubectl + [
"cp", "{}/{}:{}".format(self.namespace, self.node_id, source),
target
])
def remote_shell_command_str(self):
return "{} exec -it {} bash".format(" ".join(self.kubectl),
self.node_id)
class SSHOptions:
def __init__(self, ssh_key, control_path=None, **kwargs):
self.ssh_key = ssh_key
self.arg_dict = {
            # Suppresses initial fingerprint verification.
"StrictHostKeyChecking": "no",
# SSH IP and fingerprint pairs no longer added to known_hosts.
# This is to remove a "REMOTE HOST IDENTIFICATION HAS CHANGED"
# warning if a new node has the same IP as a previously
# deleted node, because the fingerprints will not match in
# that case.
"UserKnownHostsFile": os.devnull,
# Try fewer extraneous key pairs.
"IdentitiesOnly": "yes",
# Abort if port forwarding fails (instead of just printing to
# stderr).
"ExitOnForwardFailure": "yes",
# Quickly kill the connection if network connection breaks (as
# opposed to hanging/blocking).
"ServerAliveInterval": 5,
"ServerAliveCountMax": 3
}
if control_path:
self.arg_dict.update({
"ControlMaster": "auto",
"ControlPath": "{}/%C".format(control_path),
"ControlPersist": "10s",
})
self.arg_dict.update(kwargs)
def to_ssh_options_list(self, *, timeout=60):
self.arg_dict["ConnectTimeout"] = "{}s".format(timeout)
ssh_key_option = ["-i", self.ssh_key] if self.ssh_key else []
return ssh_key_option + [
x for y in (["-o", "{}={}".format(k, v)]
for k, v in self.arg_dict.items()
if v is not None) for x in y
]
class SSHCommandRunner(CommandRunnerInterface):
def __init__(self, log_prefix, node_id, provider, auth_config,
cluster_name, process_runner, use_internal_ip):
ssh_control_hash = hashlib.md5(cluster_name.encode()).hexdigest()
ssh_user_hash = hashlib.md5(getuser().encode()).hexdigest()
ssh_control_path = "/tmp/ray_ssh_{}/{}".format(
ssh_user_hash[:HASH_MAX_LENGTH],
ssh_control_hash[:HASH_MAX_LENGTH])
self.log_prefix = log_prefix
self.process_runner = process_runner
self.node_id = node_id
self.use_internal_ip = use_internal_ip
self.provider = provider
self.ssh_private_key = auth_config.get("ssh_private_key")
self.ssh_user = auth_config["ssh_user"]
self.ssh_control_path = ssh_control_path
self.ssh_ip = None
self.ssh_proxy_command = auth_config.get("ssh_proxy_command", None)
self.ssh_options = SSHOptions(
self.ssh_private_key,
self.ssh_control_path,
ProxyCommand=self.ssh_proxy_command)
def _get_node_ip(self):
if self.use_internal_ip:
return self.provider.internal_ip(self.node_id)
else:
return self.provider.external_ip(self.node_id)
def _wait_for_ip(self, deadline):
# if we have IP do not print waiting info
ip = self._get_node_ip()
if ip is not None:
cli_logger.labeled_value("Fetched IP", ip)
return ip
interval = 10
with cli_logger.timed("Waiting for IP"):
while time.time() < deadline and \
not self.provider.is_terminated(self.node_id):
cli_logger.old_info(logger, "{}Waiting for IP...",
self.log_prefix)
ip = self._get_node_ip()
if ip is not None:
cli_logger.labeled_value("Received", ip)
return ip
cli_logger.print("Not yet available, retrying in {} seconds",
cf.bold(str(interval)))
time.sleep(interval)
return None
def _set_ssh_ip_if_required(self):
if self.ssh_ip is not None:
return
# We assume that this never changes.
# I think that's reasonable.
deadline = time.time() + NODE_START_WAIT_S
with LogTimer(self.log_prefix + "Got IP"):
ip = self._wait_for_ip(deadline)
cli_logger.doassert(ip is not None,
"Could not get node IP.") # todo: msg
assert ip is not None, "Unable to find IP of node"
self.ssh_ip = ip
# This should run before any SSH commands and therefore ensure that
# the ControlPath directory exists, allowing SSH to maintain
# persistent sessions later on.
try:
os.makedirs(self.ssh_control_path, mode=0o700, exist_ok=True)
except OSError as e:
cli_logger.warning("{}", str(e)) # todo: msg
cli_logger.old_warning(logger, "{}", str(e))
def _run_helper(self,
final_cmd,
with_output=False,
exit_on_fail=False,
silent=False):
"""Run a command that was already setup with SSH and `bash` settings.
Args:
cmd (List[str]):
Full command to run. Should include SSH options and other
processing that we do.
with_output (bool):
If `with_output` is `True`, command stdout and stderr
will be captured and returned.
exit_on_fail (bool):
If `exit_on_fail` is `True`, the process will exit
if the command fails (exits with a code other than 0).
Raises:
ProcessRunnerError if using new log style and disabled
login shells.
click.ClickException if using login shells.
"""
try:
# For now, if the output is needed we just skip the new logic.
# In the future we could update the new logic to support
# capturing output, but it is probably not needed.
if not cli_logger.old_style and not with_output:
return run_cmd_redirected(
final_cmd,
process_runner=self.process_runner,
silent=silent,
use_login_shells=is_using_login_shells())
if with_output:
return self.process_runner.check_output(final_cmd)
else:
return self.process_runner.check_call(final_cmd)
except subprocess.CalledProcessError as e:
quoted_cmd = " ".join(final_cmd[:-1] + [quote(final_cmd[-1])])
if not cli_logger.old_style and not is_using_login_shells():
raise ProcessRunnerError(
"Command failed",
"ssh_command_failed",
code=e.returncode,
command=quoted_cmd)
if exit_on_fail:
raise click.ClickException(
"Command failed:\n\n {}\n".format(quoted_cmd)) from None
else:
fail_msg = "SSH command failed."
if is_output_redirected():
fail_msg += " See above for the output from the failure."
raise click.ClickException(fail_msg) from None
def run(
self,
cmd,
timeout=120,
exit_on_fail=False,
port_forward=None,
with_output=False,
environment_variables: Dict[str, object] = None,
run_env="auto", # Unused argument.
ssh_options_override_ssh_key="",
shutdown_after_run=False,
):
if shutdown_after_run:
cmd += "; sudo shutdown -h now"
if ssh_options_override_ssh_key:
ssh_options = SSHOptions(ssh_options_override_ssh_key)
else:
ssh_options = self.ssh_options
assert isinstance(
ssh_options, SSHOptions
), "ssh_options must be of type SSHOptions, got {}".format(
type(ssh_options))
self._set_ssh_ip_if_required()
if is_using_login_shells():
ssh = ["ssh", "-tt"]
else:
ssh = ["ssh"]
if port_forward:
with cli_logger.group("Forwarding ports"):
if not isinstance(port_forward, list):
port_forward = [port_forward]
for local, remote in port_forward:
cli_logger.verbose(
"Forwarding port {} to port {} on localhost.",
cf.bold(local), cf.bold(remote)) # todo: msg
cli_logger.old_info(logger,
"{}Forwarding {} -> localhost:{}",
self.log_prefix, local, remote)
ssh += ["-L", "{}:localhost:{}".format(remote, local)]
final_cmd = ssh + ssh_options.to_ssh_options_list(timeout=timeout) + [
"{}@{}".format(self.ssh_user, self.ssh_ip)
]
if cmd:
if environment_variables:
cmd = _with_environment_variables(cmd, environment_variables)
if is_using_login_shells():
final_cmd += _with_interactive(cmd)
else:
final_cmd += [cmd]
cli_logger.old_info(logger, "{}Running {}", self.log_prefix,
" ".join(final_cmd))
else:
# We do this because `-o ControlMaster` causes the `-N` flag to
# still create an interactive shell in some ssh versions.
final_cmd.append("while true; do sleep 86400; done")
cli_logger.verbose("Running `{}`", cf.bold(cmd))
with cli_logger.indented():
cli_logger.very_verbose("Full command is `{}`",
cf.bold(" ".join(final_cmd)))
if cli_logger.verbosity > 0:
with cli_logger.indented():
return self._run_helper(final_cmd, with_output, exit_on_fail)
else:
return self._run_helper(final_cmd, with_output, exit_on_fail)
def run_rsync_up(self, source, target, options=None):
self._set_ssh_ip_if_required()
command = [
"rsync", "--rsh",
subprocess.list2cmdline(
["ssh"] + self.ssh_options.to_ssh_options_list(timeout=120)),
"-avz", source, "{}@{}:{}".format(self.ssh_user, self.ssh_ip,
target)
]
cli_logger.verbose("Running `{}`", cf.bold(" ".join(command)))
self._run_helper(command, silent=is_rsync_silent())
def run_rsync_down(self, source, target, options=None):
self._set_ssh_ip_if_required()
command = [
"rsync", "--rsh",
subprocess.list2cmdline(
["ssh"] + self.ssh_options.to_ssh_options_list(timeout=120)),
"-avz", "{}@{}:{}".format(self.ssh_user, self.ssh_ip,
source), target
]
cli_logger.verbose("Running `{}`", cf.bold(" ".join(command)))
self._run_helper(command, silent=is_rsync_silent())
def remote_shell_command_str(self):
if self.ssh_private_key:
return "ssh -o IdentitiesOnly=yes -i {} {}@{}\n".format(
self.ssh_private_key, self.ssh_user, self.ssh_ip)
else:
return "ssh -o IdentitiesOnly=yes {}@{}\n".format(
self.ssh_user, self.ssh_ip)
class DockerCommandRunner(CommandRunnerInterface):
def __init__(self, docker_config, **common_args):
self.ssh_command_runner = SSHCommandRunner(**common_args)
self.container_name = docker_config["container_name"]
self.docker_config = docker_config
self.home_dir = None
self.initialized = False
def run(
self,
cmd,
timeout=120,
exit_on_fail=False,
port_forward=None,
with_output=False,
environment_variables: Dict[str, object] = None,
run_env="auto",
ssh_options_override_ssh_key="",
shutdown_after_run=False,
):
if run_env == "auto":
run_env = "host" if cmd.find("docker") == 0 else "docker"
if environment_variables:
cmd = _with_environment_variables(cmd, environment_variables)
if run_env == "docker":
cmd = self._docker_expand_user(cmd, any_char=True)
cmd = " ".join(_with_interactive(cmd))
cmd = with_docker_exec(
[cmd],
container_name=self.container_name,
with_interactive=True)[0]
if shutdown_after_run:
# sudo shutdown should run after `with_docker_exec` command above
cmd += "; sudo shutdown -h now"
# Do not pass shutdown_after_run argument to ssh_command_runner.run()
# since it is handled above.
return self.ssh_command_runner.run(
cmd,
timeout=timeout,
exit_on_fail=exit_on_fail,
port_forward=port_forward,
with_output=with_output,
ssh_options_override_ssh_key=ssh_options_override_ssh_key)
def run_rsync_up(self, source, target, options=None):
options = options or {}
host_destination = os.path.join(DOCKER_MOUNT_PREFIX,
target.lstrip("/"))
self.ssh_command_runner.run(
f"mkdir -p {os.path.dirname(host_destination.rstrip("/"))}")
self.ssh_command_runner.run_rsync_up(
source, host_destination, options=None)
if self._check_container_status() and not options.get(
"file_mount", False):
if os.path.isdir(source):
# Adding a "." means that docker copies the *contents*
# Without it, docker copies the source *into* the target
host_destination += "/."
self.ssh_command_runner.run("docker cp {} {}:{}".format(
host_destination, self.container_name,
self._docker_expand_user(target)))
def run_rsync_down(self, source, target, options=None):
options = options or {}
host_source = os.path.join(DOCKER_MOUNT_PREFIX, source.lstrip("/"))
self.ssh_command_runner.run(
f"mkdir -p {os.path.dirname(host_source.rstrip("/"))}")
if source[-1] == "/":
source += "."
# Adding a "." means that docker copies the *contents*
# Without it, docker copies the source *into* the target
if not options.get("file_mount", False):
self.ssh_command_runner.run("docker cp {}:{} {}".format(
self.container_name, self._docker_expand_user(source),
host_source))
self.ssh_command_runner.run_rsync_down(
host_source, target, options=None)
def remote_shell_command_str(self):
inner_str = self.ssh_command_runner.remote_shell_command_str().replace(
"ssh", "ssh -tt", 1).strip("\n")
return inner_str + " docker exec -it {} /bin/bash\n".format(
self.container_name)
def _check_docker_installed(self):
no_exist = "NoExist"
output = self.ssh_command_runner.run(
f"command -v docker || echo '{no_exist}'", with_output=True)
cleaned_output = output.decode().strip()
if no_exist in cleaned_output or "docker" not in cleaned_output:
install_commands = [
"curl -fsSL https://get.docker.com -o get-docker.sh",
"sudo sh get-docker.sh", "sudo usermod -aG docker $USER",
"sudo systemctl restart docker -f"
]
logger.error(
"Docker not installed. You can install Docker by adding the "
"following commands to 'initialization_commands':\n" +
"\n".join(install_commands))
def _check_container_status(self):
if self.initialized:
return True
output = self.ssh_command_runner.run(
check_docker_running_cmd(self.container_name),
with_output=True).decode("utf-8").strip()
# Checks for the false positive where "true" is in the container name
return ("true" in output.lower()
and "no such object" not in output.lower())
def _docker_expand_user(self, string, any_char=False):
user_pos = string.find("~")
if user_pos > -1:
if self.home_dir is None:
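                # Query the running container for its HOME directory once and cache the result.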
self.home_dir = self.ssh_command_runner.run(
"docker exec {} env | grep HOME | cut -d'=' -f2".format(
self.container_name),
with_output=True).decode("utf-8").strip()
if any_char:
return string.replace("~/", self.home_dir + "/")
elif not any_char and user_pos == 0:
return string.replace("~", self.home_dir, 1)
return string
def run_init(self, *, as_head, file_mounts):
BOOTSTRAP_MOUNTS = [
"~/ray_bootstrap_config.yaml", "~/ray_bootstrap_key.pem"
]
image = self.docker_config.get("image")
image = self.docker_config.get(
f"{"head" if as_head else "worker"}_image", image)
self._check_docker_installed()
if self.docker_config.get("pull_before_run", True):
assert image, "Image must be included in config if " + \
"pull_before_run is specified"
self.run("docker pull {}".format(image), run_env="host")
# Bootstrap files cannot be bind mounted because docker opens the
# underlying inode. When the file is switched, docker becomes outdated.
cleaned_bind_mounts = file_mounts.copy()
for mnt in BOOTSTRAP_MOUNTS:
cleaned_bind_mounts.pop(mnt, None)
start_command = docker_start_cmds(
self.ssh_command_runner.ssh_user, image, cleaned_bind_mounts,
self.container_name,
self.docker_config.get("run_options", []) + self.docker_config.get(
f"{"head" if as_head else "worker"}_run_options", []))
if not self._check_container_status():
self.run(start_command, run_env="host")
else:
running_image = self.run(
check_docker_image(self.container_name),
with_output=True,
run_env="host").decode("utf-8").strip()
if running_image != image:
logger.error(f"A container with name {self.container_name} " +
f"is running image {running_image} instead " +
f"of {image} (which was provided in the YAML")
mounts = self.run(
check_bind_mounts_cmd(self.container_name),
with_output=True,
run_env="host").decode("utf-8").strip()
try:
active_mounts = json.loads(mounts)
active_remote_mounts = [
mnt["Destination"] for mnt in active_mounts
]
# Ignore ray bootstrap files.
for remote, local in cleaned_bind_mounts.items():
remote = self._docker_expand_user(remote)
if remote not in active_remote_mounts:
cli_logger.error(
"Please ray stop & restart cluster to "
f"allow mount {remote}:{local} to take hold")
except json.JSONDecodeError:
cli_logger.verbose(
"Unable to check if file_mounts specified in the YAML "
"differ from those on the running container.")
# Explicitly copy in ray bootstrap files.
for mount in BOOTSTRAP_MOUNTS:
if mount in file_mounts:
self.ssh_command_runner.run(
"docker cp {src} {container}:{dst}".format(
src=os.path.join(DOCKER_MOUNT_PREFIX, mount),
container=self.container_name,
dst=self._docker_expand_user(mount)))
self.initialized = True
|
from getpass import getuser
from shlex import quote
from typing import Dict
import click
import hashlib
import json
import logging
import os
import subprocess
import sys
import time
import warnings
from ray.autoscaler.command_runner import CommandRunnerInterface
from ray.autoscaler._private.docker import check_bind_mounts_cmd, \
check_docker_running_cmd, \
check_docker_image, \
docker_start_cmds, \
DOCKER_MOUNT_PREFIX, \
with_docker_exec
from ray.autoscaler._private.log_timer import LogTimer
from ray.autoscaler._private.subprocess_output_util import (
run_cmd_redirected, ProcessRunnerError, is_output_redirected)
from ray.autoscaler._private.cli_logger import cli_logger
import colorful as cf
logger = logging.getLogger(__name__)
# How long to wait for a node to start, in seconds
NODE_START_WAIT_S = 300
HASH_MAX_LENGTH = 10
KUBECTL_RSYNC = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "kubernetes/kubectl-rsync.sh")
_config = {"use_login_shells": True, "silent_rsync": True}
def is_rsync_silent():
return _config["silent_rsync"]
def set_rsync_silent(val):
"""Choose whether to silence rsync output.
Most commands will want to list rsync'd files themselves rather than
print the default rsync spew.
"""
_config["silent_rsync"] = val
def is_using_login_shells():
return _config["use_login_shells"]
def set_using_login_shells(val):
"""Choose between login and non-interactive shells.
Non-interactive shells have the benefit of receiving less output from
subcommands (since progress bars and TTY control codes are not printed).
Sometimes this can be significant since e.g. `pip install` prints
hundreds of progress bar lines when downloading.
Login shells have the benefit of working very close to how a proper bash
session does, regarding how scripts execute and how the environment is
    set up. This is also how all commands were run in the past. The only reason
to use login shells over non-interactive shells is if you need some weird
and non-robust tool to work.
Args:
val (bool): If true, login shells will be used to run all commands.
"""
_config["use_login_shells"] = val
def _with_environment_variables(cmd: str,
environment_variables: Dict[str, object]):
"""Prepend environment variables to a shell command.
Args:
cmd (str): The base command.
environment_variables (Dict[str, object]): The set of environment
variables. If an environment variable value is a dict, it will
automatically be converted to a one line yaml string.
"""
as_strings = []
for key, val in environment_variables.items():
val = json.dumps(val, separators=(",", ":"))
s = "export {}={};".format(key, quote(val))
as_strings.append(s)
all_vars = "".join(as_strings)
return all_vars + cmd
def _with_interactive(cmd):
force_interactive = (
f"true && source ~/.bashrc && "
f"export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && ({cmd})")
return ["bash", "--login", "-c", "-i", quote(force_interactive)]
class KubernetesCommandRunner(CommandRunnerInterface):
def __init__(self, log_prefix, namespace, node_id, auth_config,
process_runner):
self.log_prefix = log_prefix
self.process_runner = process_runner
self.node_id = str(node_id)
self.namespace = namespace
self.kubectl = ["kubectl", "-n", self.namespace]
def run(
self,
cmd=None,
timeout=120,
exit_on_fail=False,
port_forward=None,
with_output=False,
environment_variables: Dict[str, object] = None,
run_env="auto", # Unused argument.
ssh_options_override_ssh_key="", # Unused argument.
shutdown_after_run=False,
):
if shutdown_after_run:
cmd += "; sudo shutdown -h now"
if cmd and port_forward:
raise Exception(
"exec with Kubernetes can't forward ports and execute"
"commands together.")
if port_forward:
if not isinstance(port_forward, list):
port_forward = [port_forward]
port_forward_cmd = self.kubectl + [
"port-forward",
self.node_id,
] + [
"{}:{}".format(local, remote) for local, remote in port_forward
]
logger.info("Port forwarding with: {}".format(
" ".join(port_forward_cmd)))
port_forward_process = subprocess.Popen(port_forward_cmd)
port_forward_process.wait()
# We should never get here, this indicates that port forwarding
# failed, likely because we couldn't bind to a port.
pout, perr = port_forward_process.communicate()
exception_str = " ".join(
port_forward_cmd) + " failed with error: " + perr
raise Exception(exception_str)
else:
final_cmd = self.kubectl + ["exec", "-it"]
final_cmd += [
self.node_id,
"--",
]
if environment_variables:
cmd = _with_environment_variables(cmd, environment_variables)
cmd = _with_interactive(cmd)
cmd_prefix = " ".join(final_cmd)
final_cmd += cmd
# `kubectl exec` + subprocess w/ list of args has unexpected
# side-effects.
final_cmd = " ".join(final_cmd)
logger.info(self.log_prefix + "Running {}".format(final_cmd))
try:
if with_output:
return self.process_runner.check_output(
final_cmd, shell=True)
else:
self.process_runner.check_call(final_cmd, shell=True)
except subprocess.CalledProcessError:
if exit_on_fail:
quoted_cmd = cmd_prefix + quote(" ".join(cmd))
logger.error(
self.log_prefix +
"Command failed: \n\n {}\n".format(quoted_cmd))
sys.exit(1)
else:
raise
def run_rsync_up(self, source, target, options=None):
if target.startswith("~"):
target = "/root" + target[1:]
try:
self.process_runner.check_call([
KUBECTL_RSYNC,
"-avz",
source,
"{}@{}:{}".format(self.node_id, self.namespace, target),
])
except Exception as e:
warnings.warn(
self.log_prefix +
"rsync failed: '{}'. Falling back to 'kubectl cp'".format(e),
UserWarning)
if target.startswith("~"):
target = "/root" + target[1:]
self.process_runner.check_call(self.kubectl + [
"cp", source, "{}/{}:{}".format(self.namespace, self.node_id,
target)
])
def run_rsync_down(self, source, target, options=None):
if target.startswith("~"):
target = "/root" + target[1:]
try:
self.process_runner.check_call([
KUBECTL_RSYNC,
"-avz",
"{}@{}:{}".format(self.node_id, self.namespace, source),
target,
])
except Exception as e:
warnings.warn(
self.log_prefix +
"rsync failed: '{}'. Falling back to 'kubectl cp'".format(e),
UserWarning)
if target.startswith("~"):
target = "/root" + target[1:]
self.process_runner.check_call(self.kubectl + [
"cp", "{}/{}:{}".format(self.namespace, self.node_id, source),
target
])
def remote_shell_command_str(self):
return "{} exec -it {} bash".format(" ".join(self.kubectl),
self.node_id)
class SSHOptions:
def __init__(self, ssh_key, control_path=None, **kwargs):
self.ssh_key = ssh_key
self.arg_dict = {
            # Suppresses initial fingerprint verification.
"StrictHostKeyChecking": "no",
# SSH IP and fingerprint pairs no longer added to known_hosts.
# This is to remove a "REMOTE HOST IDENTIFICATION HAS CHANGED"
# warning if a new node has the same IP as a previously
# deleted node, because the fingerprints will not match in
# that case.
"UserKnownHostsFile": os.devnull,
# Try fewer extraneous key pairs.
"IdentitiesOnly": "yes",
# Abort if port forwarding fails (instead of just printing to
# stderr).
"ExitOnForwardFailure": "yes",
# Quickly kill the connection if network connection breaks (as
# opposed to hanging/blocking).
"ServerAliveInterval": 5,
"ServerAliveCountMax": 3
}
if control_path:
self.arg_dict.update({
"ControlMaster": "auto",
"ControlPath": "{}/%C".format(control_path),
"ControlPersist": "10s",
})
self.arg_dict.update(kwargs)
def to_ssh_options_list(self, *, timeout=60):
self.arg_dict["ConnectTimeout"] = "{}s".format(timeout)
ssh_key_option = ["-i", self.ssh_key] if self.ssh_key else []
return ssh_key_option + [
x for y in (["-o", "{}={}".format(k, v)]
for k, v in self.arg_dict.items()
if v is not None) for x in y
]
class SSHCommandRunner(CommandRunnerInterface):
def __init__(self, log_prefix, node_id, provider, auth_config,
cluster_name, process_runner, use_internal_ip):
ssh_control_hash = hashlib.md5(cluster_name.encode()).hexdigest()
ssh_user_hash = hashlib.md5(getuser().encode()).hexdigest()
ssh_control_path = "/tmp/ray_ssh_{}/{}".format(
ssh_user_hash[:HASH_MAX_LENGTH],
ssh_control_hash[:HASH_MAX_LENGTH])
self.log_prefix = log_prefix
self.process_runner = process_runner
self.node_id = node_id
self.use_internal_ip = use_internal_ip
self.provider = provider
self.ssh_private_key = auth_config.get("ssh_private_key")
self.ssh_user = auth_config["ssh_user"]
self.ssh_control_path = ssh_control_path
self.ssh_ip = None
self.ssh_proxy_command = auth_config.get("ssh_proxy_command", None)
self.ssh_options = SSHOptions(
self.ssh_private_key,
self.ssh_control_path,
ProxyCommand=self.ssh_proxy_command)
def _get_node_ip(self):
if self.use_internal_ip:
return self.provider.internal_ip(self.node_id)
else:
return self.provider.external_ip(self.node_id)
def _wait_for_ip(self, deadline):
# if we have IP do not print waiting info
ip = self._get_node_ip()
if ip is not None:
cli_logger.labeled_value("Fetched IP", ip)
return ip
interval = 10
with cli_logger.timed("Waiting for IP"):
while time.time() < deadline and \
not self.provider.is_terminated(self.node_id):
cli_logger.old_info(logger, "{}Waiting for IP...",
self.log_prefix)
ip = self._get_node_ip()
if ip is not None:
cli_logger.labeled_value("Received", ip)
return ip
cli_logger.print("Not yet available, retrying in {} seconds",
cf.bold(str(interval)))
time.sleep(interval)
return None
def _set_ssh_ip_if_required(self):
if self.ssh_ip is not None:
return
# We assume that this never changes.
# I think that's reasonable.
deadline = time.time() + NODE_START_WAIT_S
with LogTimer(self.log_prefix + "Got IP"):
ip = self._wait_for_ip(deadline)
cli_logger.doassert(ip is not None,
"Could not get node IP.") # todo: msg
assert ip is not None, "Unable to find IP of node"
self.ssh_ip = ip
# This should run before any SSH commands and therefore ensure that
# the ControlPath directory exists, allowing SSH to maintain
# persistent sessions later on.
try:
os.makedirs(self.ssh_control_path, mode=0o700, exist_ok=True)
except OSError as e:
cli_logger.warning("{}", str(e)) # todo: msg
cli_logger.old_warning(logger, "{}", str(e))
def _run_helper(self,
final_cmd,
with_output=False,
exit_on_fail=False,
silent=False):
"""Run a command that was already setup with SSH and `bash` settings.
Args:
cmd (List[str]):
Full command to run. Should include SSH options and other
processing that we do.
with_output (bool):
If `with_output` is `True`, command stdout and stderr
will be captured and returned.
exit_on_fail (bool):
If `exit_on_fail` is `True`, the process will exit
if the command fails (exits with a code other than 0).
Raises:
ProcessRunnerError if using new log style and disabled
login shells.
click.ClickException if using login shells.
"""
try:
# For now, if the output is needed we just skip the new logic.
# In the future we could update the new logic to support
# capturing output, but it is probably not needed.
if not cli_logger.old_style and not with_output:
return run_cmd_redirected(
final_cmd,
process_runner=self.process_runner,
silent=silent,
use_login_shells=is_using_login_shells())
if with_output:
return self.process_runner.check_output(final_cmd)
else:
return self.process_runner.check_call(final_cmd)
except subprocess.CalledProcessError as e:
quoted_cmd = " ".join(final_cmd[:-1] + [quote(final_cmd[-1])])
if not cli_logger.old_style and not is_using_login_shells():
raise ProcessRunnerError(
"Command failed",
"ssh_command_failed",
code=e.returncode,
command=quoted_cmd)
if exit_on_fail:
raise click.ClickException(
"Command failed:\n\n {}\n".format(quoted_cmd)) from None
else:
fail_msg = "SSH command failed."
if is_output_redirected():
fail_msg += " See above for the output from the failure."
raise click.ClickException(fail_msg) from None
def run(
self,
cmd,
timeout=120,
exit_on_fail=False,
port_forward=None,
with_output=False,
environment_variables: Dict[str, object] = None,
run_env="auto", # Unused argument.
ssh_options_override_ssh_key="",
shutdown_after_run=False,
):
if shutdown_after_run:
cmd += "; sudo shutdown -h now"
if ssh_options_override_ssh_key:
ssh_options = SSHOptions(ssh_options_override_ssh_key)
else:
ssh_options = self.ssh_options
assert isinstance(
ssh_options, SSHOptions
), "ssh_options must be of type SSHOptions, got {}".format(
type(ssh_options))
self._set_ssh_ip_if_required()
if is_using_login_shells():
ssh = ["ssh", "-tt"]
else:
ssh = ["ssh"]
if port_forward:
with cli_logger.group("Forwarding ports"):
if not isinstance(port_forward, list):
port_forward = [port_forward]
for local, remote in port_forward:
cli_logger.verbose(
"Forwarding port {} to port {} on localhost.",
cf.bold(local), cf.bold(remote)) # todo: msg
cli_logger.old_info(logger,
"{}Forwarding {} -> localhost:{}",
self.log_prefix, local, remote)
ssh += ["-L", "{}:localhost:{}".format(remote, local)]
final_cmd = ssh + ssh_options.to_ssh_options_list(timeout=timeout) + [
"{}@{}".format(self.ssh_user, self.ssh_ip)
]
if cmd:
if environment_variables:
cmd = _with_environment_variables(cmd, environment_variables)
if is_using_login_shells():
final_cmd += _with_interactive(cmd)
else:
final_cmd += [cmd]
cli_logger.old_info(logger, "{}Running {}", self.log_prefix,
" ".join(final_cmd))
else:
# We do this because `-o ControlMaster` causes the `-N` flag to
# still create an interactive shell in some ssh versions.
final_cmd.append("while true; do sleep 86400; done")
cli_logger.verbose("Running `{}`", cf.bold(cmd))
with cli_logger.indented():
cli_logger.very_verbose("Full command is `{}`",
cf.bold(" ".join(final_cmd)))
if cli_logger.verbosity > 0:
with cli_logger.indented():
return self._run_helper(final_cmd, with_output, exit_on_fail)
else:
return self._run_helper(final_cmd, with_output, exit_on_fail)
def run_rsync_up(self, source, target, options=None):
self._set_ssh_ip_if_required()
command = [
"rsync", "--rsh",
subprocess.list2cmdline(
["ssh"] + self.ssh_options.to_ssh_options_list(timeout=120)),
"-avz", source, "{}@{}:{}".format(self.ssh_user, self.ssh_ip,
target)
]
cli_logger.verbose("Running `{}`", cf.bold(" ".join(command)))
self._run_helper(command, silent=is_rsync_silent())
def run_rsync_down(self, source, target, options=None):
self._set_ssh_ip_if_required()
command = [
"rsync", "--rsh",
subprocess.list2cmdline(
["ssh"] + self.ssh_options.to_ssh_options_list(timeout=120)),
"-avz", "{}@{}:{}".format(self.ssh_user, self.ssh_ip,
source), target
]
cli_logger.verbose("Running `{}`", cf.bold(" ".join(command)))
self._run_helper(command, silent=is_rsync_silent())
def remote_shell_command_str(self):
if self.ssh_private_key:
return "ssh -o IdentitiesOnly=yes -i {} {}@{}\n".format(
self.ssh_private_key, self.ssh_user, self.ssh_ip)
else:
return "ssh -o IdentitiesOnly=yes {}@{}\n".format(
self.ssh_user, self.ssh_ip)
class DockerCommandRunner(CommandRunnerInterface):
def __init__(self, docker_config, **common_args):
self.ssh_command_runner = SSHCommandRunner(**common_args)
self.container_name = docker_config["container_name"]
self.docker_config = docker_config
self.home_dir = None
self.initialized = False
def run(
self,
cmd,
timeout=120,
exit_on_fail=False,
port_forward=None,
with_output=False,
environment_variables: Dict[str, object] = None,
run_env="auto",
ssh_options_override_ssh_key="",
shutdown_after_run=False,
):
if run_env == "auto":
run_env = "host" if cmd.find("docker") == 0 else "docker"
if environment_variables:
cmd = _with_environment_variables(cmd, environment_variables)
if run_env == "docker":
cmd = self._docker_expand_user(cmd, any_char=True)
cmd = " ".join(_with_interactive(cmd))
cmd = with_docker_exec(
[cmd],
container_name=self.container_name,
with_interactive=True)[0]
if shutdown_after_run:
# sudo shutdown should run after `with_docker_exec` command above
cmd += "; sudo shutdown -h now"
# Do not pass shutdown_after_run argument to ssh_command_runner.run()
# since it is handled above.
return self.ssh_command_runner.run(
cmd,
timeout=timeout,
exit_on_fail=exit_on_fail,
port_forward=port_forward,
with_output=with_output,
ssh_options_override_ssh_key=ssh_options_override_ssh_key)
def run_rsync_up(self, source, target, options=None):
options = options or {}
host_destination = os.path.join(DOCKER_MOUNT_PREFIX,
target.lstrip("/"))
self.ssh_command_runner.run(
f"mkdir -p {os.path.dirname(host_destination.rstrip('/'))}")
self.ssh_command_runner.run_rsync_up(
source, host_destination, options=None)
if self._check_container_status() and not options.get(
"file_mount", False):
if os.path.isdir(source):
# Adding a "." means that docker copies the *contents*
# Without it, docker copies the source *into* the target
host_destination += "/."
self.ssh_command_runner.run("docker cp {} {}:{}".format(
host_destination, self.container_name,
self._docker_expand_user(target)))
def run_rsync_down(self, source, target, options=None):
options = options or {}
host_source = os.path.join(DOCKER_MOUNT_PREFIX, source.lstrip("/"))
self.ssh_command_runner.run(
f"mkdir -p {os.path.dirname(host_source.rstrip('/'))}")
if source[-1] == "/":
source += "."
# Adding a "." means that docker copies the *contents*
# Without it, docker copies the source *into* the target
if not options.get("file_mount", False):
self.ssh_command_runner.run("docker cp {}:{} {}".format(
self.container_name, self._docker_expand_user(source),
host_source))
self.ssh_command_runner.run_rsync_down(
host_source, target, options=None)
def remote_shell_command_str(self):
inner_str = self.ssh_command_runner.remote_shell_command_str().replace(
"ssh", "ssh -tt", 1).strip("\n")
return inner_str + " docker exec -it {} /bin/bash\n".format(
self.container_name)
def _check_docker_installed(self):
no_exist = "NoExist"
output = self.ssh_command_runner.run(
f"command -v docker || echo '{no_exist}'", with_output=True)
cleaned_output = output.decode().strip()
if no_exist in cleaned_output or "docker" not in cleaned_output:
install_commands = [
"curl -fsSL https://get.docker.com -o get-docker.sh",
"sudo sh get-docker.sh", "sudo usermod -aG docker $USER",
"sudo systemctl restart docker -f"
]
logger.error(
"Docker not installed. You can install Docker by adding the "
"following commands to 'initialization_commands':\n" +
"\n".join(install_commands))
def _check_container_status(self):
if self.initialized:
return True
output = self.ssh_command_runner.run(
check_docker_running_cmd(self.container_name),
with_output=True).decode("utf-8").strip()
# Checks for the false positive where "true" is in the container name
return ("true" in output.lower()
and "no such object" not in output.lower())
def _docker_expand_user(self, string, any_char=False):
user_pos = string.find("~")
if user_pos > -1:
if self.home_dir is None:
self.home_dir = self.ssh_command_runner.run(
"docker exec {} env | grep HOME | cut -d'=' -f2".format(
self.container_name),
with_output=True).decode("utf-8").strip()
if any_char:
return string.replace("~/", self.home_dir + "/")
elif not any_char and user_pos == 0:
return string.replace("~", self.home_dir, 1)
return string
def run_init(self, *, as_head, file_mounts):
BOOTSTRAP_MOUNTS = [
"~/ray_bootstrap_config.yaml", "~/ray_bootstrap_key.pem"
]
image = self.docker_config.get("image")
image = self.docker_config.get(
f"{'head' if as_head else 'worker'}_image", image)
self._check_docker_installed()
if self.docker_config.get("pull_before_run", True):
assert image, "Image must be included in config if " + \
"pull_before_run is specified"
self.run("docker pull {}".format(image), run_env="host")
# Bootstrap files cannot be bind mounted because docker opens the
# underlying inode. When the file is switched, docker becomes outdated.
cleaned_bind_mounts = file_mounts.copy()
for mnt in BOOTSTRAP_MOUNTS:
cleaned_bind_mounts.pop(mnt, None)
start_command = docker_start_cmds(
self.ssh_command_runner.ssh_user, image, cleaned_bind_mounts,
self.container_name,
self.docker_config.get("run_options", []) + self.docker_config.get(
f"{'head' if as_head else 'worker'}_run_options", []))
if not self._check_container_status():
self.run(start_command, run_env="host")
else:
running_image = self.run(
check_docker_image(self.container_name),
with_output=True,
run_env="host").decode("utf-8").strip()
if running_image != image:
logger.error(f"A container with name {self.container_name} " +
f"is running image {running_image} instead " +
f"of {image} (which was provided in the YAML")
mounts = self.run(
check_bind_mounts_cmd(self.container_name),
with_output=True,
run_env="host").decode("utf-8").strip()
try:
active_mounts = json.loads(mounts)
active_remote_mounts = [
mnt["Destination"] for mnt in active_mounts
]
# Ignore ray bootstrap files.
for remote, local in cleaned_bind_mounts.items():
remote = self._docker_expand_user(remote)
if remote not in active_remote_mounts:
cli_logger.error(
"Please ray stop & restart cluster to "
f"allow mount {remote}:{local} to take hold")
except json.JSONDecodeError:
cli_logger.verbose(
"Unable to check if file_mounts specified in the YAML "
"differ from those on the running container.")
# Explicitly copy in ray bootstrap files.
for mount in BOOTSTRAP_MOUNTS:
if mount in file_mounts:
self.ssh_command_runner.run(
"docker cp {src} {container}:{dst}".format(
src=os.path.join(DOCKER_MOUNT_PREFIX, mount),
container=self.container_name,
dst=self._docker_expand_user(mount)))
self.initialized = True
|
import datetime
import json
import multiprocessing
import os
import random
import re
import time
import discum
version = 'v0.01'
config_path = 'data/config.json'
logo = f'''
###### ### ### ## ####### ### ## ## ###
## ## ## ## ## ## ## ## ## ## ## ## ## ##
## ## ## ## ## ## ## ## ## ## ## ## ##
##### ## ## #### #### ## ## ## # ## ## ##
## ## ## ## ## ## ## ## ####### ## ##
## ## ## ## ## ## ## ## ## ### ### ## ##
#### ### ### ## ####### ### ## ## ###
~ Pokétwo Autocatcher {version}
'''
num_pokemon = 0
shiny = 0
legendary = 0
mythical = 0
poketwo_id = '716390085896962058'
def auto_config():
global user_token, channel_id
if not os.path.exists(config_path):
with open(config_path, "a") as file:
auth_token = input("Enter you Discord auth token: ")
channel_id = input("Enter the preferred Channel ID for spamming and catching: ")
file.write("{\n")
file.write(f' "user_token" : "{auth_token}",\n')
file.write(f' "channel_id" : "{channel_id}"\n')
file.write("}")
os.system('cls' if os.name=='nt' else 'clear')
with open(config_path,'r') as file:
info = json.loads(file.read())
user_token = info['user_token']
channel_id = info['channel_id']
with open('data/pokemon.txt', 'r', encoding='utf8') as file:
pokemon_list = file.read()
with open('data/legendary.txt','r') as file:
legendary_list = file.read()
with open('data/mythical.txt','r') as file:
mythical_list = file.read()
auto_config()
print(logo)
bot = discum.Client(token=user_token, log=False)
def solve(message):
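    # "The pokémon is " is 15 characters long; take the hint after it (minus the trailing "."),
    # drop the escape backslashes, and let the underscores act as regex wildcards.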
hint = [message[i] for i in range(15, len(message) - 1) if message[i] != '\\']
hint_string = ''.join(hint)
return re.findall(
'^' + hint_string.replace('_', '.') + '$', pokemon_list, re.MULTILINE
)
def spam():
while True:
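        # Post throwaway messages every 2.0-2.5 seconds to keep the channel active for spawns.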
random_number = random.getrandbits(128)
bot.sendMessage(channel_id, random_number)
intervals = [2.0,2.1,2.2,2.3,2.4,2.5]
time.sleep(random.choice(intervals))
def start_spam():
new_process = multiprocessing.Process(target=spam)
new_process.start()
return new_process
def stop(process):
process.terminate()
def log(string):
now = datetime.datetime.now()
current_time = now.strftime('%H:%M:%S')
print(f'[{current_time}]', string)
@bot.gateway.command
def on_ready(resp):
if resp.event.ready_supplemental:
user = bot.gateway.session.user
        log(f'Logged into account: {user["username"]}#{user["discriminator"]}')
@bot.gateway.command
def on_message(resp):
global spam_process
if resp.event.message:
m = resp.parsed.auto()
if m['channel_id'] == channel_id and m['author']['id'] == poketwo_id:
if m['embeds']:
embed_title = m['embeds'][0]['title']
if 'wild pokémon has appeared!' in embed_title:
stop(spam_process)
time.sleep(2)
bot.sendMessage(channel_id, '<@716390085896962058> h')
elif "Congratulations" in embed_title:
embed_content = m['embeds'][0]['description']
if 'now level' in embed_content:
stop(spam_process)
split = embed_content.split(' ')
a = embed_content.count(' ')
level = int(split[a].replace('!', ''))
if level == 100:
#wait will implement in next update
pass
spam_process = start_spam()
else:
content = m['content']
if 'The pokémon is ' in content:
if len(solve(content)) == 0:
log('Pokemon not found.')
else:
for i in solve(content):
stop(spam_process)
time.sleep(2)
bot.sendMessage(channel_id, f'<@716390085896962058> c {i}')
time.sleep(2)
spam_process = start_spam()
elif 'Congratulations' in content:
global shiny
global legendary
global num_pokemon
global mythical
num_pokemon += 1
split = content.split(' ')
pokemon = split[7].replace('!','')
if 'These colors seem unusual...' in content:
shiny += 1
log(f'A shiny Pokémon was caught! Pokémon: {pokemon}')
log(f'Shiny: {shiny} | Legendary: {legendary} | Mythical: {mythical}')
elif re.findall(
f'^{pokemon}$', legendary_list, re.MULTILINE
):
legendary += 1
log(f'A legendary Pokémon was caught! Pokémon: {pokemon}')
log(f'Shiny: {shiny} | Legendary: {legendary} | Mythical: {mythical}')
elif re.findall(f'^{pokemon}$', mythical_list, re.MULTILINE):
mythical += 1
log(f'A mythical Pokémon was caught! Pokémon: {pokemon}')
log(f'Shiny: {shiny} | Legendary: {legendary} | Mythical: {mythical}')
else:
print(f'Total Pokémon Caught: {num_pokemon}')
elif 'human' in content:
stop(spam_process)
log('Captcha Detected; Autocatcher Paused. Press enter to restart.')
input()
bot.sendMessage(channel_id, '<@716390085896962058> h')
if __name__ == '__main__':
print('\nEvent Log:')
spam_process = start_spam()
bot.gateway.run(auto_reconnect=True)
|
import datetime
import json
import multiprocessing
import os
import random
import re
import time
import discum
version = 'v0.01'
config_path = 'data/config.json'
logo = f'''
###### ### ### ## ####### ### ## ## ###
## ## ## ## ## ## ## ## ## ## ## ## ## ##
## ## ## ## ## ## ## ## ## ## ## ## ##
##### ## ## #### #### ## ## ## # ## ## ##
## ## ## ## ## ## ## ## ####### ## ##
## ## ## ## ## ## ## ## ## ### ### ## ##
#### ### ### ## ####### ### ## ## ###
~ Pokétwo Autocatcher {version}
'''
num_pokemon = 0
shiny = 0
legendary = 0
mythical = 0
poketwo_id = '716390085896962058'
def auto_config():
global user_token, channel_id
if not os.path.exists(config_path):
with open(config_path, "a") as file:
auth_token = input("Enter you Discord auth token: ")
channel_id = input("Enter the preferred Channel ID for spamming and catching: ")
file.write("{\n")
file.write(f' "user_token" : "{auth_token}",\n')
file.write(f' "channel_id" : "{channel_id}"\n')
file.write("}")
os.system('cls' if os.name=='nt' else 'clear')
with open(config_path,'r') as file:
info = json.loads(file.read())
user_token = info['user_token']
channel_id = info['channel_id']
with open('data/pokemon.txt', 'r', encoding='utf8') as file:
pokemon_list = file.read()
with open('data/legendary.txt','r') as file:
legendary_list = file.read()
with open('data/mythical.txt','r') as file:
mythical_list = file.read()
auto_config()
print(logo)
bot = discum.Client(token=user_token, log=False)
def solve(message):
hint = [message[i] for i in range(15, len(message) - 1) if message[i] != '\\']
hint_string = ''.join(hint)
return re.findall(
'^' + hint_string.replace('_', '.') + '$', pokemon_list, re.MULTILINE
)
def spam():
while True:
random_number = random.getrandbits(128)
bot.sendMessage(channel_id, random_number)
intervals = [2.0,2.1,2.2,2.3,2.4,2.5]
time.sleep(random.choice(intervals))
def start_spam():
new_process = multiprocessing.Process(target=spam)
new_process.start()
return new_process
def stop(process):
process.terminate()
def log(string):
now = datetime.datetime.now()
current_time = now.strftime('%H:%M:%S')
print(f'[{current_time}]', string)
@bot.gateway.command
def on_ready(resp):
if resp.event.ready_supplemental:
user = bot.gateway.session.user
log(f'Logged into account: {user["username"]}#{user["discriminator"]}')
@bot.gateway.command
def on_message(resp):
global spam_process
if resp.event.message:
m = resp.parsed.auto()
if m['channel_id'] == channel_id and m['author']['id'] == poketwo_id:
if m['embeds']:
embed_title = m['embeds'][0]['title']
if 'wild pokémon has appeared!' in embed_title:
stop(spam_process)
time.sleep(2)
bot.sendMessage(channel_id, '<@716390085896962058> h')
elif "Congratulations" in embed_title:
embed_content = m['embeds'][0]['description']
if 'now level' in embed_content:
stop(spam_process)
split = embed_content.split(' ')
a = embed_content.count(' ')
level = int(split[a].replace('!', ''))
if level == 100:
#wait will implement in next update
pass
spam_process = start_spam()
else:
content = m['content']
if 'The pokémon is ' in content:
if len(solve(content)) == 0:
log('Pokemon not found.')
else:
for i in solve(content):
stop(spam_process)
time.sleep(2)
                            bot.sendMessage(channel_id, f'<@{poketwo_id}> c {i}')
time.sleep(2)
spam_process = start_spam()
elif 'Congratulations' in content:
global shiny
global legendary
global num_pokemon
global mythical
num_pokemon += 1
split = content.split(' ')
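                    # Assumes the catch message reads 'Congratulations <@user>!
                    # You caught a level <N> <name>!', so the (single-word)
                    # name is the 8th whitespace-separated token.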
pokemon = split[7].replace('!','')
if 'These colors seem unusual...' in content:
shiny += 1
log(f'A shiny Pokémon was caught! Pokémon: {pokemon}')
log(f'Shiny: {shiny} | Legendary: {legendary} | Mythical: {mythical}')
elif re.findall(
f'^{pokemon}$', legendary_list, re.MULTILINE
):
legendary += 1
log(f'A legendary Pokémon was caught! Pokémon: {pokemon}')
log(f'Shiny: {shiny} | Legendary: {legendary} | Mythical: {mythical}')
elif re.findall(f'^{pokemon}$', mythical_list, re.MULTILINE):
mythical += 1
log(f'A mythical Pokémon was caught! Pokémon: {pokemon}')
log(f'Shiny: {shiny} | Legendary: {legendary} | Mythical: {mythical}')
else:
print(f'Total Pokémon Caught: {num_pokemon}')
elif 'human' in content:
stop(spam_process)
log('Captcha Detected; Autocatcher Paused. Press enter to restart.')
input()
                    bot.sendMessage(channel_id, f'<@{poketwo_id}> h')
if __name__ == '__main__':
print('\nEvent Log:')
spam_process = start_spam()
bot.gateway.run(auto_reconnect=True)
|
# -*- coding: utf-8 -*-
"""Functions to make simple plots with M/EEG data."""
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Cathy Nangini <[email protected]>
# Mainak Jas <[email protected]>
#
# License: Simplified BSD
import base64
import copy
from glob import glob
from io import BytesIO
from itertools import cycle
import os.path as op
import warnings
from distutils.version import LooseVersion
from collections import defaultdict
import numpy as np
from ..defaults import DEFAULTS
from ..fixes import _get_img_fdata
from ..rank import compute_rank
from ..surface import read_surface
from ..io.constants import FIFF
from ..io.proj import make_projector
from ..io.pick import (_DATA_CH_TYPES_SPLIT, pick_types, pick_info,
pick_channels)
from ..source_space import (read_source_spaces, SourceSpaces,
_check_mri, _ensure_src)
from ..transforms import invert_transform, apply_trans, _frame_to_str
from ..utils import (logger, verbose, warn, _check_option, get_subjects_dir,
_mask_to_onsets_offsets, _pl, _on_missing, fill_doc)
from ..io.pick import _picks_by_type
from ..filter import estimate_ringing_samples
from .utils import (tight_layout, _get_color_list, _prepare_trellis, plt_show,
_figure_agg)
def _index_info_cov(info, cov, exclude):
if exclude == 'bads':
exclude = info['bads']
info = pick_info(info, pick_channels(info['ch_names'], cov['names'],
exclude))
del exclude
picks_list = \
_picks_by_type(info, meg_combined=False, ref_meg=False,
exclude=())
picks_by_type = dict(picks_list)
ch_names = [n for n in cov.ch_names if n in info['ch_names']]
ch_idx = [cov.ch_names.index(n) for n in ch_names]
info_ch_names = info['ch_names']
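    # For each channel type, find where its channels sit inside the
    # covariance sub-matrix defined by ch_names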
idx_by_type = defaultdict(list)
for ch_type, sel in picks_by_type.items():
idx_by_type[ch_type] = [ch_names.index(info_ch_names[c])
for c in sel if info_ch_names[c] in ch_names]
idx_names = [(idx_by_type[key],
'%s covariance' % DEFAULTS['titles'][key],
DEFAULTS['units'][key],
DEFAULTS['scalings'][key],
key)
for key in _DATA_CH_TYPES_SPLIT
if len(idx_by_type[key]) > 0]
C = cov.data[ch_idx][:, ch_idx]
return info, C, ch_names, idx_names
@verbose
def plot_cov(cov, info, exclude=(), colorbar=True, proj=False, show_svd=True,
show=True, verbose=None):
"""Plot Covariance data.
Parameters
----------
cov : instance of Covariance
The covariance matrix.
%(info_not_none)s
exclude : list of str | str
List of channels to exclude. If empty do not exclude any channel.
If 'bads', exclude info['bads'].
colorbar : bool
Show colorbar or not.
proj : bool
Apply projections or not.
show_svd : bool
Plot also singular values of the noise covariance for each sensor
        type. We show square roots, i.e. standard deviations.
show : bool
Show figure if True.
%(verbose)s
Returns
-------
fig_cov : instance of matplotlib.figure.Figure
The covariance plot.
fig_svd : instance of matplotlib.figure.Figure | None
The SVD spectra plot of the covariance.
See Also
--------
mne.compute_rank
Notes
-----
For each channel type, the rank is estimated using
:func:`mne.compute_rank`.
.. versionchanged:: 0.19
Approximate ranks for each channel type are shown with red dashed lines.
"""
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from scipy import linalg
from ..cov import Covariance
info, C, ch_names, idx_names = _index_info_cov(info, cov, exclude)
del cov, exclude
projs = []
if proj:
projs = copy.deepcopy(info['projs'])
# Activate the projection items
for p in projs:
p['active'] = True
P, ncomp, _ = make_projector(projs, ch_names)
if ncomp > 0:
logger.info(' Created an SSP operator (subspace dimension'
' = %d)' % ncomp)
C = np.dot(P, np.dot(C, P.T))
else:
logger.info(' The projection vectors do not apply to these '
'channels.')
fig_cov, axes = plt.subplots(1, len(idx_names), squeeze=False,
figsize=(3.8 * len(idx_names), 3.7))
for k, (idx, name, _, _, _) in enumerate(idx_names):
vlim = np.max(np.abs(C[idx][:, idx]))
im = axes[0, k].imshow(C[idx][:, idx], interpolation="nearest",
norm=Normalize(vmin=-vlim, vmax=vlim),
cmap='RdBu_r')
axes[0, k].set(title=name)
if colorbar:
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(axes[0, k])
cax = divider.append_axes("right", size="5.5%", pad=0.05)
plt.colorbar(im, cax=cax, format='%.0e')
fig_cov.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.2, 0.26)
tight_layout(fig=fig_cov)
fig_svd = None
if show_svd:
fig_svd, axes = plt.subplots(1, len(idx_names), squeeze=False,
figsize=(3.8 * len(idx_names), 3.7))
for k, (idx, name, unit, scaling, key) in enumerate(idx_names):
this_C = C[idx][:, idx]
s = linalg.svd(this_C, compute_uv=False)
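            # Wrap this channel-type block in a temporary Covariance so that
            # compute_rank can estimate its rank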
this_C = Covariance(this_C, [info['ch_names'][ii] for ii in idx],
[], [], 0)
this_info = pick_info(info, idx)
this_info['projs'] = []
this_rank = compute_rank(this_C, info=this_info)
# Protect against true zero singular values
s[s <= 0] = 1e-10 * s[s > 0].min()
s = np.sqrt(s) * scaling
axes[0, k].plot(s, color='k', zorder=3)
this_rank = this_rank[key]
axes[0, k].axvline(this_rank - 1, ls='--', color='r',
alpha=0.5, zorder=4, clip_on=False)
axes[0, k].text(this_rank - 1, axes[0, k].get_ylim()[1],
'rank ≈ %d' % (this_rank,), ha='right', va='top',
color='r', alpha=0.5, zorder=4)
axes[0, k].set(ylabel=u'Noise σ (%s)' % unit, yscale='log',
xlabel='Eigenvalue index', title=name,
xlim=[0, len(s) - 1])
tight_layout(fig=fig_svd)
plt_show(show)
return fig_cov, fig_svd
def plot_source_spectrogram(stcs, freq_bins, tmin=None, tmax=None,
source_index=None, colorbar=False, show=True):
"""Plot source power in time-freqency grid.
Parameters
----------
stcs : list of SourceEstimate
Source power for consecutive time windows, one SourceEstimate object
should be provided for each frequency bin.
freq_bins : list of tuples of float
Start and end points of frequency bins of interest.
tmin : float
Minimum time instant to show.
tmax : float
Maximum time instant to show.
source_index : int | None
Index of source for which the spectrogram will be plotted. If None,
the source with the largest activation will be selected.
colorbar : bool
If true, a colorbar will be added to the plot.
show : bool
Show figure if True.
Returns
-------
fig : instance of Figure
The figure.
"""
import matplotlib.pyplot as plt
# Input checks
if len(stcs) == 0:
raise ValueError('cannot plot spectrogram if len(stcs) == 0')
stc = stcs[0]
if tmin is not None and tmin < stc.times[0]:
raise ValueError('tmin cannot be smaller than the first time point '
'provided in stcs')
if tmax is not None and tmax > stc.times[-1] + stc.tstep:
raise ValueError('tmax cannot be larger than the sum of the last time '
'point and the time step, which are provided in stcs')
# Preparing time-frequency cell boundaries for plotting
if tmin is None:
tmin = stc.times[0]
if tmax is None:
tmax = stc.times[-1] + stc.tstep
time_bounds = np.arange(tmin, tmax + stc.tstep, stc.tstep)
freq_bounds = sorted(set(np.ravel(freq_bins)))
freq_ticks = copy.deepcopy(freq_bounds)
# Reject time points that will not be plotted and gather results
source_power = []
for stc in stcs:
stc = stc.copy() # copy since crop modifies inplace
stc.crop(tmin, tmax - stc.tstep)
source_power.append(stc.data)
source_power = np.array(source_power)
# Finding the source with maximum source power
if source_index is None:
source_index = np.unravel_index(source_power.argmax(),
source_power.shape)[1]
# If there is a gap in the frequency bins record its locations so that it
# can be covered with a gray horizontal bar
gap_bounds = []
for i in range(len(freq_bins) - 1):
lower_bound = freq_bins[i][1]
upper_bound = freq_bins[i + 1][0]
if lower_bound != upper_bound:
freq_bounds.remove(lower_bound)
gap_bounds.append((lower_bound, upper_bound))
# Preparing time-frequency grid for plotting
time_grid, freq_grid = np.meshgrid(time_bounds, freq_bounds)
# Plotting the results
fig = plt.figure(figsize=(9, 6))
plt.pcolor(time_grid, freq_grid, source_power[:, source_index, :],
cmap='Reds')
ax = plt.gca()
ax.set(title='Source power', xlabel='Time (s)', ylabel='Frequency (Hz)')
time_tick_labels = [str(np.round(t, 2)) for t in time_bounds]
n_skip = 1 + len(time_bounds) // 10
for i in range(len(time_bounds)):
if i % n_skip != 0:
time_tick_labels[i] = ''
ax.set_xticks(time_bounds)
ax.set_xticklabels(time_tick_labels)
plt.xlim(time_bounds[0], time_bounds[-1])
plt.yscale('log')
ax.set_yticks(freq_ticks)
ax.set_yticklabels([np.round(freq, 2) for freq in freq_ticks])
plt.ylim(freq_bounds[0], freq_bounds[-1])
plt.grid(True, ls='-')
if colorbar:
plt.colorbar()
tight_layout(fig=fig)
# Covering frequency gaps with horizontal bars
for lower_bound, upper_bound in gap_bounds:
plt.barh(lower_bound, time_bounds[-1] - time_bounds[0], upper_bound -
lower_bound, time_bounds[0], color='#666666')
plt_show(show)
return fig
def _plot_mri_contours(mri_fname, surfaces, src, orientation='coronal',
slices=None, show=True, show_indices=False,
show_orientation=False, img_output=False, width=512):
"""Plot BEM contours on anatomical slices."""
import matplotlib.pyplot as plt
from matplotlib import patheffects
from .._freesurfer import _mri_orientation, _read_mri_info
# For ease of plotting, we will do everything in voxel coordinates.
_check_option('orientation', orientation, ('coronal', 'axial', 'sagittal'))
# Load the T1 data
_, vox_mri_t, _, _, _, nim = _read_mri_info(
mri_fname, units='mm', return_img=True)
mri_vox_t = invert_transform(vox_mri_t)['trans']
del vox_mri_t
# plot axes (x, y, z) as data axes
(x, y, z), (flip_x, flip_y, flip_z), order = _mri_orientation(
nim, orientation)
transpose = x < y
data = _get_img_fdata(nim)
shift_x = data.shape[x] if flip_x < 0 else 0
shift_y = data.shape[y] if flip_y < 0 else 0
n_slices = data.shape[z]
if slices is None:
slices = np.round(np.linspace(0, n_slices - 1, 14)).astype(int)[1:-1]
slices = np.atleast_1d(slices).copy()
slices[slices < 0] += n_slices # allow negative indexing
if not np.array_equal(np.sort(slices), slices) or slices.ndim != 1 or \
slices.size < 1 or slices[0] < 0 or slices[-1] >= n_slices or \
slices.dtype.kind not in 'iu':
raise ValueError('slices must be a sorted 1D array of int with unique '
'elements, at least one element, and no elements '
'greater than %d, got %s' % (n_slices - 1, slices))
if flip_z < 0:
        # Proceed in the opposite order to maintain the left-to-right orientation
slices = slices[::-1]
    # create a list of surfaces
surfs = list()
for file_name, color in surfaces:
surf = dict()
surf['rr'], surf['tris'] = read_surface(file_name)
# move surface to voxel coordinate system
surf['rr'] = apply_trans(mri_vox_t, surf['rr'])
surfs.append((surf, color))
sources = list()
if src is not None:
_ensure_src(src, extra=' or None')
# Eventually we can relax this by allowing ``trans`` if need be
if src[0]['coord_frame'] != FIFF.FIFFV_COORD_MRI:
raise ValueError(
'Source space must be in MRI coordinates, got '
                f'{_frame_to_str[src[0]["coord_frame"]]}')
for src_ in src:
points = src_['rr'][src_['inuse'].astype(bool)]
sources.append(apply_trans(mri_vox_t, points * 1e3))
sources = np.concatenate(sources, axis=0)
if img_output:
n_col = n_axes = 1
dpi = 96
# 2x standard MRI resolution is probably good enough for the
# traces
w = width / dpi
figsize = (w, w / data.shape[x] * data.shape[y])
fig = _figure_agg(figsize=figsize, dpi=dpi, facecolor='k')
ax = fig.add_axes([0, 0, 1, 1], frame_on=False, facecolor='k')
axs = [ax] * len(slices)
plt.close(fig)
else:
n_col = 4
fig, axs, _, _ = _prepare_trellis(len(slices), n_col)
fig.set_facecolor('k')
dpi = fig.get_dpi()
n_axes = len(axs)
bounds = np.concatenate(
[[-np.inf], slices[:-1] + np.diff(slices) / 2., [np.inf]]) # float
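    # bounds holds the midpoints between consecutive slices (padded with
    # +/-inf); each source point is later assigned to the slice whose
    # interval contains it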
slicer = [slice(None)] * 3
ori_labels = dict(R='LR', A='PA', S='IS')
xlabels, ylabels = ori_labels[order[0]], ori_labels[order[1]]
path_effects = [patheffects.withStroke(linewidth=4, foreground="k",
alpha=0.75)]
out = list() if img_output else fig
for ai, (ax, sl, lower, upper) in enumerate(zip(
axs, slices, bounds[:-1], bounds[1:])):
# adjust the orientations for good view
slicer[z] = sl
dat = data[tuple(slicer)]
dat = dat.T if transpose else dat
dat = dat[::flip_y, ::flip_x]
# First plot the anatomical data
if img_output:
ax.clear()
ax.imshow(dat, cmap=plt.cm.gray, origin='lower')
ax.set_autoscale_on(False)
ax.axis('off')
ax.set_aspect('equal') # XXX eventually could deal with zooms
# and then plot the contours on top
for surf, color in surfs:
with warnings.catch_warnings(record=True): # ignore contour warn
warnings.simplefilter('ignore')
ax.tricontour(flip_x * surf['rr'][:, x] + shift_x,
flip_y * surf['rr'][:, y] + shift_y,
surf['tris'], surf['rr'][:, z],
levels=[sl], colors=color, linewidths=1.0,
zorder=1)
if len(sources):
in_slice = (sources[:, z] >= lower) & (sources[:, z] < upper)
ax.scatter(flip_x * sources[in_slice, x] + shift_x,
flip_y * sources[in_slice, y] + shift_y,
marker='.', color='#FF00FF', s=1, zorder=2)
if show_indices:
ax.text(dat.shape[1] // 8 + 0.5, 0.5, str(sl),
color='w', fontsize='x-small', va='bottom', ha='left')
# label the axes
kwargs = dict(
color='#66CCEE', fontsize='medium', path_effects=path_effects,
family='monospace', clip_on=False, zorder=5, weight='bold')
if show_orientation:
if ai % n_col == 0: # left
ax.text(0, dat.shape[0] / 2., xlabels[0],
va='center', ha='left', **kwargs)
if ai % n_col == n_col - 1 or ai == n_axes - 1: # right
ax.text(dat.shape[1] - 1, dat.shape[0] / 2., xlabels[1],
va='center', ha='right', **kwargs)
if ai >= n_axes - n_col: # bottom
ax.text(dat.shape[1] / 2., 0, ylabels[0],
ha='center', va='bottom', **kwargs)
if ai < n_col or n_col == 1: # top
ax.text(dat.shape[1] / 2., dat.shape[0] - 1, ylabels[1],
ha='center', va='top', **kwargs)
if img_output:
output = BytesIO()
fig.savefig(output, bbox_inches='tight',
pad_inches=0, format='png', dpi=dpi)
out.append(base64.b64encode(output.getvalue()).decode('ascii'))
fig.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,
hspace=0.)
plt_show(show, fig=fig)
return out, flip_z
def plot_bem(subject=None, subjects_dir=None, orientation='coronal',
slices=None, brain_surfaces=None, src=None, show=True,
show_indices=True, mri='T1.mgz', show_orientation=True):
"""Plot BEM contours on anatomical slices.
Parameters
----------
subject : str
Subject name.
subjects_dir : str | None
Path to the SUBJECTS_DIR. If None, the path is obtained by using
the environment variable SUBJECTS_DIR.
orientation : str
'coronal' or 'axial' or 'sagittal'.
slices : list of int
Slice indices.
brain_surfaces : None | str | list of str
        One or more brain surfaces to plot (optional). Entries should correspond
to files in the subject's ``surf`` directory (e.g. ``"white"``).
src : None | SourceSpaces | str
SourceSpaces instance or path to a source space to plot individual
sources as scatter-plot. Sources will be shown on exactly one slice
(whichever slice is closest to each source in the given orientation
plane). Path can be absolute or relative to the subject's ``bem``
folder.
.. versionchanged:: 0.20
All sources are shown on the nearest slice rather than some
being omitted.
show : bool
Show figure if True.
show_indices : bool
Show slice indices if True.
.. versionadded:: 0.20
mri : str
The name of the MRI to use. Can be a standard FreeSurfer MRI such as
``'T1.mgz'``, or a full path to a custom MRI file.
.. versionadded:: 0.21
show_orientation : str
Show the orientation (L/R, P/A, I/S) of the data slices.
.. versionadded:: 0.21
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
See Also
--------
mne.viz.plot_alignment
Notes
-----
Images are plotted in MRI voxel coordinates.
    If ``src`` is not None, then for a given slice index, all source points
    that fall between the midpoint to the previous slice and the midpoint to
    the next slice are shown.
For large slice decimations, this can
make some source points appear outside the BEM contour, which is shown
for the given slice index. For example, in the case where the single
midpoint slice is used ``slices=[128]``, all source points will be shown
on top of the midpoint MRI slice with the BEM boundary drawn for that
slice.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
mri_fname = _check_mri(mri, subject, subjects_dir)
# Get the BEM surface filenames
bem_path = op.join(subjects_dir, subject, 'bem')
if not op.isdir(bem_path):
raise IOError('Subject bem directory "%s" does not exist' % bem_path)
surfaces = _get_bem_plotting_surfaces(bem_path)
if brain_surfaces is not None:
if isinstance(brain_surfaces, str):
brain_surfaces = (brain_surfaces,)
for surf_name in brain_surfaces:
for hemi in ('lh', 'rh'):
surf_fname = op.join(subjects_dir, subject, 'surf',
hemi + '.' + surf_name)
if op.exists(surf_fname):
surfaces.append((surf_fname, '#00DD00'))
else:
raise IOError("Surface %s does not exist." % surf_fname)
if isinstance(src, str):
if not op.exists(src):
src_ = op.join(subjects_dir, subject, 'bem', src)
if op.exists(src_):
src = src_
else:
raise IOError("%s does not exist" % src)
src = read_source_spaces(src)
elif src is not None and not isinstance(src, SourceSpaces):
raise TypeError("src needs to be None, str or SourceSpaces instance, "
"not %s" % repr(src))
if len(surfaces) == 0:
raise IOError('No surface files found. Surface files must end with '
'inner_skull.surf, outer_skull.surf or outer_skin.surf')
# Plot the contours
return _plot_mri_contours(mri_fname, surfaces, src, orientation, slices,
show, show_indices, show_orientation)[0]
def _get_bem_plotting_surfaces(bem_path):
surfaces = []
for surf_name, color in (('*inner_skull', '#FF0000'),
('*outer_skull', '#FFFF00'),
('*outer_skin', '#FFAA80')):
surf_fname = glob(op.join(bem_path, surf_name + '.surf'))
if len(surf_fname) > 0:
surf_fname = surf_fname[0]
logger.info("Using surface: %s" % surf_fname)
surfaces.append((surf_fname, color))
return surfaces
@verbose
def plot_events(events, sfreq=None, first_samp=0, color=None, event_id=None,
axes=None, equal_spacing=True, show=True, on_missing='raise',
verbose=None):
"""Plot events to get a visual display of the paradigm.
Parameters
----------
events : array, shape (n_events, 3)
The events.
sfreq : float | None
The sample frequency. If None, data will be displayed in samples (not
seconds).
first_samp : int
The index of the first sample. Recordings made on Neuromag systems
number samples relative to the system start (not relative to the
beginning of the recording). In such cases the ``raw.first_samp``
attribute can be passed here. Default is 0.
color : dict | None
Dictionary of event_id integers as keys and colors as values. If None,
        colors are automatically drawn from a default list (cycled through if
        the number of events exceeds the number of default colors). Color can
        be any valid :doc:`matplotlib color <tutorials/colors/colors>`.
event_id : dict | None
Dictionary of event labels (e.g. 'aud_l') as keys and their associated
event_id values. Labels are used to plot a legend. If None, no legend
is drawn.
axes : instance of Axes
The subplot handle.
equal_spacing : bool
Use equal spacing between events in y-axis.
show : bool
Show figure if True.
%(on_missing_events)s
%(verbose)s
Returns
-------
fig : matplotlib.figure.Figure
The figure object containing the plot.
Notes
-----
.. versionadded:: 0.9.0
"""
if sfreq is None:
sfreq = 1.0
xlabel = 'Samples'
else:
xlabel = 'Time (s)'
events = np.asarray(events)
if len(events) == 0:
raise ValueError('No events in events array, cannot plot.')
unique_events = np.unique(events[:, 2])
if event_id is not None:
# get labels and unique event ids from event_id dict,
# sorted by value
event_id_rev = {v: k for k, v in event_id.items()}
conditions, unique_events_id = zip(*sorted(event_id.items(),
key=lambda x: x[1]))
keep = np.ones(len(unique_events_id), bool)
for ii, this_event in enumerate(unique_events_id):
if this_event not in unique_events:
msg = f'{this_event} from event_id is not present in events.'
_on_missing(on_missing, msg)
keep[ii] = False
conditions = [cond for cond, k in zip(conditions, keep) if k]
unique_events_id = [id_ for id_, k in zip(unique_events_id, keep) if k]
if len(unique_events_id) == 0:
raise RuntimeError('No usable event IDs found')
for this_event in unique_events:
if this_event not in unique_events_id:
warn('event %s missing from event_id will be ignored'
% this_event)
else:
unique_events_id = unique_events
color = _handle_event_colors(color, unique_events, event_id)
import matplotlib.pyplot as plt
fig = None
if axes is None:
fig = plt.figure()
ax = axes if axes else plt.gca()
unique_events_id = np.array(unique_events_id)
min_event = np.min(unique_events_id)
max_event = np.max(unique_events_id)
max_x = (events[np.in1d(events[:, 2], unique_events_id), 0].max() -
first_samp) / sfreq
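    # right edge of the x-axis: latest sample among the plotted event IDs,
    # converted to seconds (or left in samples if sfreq was None)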
handles, labels = list(), list()
for idx, ev in enumerate(unique_events_id):
ev_mask = events[:, 2] == ev
count = ev_mask.sum()
if count == 0:
continue
y = np.full(count, idx + 1 if equal_spacing else events[ev_mask, 2][0])
if event_id is not None:
event_label = '%s (%s)' % (event_id_rev[ev], count)
else:
event_label = 'N=%d' % (count,)
labels.append(event_label)
kwargs = {}
if ev in color:
kwargs['color'] = color[ev]
handles.append(
ax.plot((events[ev_mask, 0] - first_samp) / sfreq,
y, '.', clip_on=False, **kwargs)[0])
if equal_spacing:
ax.set_ylim(0, unique_events_id.size + 1)
ax.set_yticks(1 + np.arange(unique_events_id.size))
ax.set_yticklabels(unique_events_id)
else:
ax.set_ylim([min_event - 1, max_event + 1])
ax.set(xlabel=xlabel, ylabel='Event id', xlim=[0, max_x])
ax.grid(True)
fig = fig if fig is not None else plt.gcf()
# reverse order so that the highest numbers are at the top
# (match plot order)
handles, labels = handles[::-1], labels[::-1]
box = ax.get_position()
factor = 0.8 if event_id is not None else 0.9
ax.set_position([box.x0, box.y0, box.width * factor, box.height])
ax.legend(handles, labels, loc='center left', bbox_to_anchor=(1, 0.5),
fontsize='small')
fig.canvas.draw()
plt_show(show)
return fig
def _get_presser(fig):
"""Get our press callback."""
import matplotlib
callbacks = fig.canvas.callbacks.callbacks['button_press_event']
func = None
for key, val in callbacks.items():
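        # matplotlib >= 3 stores weak references to callbacks, so they must
        # be called to obtain the underlying function; older versions expose
        # it via .func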
if LooseVersion(matplotlib.__version__) >= '3':
func = val()
else:
func = val.func
if func.__class__.__name__ == 'partial':
break
else:
func = None
assert func is not None
return func
def plot_dipole_amplitudes(dipoles, colors=None, show=True):
"""Plot the amplitude traces of a set of dipoles.
Parameters
----------
dipoles : list of instance of Dipole
The dipoles whose amplitudes should be shown.
colors : list of color | None
Color to plot with each dipole. If None default colors are used.
show : bool
Show figure if True.
Returns
-------
fig : matplotlib.figure.Figure
The figure object containing the plot.
Notes
-----
.. versionadded:: 0.9.0
"""
import matplotlib.pyplot as plt
if colors is None:
colors = cycle(_get_color_list())
fig, ax = plt.subplots(1, 1)
xlim = [np.inf, -np.inf]
for dip, color in zip(dipoles, colors):
ax.plot(dip.times, dip.amplitude * 1e9, color=color, linewidth=1.5)
xlim[0] = min(xlim[0], dip.times[0])
xlim[1] = max(xlim[1], dip.times[-1])
ax.set(xlim=xlim, xlabel='Time (s)', ylabel='Amplitude (nAm)')
if show:
fig.show(warn=False)
return fig
def adjust_axes(axes, remove_spines=('top', 'right'), grid=True):
"""Adjust some properties of axes.
Parameters
----------
axes : list
List of axes to process.
remove_spines : list of str
Which axis spines to remove.
grid : bool
Turn grid on (True) or off (False).
"""
axes = [axes] if not isinstance(axes, (list, tuple, np.ndarray)) else axes
for ax in axes:
if grid:
ax.grid(zorder=0)
for key in remove_spines:
ax.spines[key].set_visible(False)
def _filter_ticks(lims, fscale):
"""Create approximately spaced ticks between lims."""
if fscale == 'linear':
return None, None # let matplotlib handle it
lims = np.array(lims)
ticks = list()
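    # use a sparse 1-2-4 tick base when the limits span more than a factor
    # of 20, otherwise place a tick at every integer multiple from 1 to 10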
if lims[1] > 20 * lims[0]:
base = np.array([1, 2, 4])
else:
base = np.arange(1, 11)
for exp in range(int(np.floor(np.log10(lims[0]))),
int(np.floor(np.log10(lims[1]))) + 1):
ticks += (base * (10 ** exp)).tolist()
ticks = np.array(ticks)
ticks = ticks[(ticks >= lims[0]) & (ticks <= lims[1])]
ticklabels = [('%g' if t < 1 else '%d') % t for t in ticks]
return ticks, ticklabels
def _get_flim(flim, fscale, freq, sfreq=None):
"""Get reasonable frequency limits."""
if flim is None:
if freq is None:
flim = [0.1 if fscale == 'log' else 0., sfreq / 2.]
else:
if fscale == 'linear':
flim = [freq[0]]
else:
flim = [freq[0] if freq[0] > 0 else 0.1 * freq[1]]
flim += [freq[-1]]
if fscale == 'log':
if flim[0] <= 0:
raise ValueError('flim[0] must be positive, got %s' % flim[0])
elif flim[0] < 0:
raise ValueError('flim[0] must be non-negative, got %s' % flim[0])
return flim
def _check_fscale(fscale):
"""Check for valid fscale."""
if not isinstance(fscale, str) or fscale not in ('log', 'linear'):
raise ValueError('fscale must be "log" or "linear", got %s'
% (fscale,))
_DEFAULT_ALIM = (-80, 10)
def plot_filter(h, sfreq, freq=None, gain=None, title=None, color='#1f77b4',
flim=None, fscale='log', alim=_DEFAULT_ALIM, show=True,
compensate=False, plot=('time', 'magnitude', 'delay'),
axes=None):
"""Plot properties of a filter.
Parameters
----------
h : dict or ndarray
An IIR dict or 1D ndarray of coefficients (for FIR filter).
sfreq : float
Sample rate of the data (Hz).
freq : array-like or None
The ideal response frequencies to plot (must be in ascending order).
If None (default), do not plot the ideal response.
gain : array-like or None
The ideal response gains to plot.
If None (default), do not plot the ideal response.
title : str | None
The title to use. If None (default), determine the title based
on the type of the system.
color : color object
The color to use (default '#1f77b4').
flim : tuple or None
If not None, the x-axis frequency limits (Hz) to use.
If None, freq will be used. If None (default) and freq is None,
``(0.1, sfreq / 2.)`` will be used.
fscale : str
Frequency scaling to use, can be "log" (default) or "linear".
alim : tuple
        The y-axis amplitude limits (dB) to use (default: (-80, 10)).
show : bool
Show figure if True (default).
compensate : bool
If True, compensate for the filter delay (phase will not be shown).
- For linear-phase FIR filters, this visualizes the filter coefficients
assuming that the output will be shifted by ``N // 2``.
- For IIR filters, this changes the filter coefficient display
by filtering backward and forward, and the frequency response
by squaring it.
.. versionadded:: 0.18
plot : list | tuple | str
A list of the requested plots from ``time``, ``magnitude`` and
``delay``. Default is to plot all three filter properties
('time', 'magnitude', 'delay').
.. versionadded:: 0.21.0
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of requested plot types. If instance of
Axes, there must be only one filter property plotted.
Defaults to ``None``.
.. versionadded:: 0.21.0
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the plots.
See Also
--------
mne.filter.create_filter
plot_ideal_filter
Notes
-----
.. versionadded:: 0.14
"""
from scipy.signal import (
freqz, group_delay, lfilter, filtfilt, sosfilt, sosfiltfilt)
import matplotlib.pyplot as plt
sfreq = float(sfreq)
_check_option('fscale', fscale, ['log', 'linear'])
if isinstance(plot, str):
plot = [plot]
for xi, x in enumerate(plot):
_check_option('plot[%d]' % xi, x, ('magnitude', 'delay', 'time'))
flim = _get_flim(flim, fscale, freq, sfreq)
if fscale == 'log':
omega = np.logspace(np.log10(flim[0]), np.log10(flim[1]), 1000)
else:
omega = np.linspace(flim[0], flim[1], 1000)
xticks, xticklabels = _filter_ticks(flim, fscale)
omega /= sfreq / (2 * np.pi)
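    # convert from Hz to normalized angular frequency (rad/sample), as
    # expected by freqz and group_delay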
    if isinstance(h, dict):  # IIR filter: dict with 'sos' or 'b'/'a' entries
if 'sos' in h:
H = np.ones(len(omega), np.complex128)
gd = np.zeros(len(omega))
for section in h['sos']:
this_H = freqz(section[:3], section[3:], omega)[1]
H *= this_H
if compensate:
H *= this_H.conj() # time reversal is freq conj
else:
# Assume the forward-backward delay zeros out, which it
# mostly should
with warnings.catch_warnings(record=True): # singular GD
warnings.simplefilter('ignore')
gd += group_delay((section[:3], section[3:]), omega)[1]
n = estimate_ringing_samples(h['sos'])
delta = np.zeros(n)
delta[0] = 1
if compensate:
delta = np.pad(delta, [(n - 1, 0)], 'constant')
func = sosfiltfilt
gd += (len(delta) - 1) // 2
else:
func = sosfilt
h = func(h['sos'], delta)
else:
H = freqz(h['b'], h['a'], omega)[1]
if compensate:
H *= H.conj()
with warnings.catch_warnings(record=True): # singular GD
warnings.simplefilter('ignore')
gd = group_delay((h['b'], h['a']), omega)[1]
if compensate:
gd += group_delay(h['b'].conj(), h['a'].conj(), omega)[1]
n = estimate_ringing_samples((h['b'], h['a']))
delta = np.zeros(n)
delta[0] = 1
if compensate:
delta = np.pad(delta, [(n - 1, 0)], 'constant')
func = filtfilt
else:
func = lfilter
h = func(h['b'], h['a'], delta)
if title is None:
title = 'SOS (IIR) filter'
if compensate:
title += ' (forward-backward)'
else:
H = freqz(h, worN=omega)[1]
with warnings.catch_warnings(record=True): # singular GD
warnings.simplefilter('ignore')
gd = group_delay((h, [1.]), omega)[1]
title = 'FIR filter' if title is None else title
if compensate:
title += ' (delay-compensated)'
fig = None
if axes is None:
fig, axes = plt.subplots(len(plot), 1)
if isinstance(axes, plt.Axes):
axes = [axes]
elif isinstance(axes, np.ndarray):
axes = list(axes)
if fig is None:
fig = axes[0].get_figure()
if len(axes) != len(plot):
raise ValueError('Length of axes (%d) must be the same as number of '
'requested filter properties (%d)'
% (len(axes), len(plot)))
t = np.arange(len(h))
dlim = np.abs(t).max() / 2.
dlim = [-dlim, dlim]
if compensate:
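        # center the impulse response and subtract the constant group delay
        # of a linear-phase FIR filter so it is displayed as zero-phase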
n_shift = (len(h) - 1) // 2
t -= n_shift
assert t[0] == -t[-1]
gd -= n_shift
t = t / sfreq
gd = gd / sfreq
f = omega * sfreq / (2 * np.pi)
sl = slice(0 if fscale == 'linear' else 1, None, None)
mag = 10 * np.log10(np.maximum((H * H.conj()).real, 1e-20))
if 'time' in plot:
ax_time_idx = np.where([p == 'time' for p in plot])[0][0]
axes[ax_time_idx].plot(t, h, color=color)
axes[ax_time_idx].set(xlim=t[[0, -1]], xlabel='Time (s)',
ylabel='Amplitude', title=title)
# Magnitude
if 'magnitude' in plot:
ax_mag_idx = np.where([p == 'magnitude' for p in plot])[0][0]
axes[ax_mag_idx].plot(f[sl], mag[sl], color=color,
linewidth=2, zorder=4)
if freq is not None and gain is not None:
plot_ideal_filter(freq, gain, axes[ax_mag_idx],
fscale=fscale, show=False)
axes[ax_mag_idx].set(ylabel='Magnitude (dB)', xlabel='', xscale=fscale)
if xticks is not None:
axes[ax_mag_idx].set(xticks=xticks)
axes[ax_mag_idx].set(xticklabels=xticklabels)
axes[ax_mag_idx].set(xlim=flim, ylim=alim, xlabel='Frequency (Hz)',
ylabel='Amplitude (dB)')
# Delay
if 'delay' in plot:
ax_delay_idx = np.where([p == 'delay' for p in plot])[0][0]
axes[ax_delay_idx].plot(f[sl], gd[sl], color=color,
linewidth=2, zorder=4)
# shade nulled regions
for start, stop in zip(*_mask_to_onsets_offsets(mag <= -39.9)):
axes[ax_delay_idx].axvspan(f[start], f[stop - 1],
facecolor='k', alpha=0.05,
zorder=5)
axes[ax_delay_idx].set(xlim=flim, ylabel='Group delay (s)',
xlabel='Frequency (Hz)',
xscale=fscale)
if xticks is not None:
axes[ax_delay_idx].set(xticks=xticks)
axes[ax_delay_idx].set(xticklabels=xticklabels)
axes[ax_delay_idx].set(xlim=flim, ylim=dlim, xlabel='Frequency (Hz)',
ylabel='Delay (s)')
adjust_axes(axes)
tight_layout()
plt_show(show)
return fig
def plot_ideal_filter(freq, gain, axes=None, title='', flim=None, fscale='log',
alim=_DEFAULT_ALIM, color='r', alpha=0.5, linestyle='--',
show=True):
"""Plot an ideal filter response.
Parameters
----------
freq : array-like
The ideal response frequencies to plot (must be in ascending order).
gain : array-like or None
The ideal response gains to plot.
axes : instance of Axes | None
The subplot handle. With None (default), axes are created.
title : str
The title to use, (default: '').
flim : tuple or None
If not None, the x-axis frequency limits (Hz) to use.
        If None (default), freq is used.
fscale : str
Frequency scaling to use, can be "log" (default) or "linear".
alim : tuple
        The y-axis amplitude limits (dB) to use (default: (-80, 10)).
color : color object
The color to use (default: 'r').
alpha : float
The alpha to use (default: 0.5).
linestyle : str
The line style to use (default: '--').
show : bool
Show figure if True (default).
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
See Also
--------
plot_filter
Notes
-----
.. versionadded:: 0.14
Examples
--------
Plot a simple ideal band-pass filter::
>>> from mne.viz import plot_ideal_filter
>>> freq = [0, 1, 40, 50]
>>> gain = [0, 1, 1, 0]
>>> plot_ideal_filter(freq, gain, flim=(0.1, 100)) #doctest: +ELLIPSIS
<...Figure...>
"""
import matplotlib.pyplot as plt
my_freq, my_gain = list(), list()
if freq[0] != 0:
raise ValueError('freq should start with DC (zero) and end with '
'Nyquist, but got %s for DC' % (freq[0],))
freq = np.array(freq)
# deal with semilogx problems @ x=0
_check_option('fscale', fscale, ['log', 'linear'])
if fscale == 'log':
freq[0] = 0.1 * freq[1] if flim is None else min(flim[0], freq[1])
flim = _get_flim(flim, fscale, freq)
transitions = list()
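    # densely sample each band where the gain changes so transitions are
    # drawn as ramps; flat bands only keep their endpoints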
for ii in range(len(freq)):
if ii < len(freq) - 1 and gain[ii] != gain[ii + 1]:
transitions += [[freq[ii], freq[ii + 1]]]
my_freq += np.linspace(freq[ii], freq[ii + 1], 20,
endpoint=False).tolist()
my_gain += np.linspace(gain[ii], gain[ii + 1], 20,
endpoint=False).tolist()
else:
my_freq.append(freq[ii])
my_gain.append(gain[ii])
my_gain = 10 * np.log10(np.maximum(my_gain, 10 ** (alim[0] / 10.)))
if axes is None:
axes = plt.subplots(1)[1]
for transition in transitions:
axes.axvspan(*transition, color=color, alpha=0.1)
axes.plot(my_freq, my_gain, color=color, linestyle=linestyle, alpha=0.5,
linewidth=4, zorder=3)
xticks, xticklabels = _filter_ticks(flim, fscale)
axes.set(ylim=alim, xlabel='Frequency (Hz)', ylabel='Amplitude (dB)',
xscale=fscale)
if xticks is not None:
axes.set(xticks=xticks)
axes.set(xticklabels=xticklabels)
axes.set(xlim=flim)
if title:
axes.set(title=title)
adjust_axes(axes)
tight_layout()
plt_show(show)
return axes.figure
def _handle_event_colors(color_dict, unique_events, event_id):
"""Create event-integer-to-color mapping, assigning defaults as needed."""
default_colors = dict(zip(sorted(unique_events), cycle(_get_color_list())))
# warn if not enough colors
if color_dict is None:
if len(unique_events) > len(_get_color_list()):
warn('More events than default colors available. You should pass '
'a list of unique colors.')
else:
custom_colors = dict()
for key, color in color_dict.items():
if key in unique_events: # key was a valid event integer
custom_colors[key] = color
elif key in event_id: # key was an event label
custom_colors[event_id[key]] = color
else: # key not a valid event, warn and ignore
warn('Event ID %s is in the color dict but is not '
'present in events or event_id.' % str(key))
# warn if color_dict is missing any entries
unassigned = sorted(set(unique_events) - set(custom_colors))
if len(unassigned):
unassigned_str = ', '.join(str(e) for e in unassigned)
warn('Color was not assigned for event%s %s. Default colors will '
'be used.' % (_pl(unassigned), unassigned_str))
default_colors.update(custom_colors)
return default_colors
@fill_doc
def plot_csd(csd, info=None, mode='csd', colorbar=True, cmap=None,
n_cols=None, show=True):
"""Plot CSD matrices.
A sub-plot is created for each frequency. If an info object is passed to
the function, different channel types are plotted in different figures.
Parameters
----------
csd : instance of CrossSpectralDensity
The CSD matrix to plot.
%(info)s
Used to split the figure by channel-type, if provided.
By default, the CSD matrix is plotted as a whole.
mode : 'csd' | 'coh'
Whether to plot the cross-spectral density ('csd', the default), or
the coherence ('coh') between the channels.
colorbar : bool
Whether to show a colorbar. Defaults to ``True``.
cmap : str | None
The matplotlib colormap to use. Defaults to None, which means the
colormap will default to matplotlib's default.
n_cols : int | None
CSD matrices are plotted in a grid. This parameter controls how
        many matrices to plot side by side before starting a new row. By
default, a number will be chosen to make the grid as square as
possible.
show : bool
Whether to show the figure. Defaults to ``True``.
Returns
-------
fig : list of Figure
The figures created by this function.
"""
import matplotlib.pyplot as plt
if mode not in ['csd', 'coh']:
raise ValueError('"mode" should be either "csd" or "coh".')
if info is not None:
info_ch_names = info['ch_names']
sel_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude=[])
sel_mag = pick_types(info, meg='mag', eeg=False, ref_meg=False,
exclude=[])
sel_grad = pick_types(info, meg='grad', eeg=False, ref_meg=False,
exclude=[])
idx_eeg = [csd.ch_names.index(info_ch_names[c])
for c in sel_eeg if info_ch_names[c] in csd.ch_names]
idx_mag = [csd.ch_names.index(info_ch_names[c])
for c in sel_mag if info_ch_names[c] in csd.ch_names]
idx_grad = [csd.ch_names.index(info_ch_names[c])
for c in sel_grad if info_ch_names[c] in csd.ch_names]
indices = [idx_eeg, idx_mag, idx_grad]
titles = ['EEG', 'Magnetometers', 'Gradiometers']
if mode == 'csd':
# The units in which to plot the CSD
units = dict(eeg='µV²', grad='fT²/cm²', mag='fT²')
scalings = dict(eeg=1e12, grad=1e26, mag=1e30)
else:
indices = [np.arange(len(csd.ch_names))]
if mode == 'csd':
titles = ['Cross-spectral density']
# Units and scaling unknown
units = dict()
scalings = dict()
elif mode == 'coh':
titles = ['Coherence']
n_freqs = len(csd.frequencies)
if n_cols is None:
n_cols = int(np.ceil(np.sqrt(n_freqs)))
n_rows = int(np.ceil(n_freqs / float(n_cols)))
figs = []
for ind, title, ch_type in zip(indices, titles, ['eeg', 'mag', 'grad']):
if len(ind) == 0:
continue
fig, axes = plt.subplots(n_rows, n_cols, squeeze=False,
figsize=(2 * n_cols + 1, 2.2 * n_rows))
csd_mats = []
for i in range(len(csd.frequencies)):
cm = csd.get_data(index=i)[ind][:, ind]
if mode == 'csd':
cm = np.abs(cm) * scalings.get(ch_type, 1)
elif mode == 'coh':
# Compute coherence from the CSD matrix
psd = np.diag(cm).real
cm = np.abs(cm) ** 2 / psd[np.newaxis, :] / psd[:, np.newaxis]
csd_mats.append(cm)
vmax = np.max(csd_mats)
for i, (freq, mat) in enumerate(zip(csd.frequencies, csd_mats)):
ax = axes[i // n_cols][i % n_cols]
im = ax.imshow(mat, interpolation='nearest', cmap=cmap, vmin=0,
vmax=vmax)
ax.set_xticks([])
ax.set_yticks([])
if csd._is_sum:
ax.set_title('%.1f-%.1f Hz.' % (np.min(freq),
np.max(freq)))
else:
ax.set_title('%.1f Hz.' % freq)
plt.suptitle(title)
plt.subplots_adjust(top=0.8)
if colorbar:
cb = plt.colorbar(im, ax=[a for ax_ in axes for a in ax_])
if mode == 'csd':
label = u'CSD'
if ch_type in units:
label += u' (%s)' % units[ch_type]
cb.set_label(label)
elif mode == 'coh':
cb.set_label('Coherence')
figs.append(fig)
plt_show(show)
return figs
def plot_chpi_snr(snr_dict, axes=None):
"""Plot time-varying SNR estimates of the HPI coils.
Parameters
----------
snr_dict : dict
The dictionary returned by `~mne.chpi.compute_chpi_snr`. Must have keys
``times``, ``freqs``, ``TYPE_snr``, ``TYPE_power``, and ``TYPE_resid``
(where ``TYPE`` can be ``mag`` or ``grad`` or both).
axes : None | list of matplotlib.axes.Axes
Figure axes in which to draw the SNR, power, and residual plots. The
number of axes should be 3× the number of MEG sensor types present in
``snr_dict``. If ``None`` (the default), a new
`~matplotlib.figure.Figure` is created with the required number of
axes.
Returns
-------
fig : instance of matplotlib.figure.Figure
A figure with subplots for SNR, power, and residual variance,
separately for magnetometers and/or gradiometers (depending on what is
present in ``snr_dict``).
Notes
-----
If you supply a list of existing `~matplotlib.axes.Axes`, then the figure
legend will not be drawn automatically. If you still want it, running
``fig.legend(loc='right', title='cHPI frequencies')`` will recreate it,
though you may also need to manually adjust the margin to make room for it
(e.g., using ``fig.subplots_adjust(right=0.8)``).
.. versionadded:: 0.24
"""
import matplotlib.pyplot as plt
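    # the first two keys of snr_dict are 'times' and 'freqs'; the remaining
    # keys are the per-channel-type SNR, power and residual arrays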
valid_keys = list(snr_dict)[2:]
titles = dict(snr='SNR', power='cHPI power', resid='Residual variance')
full_names = dict(mag='magnetometers', grad='gradiometers')
axes_was_none = axes is None
if axes_was_none:
fig, axes = plt.subplots(len(valid_keys), 1, sharex=True)
else:
fig = axes[0].get_figure()
if len(axes) != len(valid_keys):
raise ValueError(f'axes must be a list of {len(valid_keys)} axes, got '
f'length {len(axes)} ({axes}).')
fig.set_size_inches(10, 10)
legend_labels_exist = False
for key, ax in zip(valid_keys, axes):
ch_type, kind = key.split('_')
scaling = 1 if kind == 'snr' else DEFAULTS['scalings'][ch_type]
plot_kwargs = dict(color='k') if kind == 'resid' else dict()
lines = ax.plot(snr_dict['times'], snr_dict[key] * scaling ** 2,
**plot_kwargs)
# the freqs should be the same for all sensor types (and for SNR and
# power subplots), so we only need to label the lines on one axes
# (otherwise we get duplicate legend entries).
if not legend_labels_exist:
for line, freq in zip(lines, snr_dict['freqs']):
line.set_label(f'{freq} Hz')
legend_labels_exist = True
unit = DEFAULTS['units'][ch_type]
unit = f'({unit})' if '/' in unit else unit
set_kwargs = dict(title=f'{titles[kind]}, {full_names[ch_type]}',
ylabel='dB' if kind == 'snr' else f'{unit}²')
if not axes_was_none:
set_kwargs.update(xlabel='Time (s)')
ax.set(**set_kwargs)
if axes_was_none:
ax.set(xlabel='Time (s)')
fig.align_ylabels()
fig.subplots_adjust(left=0.1, right=0.825, bottom=0.075, top=0.95,
hspace=0.7)
fig.legend(loc='right', title='cHPI frequencies')
return fig
|
# -*- coding: utf-8 -*-
"""Functions to make simple plots with M/EEG data."""
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Cathy Nangini <[email protected]>
# Mainak Jas <[email protected]>
#
# License: Simplified BSD
import base64
import copy
from glob import glob
from io import BytesIO
from itertools import cycle
import os.path as op
import warnings
from distutils.version import LooseVersion
from collections import defaultdict
import numpy as np
from ..defaults import DEFAULTS
from ..fixes import _get_img_fdata
from ..rank import compute_rank
from ..surface import read_surface
from ..io.constants import FIFF
from ..io.proj import make_projector
from ..io.pick import (_DATA_CH_TYPES_SPLIT, pick_types, pick_info,
pick_channels)
from ..source_space import (read_source_spaces, SourceSpaces,
_check_mri, _ensure_src)
from ..transforms import invert_transform, apply_trans, _frame_to_str
from ..utils import (logger, verbose, warn, _check_option, get_subjects_dir,
_mask_to_onsets_offsets, _pl, _on_missing, fill_doc)
from ..io.pick import _picks_by_type
from ..filter import estimate_ringing_samples
from .utils import (tight_layout, _get_color_list, _prepare_trellis, plt_show,
_figure_agg)
def _index_info_cov(info, cov, exclude):
if exclude == 'bads':
exclude = info['bads']
info = pick_info(info, pick_channels(info['ch_names'], cov['names'],
exclude))
del exclude
picks_list = \
_picks_by_type(info, meg_combined=False, ref_meg=False,
exclude=())
picks_by_type = dict(picks_list)
ch_names = [n for n in cov.ch_names if n in info['ch_names']]
ch_idx = [cov.ch_names.index(n) for n in ch_names]
info_ch_names = info['ch_names']
idx_by_type = defaultdict(list)
for ch_type, sel in picks_by_type.items():
idx_by_type[ch_type] = [ch_names.index(info_ch_names[c])
for c in sel if info_ch_names[c] in ch_names]
idx_names = [(idx_by_type[key],
'%s covariance' % DEFAULTS['titles'][key],
DEFAULTS['units'][key],
DEFAULTS['scalings'][key],
key)
for key in _DATA_CH_TYPES_SPLIT
if len(idx_by_type[key]) > 0]
C = cov.data[ch_idx][:, ch_idx]
return info, C, ch_names, idx_names
@verbose
def plot_cov(cov, info, exclude=(), colorbar=True, proj=False, show_svd=True,
show=True, verbose=None):
"""Plot Covariance data.
Parameters
----------
cov : instance of Covariance
The covariance matrix.
%(info_not_none)s
exclude : list of str | str
List of channels to exclude. If empty do not exclude any channel.
If 'bads', exclude info['bads'].
colorbar : bool
Show colorbar or not.
proj : bool
Apply projections or not.
show_svd : bool
Plot also singular values of the noise covariance for each sensor
type. We show square roots ie. standard deviations.
show : bool
Show figure if True.
%(verbose)s
Returns
-------
fig_cov : instance of matplotlib.figure.Figure
The covariance plot.
fig_svd : instance of matplotlib.figure.Figure | None
The SVD spectra plot of the covariance.
See Also
--------
mne.compute_rank
Notes
-----
For each channel type, the rank is estimated using
:func:`mne.compute_rank`.
.. versionchanged:: 0.19
Approximate ranks for each channel type are shown with red dashed lines.
"""
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from scipy import linalg
from ..cov import Covariance
info, C, ch_names, idx_names = _index_info_cov(info, cov, exclude)
del cov, exclude
projs = []
if proj:
projs = copy.deepcopy(info['projs'])
# Activate the projection items
for p in projs:
p['active'] = True
P, ncomp, _ = make_projector(projs, ch_names)
if ncomp > 0:
logger.info(' Created an SSP operator (subspace dimension'
' = %d)' % ncomp)
C = np.dot(P, np.dot(C, P.T))
else:
logger.info(' The projection vectors do not apply to these '
'channels.')
fig_cov, axes = plt.subplots(1, len(idx_names), squeeze=False,
figsize=(3.8 * len(idx_names), 3.7))
for k, (idx, name, _, _, _) in enumerate(idx_names):
vlim = np.max(np.abs(C[idx][:, idx]))
im = axes[0, k].imshow(C[idx][:, idx], interpolation="nearest",
norm=Normalize(vmin=-vlim, vmax=vlim),
cmap='RdBu_r')
axes[0, k].set(title=name)
if colorbar:
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(axes[0, k])
cax = divider.append_axes("right", size="5.5%", pad=0.05)
plt.colorbar(im, cax=cax, format='%.0e')
fig_cov.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.2, 0.26)
tight_layout(fig=fig_cov)
fig_svd = None
if show_svd:
fig_svd, axes = plt.subplots(1, len(idx_names), squeeze=False,
figsize=(3.8 * len(idx_names), 3.7))
for k, (idx, name, unit, scaling, key) in enumerate(idx_names):
this_C = C[idx][:, idx]
s = linalg.svd(this_C, compute_uv=False)
this_C = Covariance(this_C, [info['ch_names'][ii] for ii in idx],
[], [], 0)
this_info = pick_info(info, idx)
this_info['projs'] = []
this_rank = compute_rank(this_C, info=this_info)
# Protect against true zero singular values
s[s <= 0] = 1e-10 * s[s > 0].min()
s = np.sqrt(s) * scaling
axes[0, k].plot(s, color='k', zorder=3)
this_rank = this_rank[key]
axes[0, k].axvline(this_rank - 1, ls='--', color='r',
alpha=0.5, zorder=4, clip_on=False)
axes[0, k].text(this_rank - 1, axes[0, k].get_ylim()[1],
'rank ≈ %d' % (this_rank,), ha='right', va='top',
color='r', alpha=0.5, zorder=4)
axes[0, k].set(ylabel=u'Noise σ (%s)' % unit, yscale='log',
xlabel='Eigenvalue index', title=name,
xlim=[0, len(s) - 1])
tight_layout(fig=fig_svd)
plt_show(show)
return fig_cov, fig_svd
def plot_source_spectrogram(stcs, freq_bins, tmin=None, tmax=None,
source_index=None, colorbar=False, show=True):
"""Plot source power in time-freqency grid.
Parameters
----------
stcs : list of SourceEstimate
Source power for consecutive time windows, one SourceEstimate object
should be provided for each frequency bin.
freq_bins : list of tuples of float
Start and end points of frequency bins of interest.
tmin : float
Minimum time instant to show.
tmax : float
Maximum time instant to show.
source_index : int | None
Index of source for which the spectrogram will be plotted. If None,
the source with the largest activation will be selected.
colorbar : bool
If true, a colorbar will be added to the plot.
show : bool
Show figure if True.
Returns
-------
fig : instance of Figure
The figure.
"""
import matplotlib.pyplot as plt
# Input checks
if len(stcs) == 0:
raise ValueError('cannot plot spectrogram if len(stcs) == 0')
stc = stcs[0]
if tmin is not None and tmin < stc.times[0]:
raise ValueError('tmin cannot be smaller than the first time point '
'provided in stcs')
if tmax is not None and tmax > stc.times[-1] + stc.tstep:
raise ValueError('tmax cannot be larger than the sum of the last time '
'point and the time step, which are provided in stcs')
# Preparing time-frequency cell boundaries for plotting
if tmin is None:
tmin = stc.times[0]
if tmax is None:
tmax = stc.times[-1] + stc.tstep
time_bounds = np.arange(tmin, tmax + stc.tstep, stc.tstep)
freq_bounds = sorted(set(np.ravel(freq_bins)))
freq_ticks = copy.deepcopy(freq_bounds)
# Reject time points that will not be plotted and gather results
source_power = []
for stc in stcs:
stc = stc.copy() # copy since crop modifies inplace
stc.crop(tmin, tmax - stc.tstep)
source_power.append(stc.data)
source_power = np.array(source_power)
# Finding the source with maximum source power
if source_index is None:
source_index = np.unravel_index(source_power.argmax(),
source_power.shape)[1]
# If there is a gap in the frequency bins record its locations so that it
# can be covered with a gray horizontal bar
gap_bounds = []
for i in range(len(freq_bins) - 1):
lower_bound = freq_bins[i][1]
upper_bound = freq_bins[i + 1][0]
if lower_bound != upper_bound:
freq_bounds.remove(lower_bound)
gap_bounds.append((lower_bound, upper_bound))
# Preparing time-frequency grid for plotting
time_grid, freq_grid = np.meshgrid(time_bounds, freq_bounds)
# Plotting the results
fig = plt.figure(figsize=(9, 6))
plt.pcolor(time_grid, freq_grid, source_power[:, source_index, :],
cmap='Reds')
ax = plt.gca()
ax.set(title='Source power', xlabel='Time (s)', ylabel='Frequency (Hz)')
time_tick_labels = [str(np.round(t, 2)) for t in time_bounds]
n_skip = 1 + len(time_bounds) // 10
for i in range(len(time_bounds)):
if i % n_skip != 0:
time_tick_labels[i] = ''
ax.set_xticks(time_bounds)
ax.set_xticklabels(time_tick_labels)
plt.xlim(time_bounds[0], time_bounds[-1])
plt.yscale('log')
ax.set_yticks(freq_ticks)
ax.set_yticklabels([np.round(freq, 2) for freq in freq_ticks])
plt.ylim(freq_bounds[0], freq_bounds[-1])
plt.grid(True, ls='-')
if colorbar:
plt.colorbar()
tight_layout(fig=fig)
# Covering frequency gaps with horizontal bars
for lower_bound, upper_bound in gap_bounds:
plt.barh(lower_bound, time_bounds[-1] - time_bounds[0], upper_bound -
lower_bound, time_bounds[0], color='#666666')
plt_show(show)
return fig
def _plot_mri_contours(mri_fname, surfaces, src, orientation='coronal',
slices=None, show=True, show_indices=False,
show_orientation=False, img_output=False, width=512):
"""Plot BEM contours on anatomical slices."""
import matplotlib.pyplot as plt
from matplotlib import patheffects
from .._freesurfer import _mri_orientation, _read_mri_info
# For ease of plotting, we will do everything in voxel coordinates.
_check_option('orientation', orientation, ('coronal', 'axial', 'sagittal'))
# Load the T1 data
_, vox_mri_t, _, _, _, nim = _read_mri_info(
mri_fname, units='mm', return_img=True)
mri_vox_t = invert_transform(vox_mri_t)['trans']
del vox_mri_t
# plot axes (x, y, z) as data axes
(x, y, z), (flip_x, flip_y, flip_z), order = _mri_orientation(
nim, orientation)
transpose = x < y
data = _get_img_fdata(nim)
shift_x = data.shape[x] if flip_x < 0 else 0
shift_y = data.shape[y] if flip_y < 0 else 0
n_slices = data.shape[z]
if slices is None:
slices = np.round(np.linspace(0, n_slices - 1, 14)).astype(int)[1:-1]
slices = np.atleast_1d(slices).copy()
slices[slices < 0] += n_slices # allow negative indexing
if not np.array_equal(np.sort(slices), slices) or slices.ndim != 1 or \
slices.size < 1 or slices[0] < 0 or slices[-1] >= n_slices or \
slices.dtype.kind not in 'iu':
raise ValueError('slices must be a sorted 1D array of int with unique '
'elements, at least one element, and no elements '
'greater than %d, got %s' % (n_slices - 1, slices))
if flip_z < 0:
# Proceed in the opposite order to maintain left-to-right / orientation
slices = slices[::-1]
# create of list of surfaces
surfs = list()
for file_name, color in surfaces:
surf = dict()
surf['rr'], surf['tris'] = read_surface(file_name)
# move surface to voxel coordinate system
surf['rr'] = apply_trans(mri_vox_t, surf['rr'])
surfs.append((surf, color))
sources = list()
if src is not None:
_ensure_src(src, extra=' or None')
# Eventually we can relax this by allowing ``trans`` if need be
if src[0]['coord_frame'] != FIFF.FIFFV_COORD_MRI:
raise ValueError(
'Source space must be in MRI coordinates, got '
f'{_frame_to_str[src[0]["coord_frame"]]}')
for src_ in src:
points = src_['rr'][src_['inuse'].astype(bool)]
sources.append(apply_trans(mri_vox_t, points * 1e3))
sources = np.concatenate(sources, axis=0)
if img_output:
n_col = n_axes = 1
dpi = 96
# 2x standard MRI resolution is probably good enough for the
# traces
w = width / dpi
figsize = (w, w / data.shape[x] * data.shape[y])
fig = _figure_agg(figsize=figsize, dpi=dpi, facecolor='k')
ax = fig.add_axes([0, 0, 1, 1], frame_on=False, facecolor='k')
axs = [ax] * len(slices)
plt.close(fig)
else:
n_col = 4
fig, axs, _, _ = _prepare_trellis(len(slices), n_col)
fig.set_facecolor('k')
dpi = fig.get_dpi()
n_axes = len(axs)
bounds = np.concatenate(
[[-np.inf], slices[:-1] + np.diff(slices) / 2., [np.inf]]) # float
slicer = [slice(None)] * 3
ori_labels = dict(R='LR', A='PA', S='IS')
xlabels, ylabels = ori_labels[order[0]], ori_labels[order[1]]
path_effects = [patheffects.withStroke(linewidth=4, foreground="k",
alpha=0.75)]
out = list() if img_output else fig
for ai, (ax, sl, lower, upper) in enumerate(zip(
axs, slices, bounds[:-1], bounds[1:])):
# adjust the orientations for good view
slicer[z] = sl
dat = data[tuple(slicer)]
dat = dat.T if transpose else dat
dat = dat[::flip_y, ::flip_x]
# First plot the anatomical data
if img_output:
ax.clear()
ax.imshow(dat, cmap=plt.cm.gray, origin='lower')
ax.set_autoscale_on(False)
ax.axis('off')
ax.set_aspect('equal') # XXX eventually could deal with zooms
# and then plot the contours on top
for surf, color in surfs:
with warnings.catch_warnings(record=True): # ignore contour warn
warnings.simplefilter('ignore')
ax.tricontour(flip_x * surf['rr'][:, x] + shift_x,
flip_y * surf['rr'][:, y] + shift_y,
surf['tris'], surf['rr'][:, z],
levels=[sl], colors=color, linewidths=1.0,
zorder=1)
if len(sources):
in_slice = (sources[:, z] >= lower) & (sources[:, z] < upper)
ax.scatter(flip_x * sources[in_slice, x] + shift_x,
flip_y * sources[in_slice, y] + shift_y,
marker='.', color='#FF00FF', s=1, zorder=2)
if show_indices:
ax.text(dat.shape[1] // 8 + 0.5, 0.5, str(sl),
color='w', fontsize='x-small', va='bottom', ha='left')
# label the axes
kwargs = dict(
color='#66CCEE', fontsize='medium', path_effects=path_effects,
family='monospace', clip_on=False, zorder=5, weight='bold')
if show_orientation:
if ai % n_col == 0: # left
ax.text(0, dat.shape[0] / 2., xlabels[0],
va='center', ha='left', **kwargs)
if ai % n_col == n_col - 1 or ai == n_axes - 1: # right
ax.text(dat.shape[1] - 1, dat.shape[0] / 2., xlabels[1],
va='center', ha='right', **kwargs)
if ai >= n_axes - n_col: # bottom
ax.text(dat.shape[1] / 2., 0, ylabels[0],
ha='center', va='bottom', **kwargs)
if ai < n_col or n_col == 1: # top
ax.text(dat.shape[1] / 2., dat.shape[0] - 1, ylabels[1],
ha='center', va='top', **kwargs)
if img_output:
output = BytesIO()
fig.savefig(output, bbox_inches='tight',
pad_inches=0, format='png', dpi=dpi)
out.append(base64.b64encode(output.getvalue()).decode('ascii'))
fig.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,
hspace=0.)
plt_show(show, fig=fig)
return out, flip_z
def plot_bem(subject=None, subjects_dir=None, orientation='coronal',
slices=None, brain_surfaces=None, src=None, show=True,
show_indices=True, mri='T1.mgz', show_orientation=True):
"""Plot BEM contours on anatomical slices.
Parameters
----------
subject : str
Subject name.
subjects_dir : str | None
Path to the SUBJECTS_DIR. If None, the path is obtained by using
the environment variable SUBJECTS_DIR.
orientation : str
'coronal' or 'axial' or 'sagittal'.
slices : list of int
Slice indices.
brain_surfaces : None | str | list of str
One or more brain surface to plot (optional). Entries should correspond
to files in the subject's ``surf`` directory (e.g. ``"white"``).
src : None | SourceSpaces | str
SourceSpaces instance or path to a source space to plot individual
sources as scatter-plot. Sources will be shown on exactly one slice
(whichever slice is closest to each source in the given orientation
plane). Path can be absolute or relative to the subject's ``bem``
folder.
.. versionchanged:: 0.20
All sources are shown on the nearest slice rather than some
being omitted.
show : bool
Show figure if True.
show_indices : bool
Show slice indices if True.
.. versionadded:: 0.20
mri : str
The name of the MRI to use. Can be a standard FreeSurfer MRI such as
``'T1.mgz'``, or a full path to a custom MRI file.
.. versionadded:: 0.21
show_orientation : bool
Show the orientation (L/R, P/A, I/S) of the data slices.
.. versionadded:: 0.21
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
See Also
--------
mne.viz.plot_alignment
Notes
-----
Images are plotted in MRI voxel coordinates.
If ``src`` is not None, each source point is shown on the slice it is
closest to: for a given slice index, all sources lying between the
midpoint to the previous slice and the midpoint to the next slice are
drawn on that slice.
For large slice decimations, this can
make some source points appear outside the BEM contour, which is shown
for the given slice index. For example, in the case where the single
midpoint slice is used ``slices=[128]``, all source points will be shown
on top of the midpoint MRI slice with the BEM boundary drawn for that
slice.
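Examples
--------
A minimal usage sketch; the subject name, ``subjects_dir`` path, and slice
indices below are placeholders, not values taken from this module::
>>> from mne.viz import plot_bem
>>> fig = plot_bem(subject='sample', subjects_dir='/path/to/subjects',
...                orientation='coronal', slices=[64, 128, 192]) #doctest: +SKIP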
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
mri_fname = _check_mri(mri, subject, subjects_dir)
# Get the BEM surface filenames
bem_path = op.join(subjects_dir, subject, 'bem')
if not op.isdir(bem_path):
raise IOError('Subject bem directory "%s" does not exist' % bem_path)
surfaces = _get_bem_plotting_surfaces(bem_path)
if brain_surfaces is not None:
if isinstance(brain_surfaces, str):
brain_surfaces = (brain_surfaces,)
for surf_name in brain_surfaces:
for hemi in ('lh', 'rh'):
surf_fname = op.join(subjects_dir, subject, 'surf',
hemi + '.' + surf_name)
if op.exists(surf_fname):
surfaces.append((surf_fname, '#00DD00'))
else:
raise IOError("Surface %s does not exist." % surf_fname)
if isinstance(src, str):
if not op.exists(src):
src_ = op.join(subjects_dir, subject, 'bem', src)
if op.exists(src_):
src = src_
else:
raise IOError("%s does not exist" % src)
src = read_source_spaces(src)
elif src is not None and not isinstance(src, SourceSpaces):
raise TypeError("src needs to be None, str or SourceSpaces instance, "
"not %s" % repr(src))
if len(surfaces) == 0:
raise IOError('No surface files found. Surface files must end with '
'inner_skull.surf, outer_skull.surf or outer_skin.surf')
# Plot the contours
return _plot_mri_contours(mri_fname, surfaces, src, orientation, slices,
show, show_indices, show_orientation)[0]
def _get_bem_plotting_surfaces(bem_path):
surfaces = []
for surf_name, color in (('*inner_skull', '#FF0000'),
('*outer_skull', '#FFFF00'),
('*outer_skin', '#FFAA80')):
surf_fname = glob(op.join(bem_path, surf_name + '.surf'))
if len(surf_fname) > 0:
surf_fname = surf_fname[0]
logger.info("Using surface: %s" % surf_fname)
surfaces.append((surf_fname, color))
return surfaces
@verbose
def plot_events(events, sfreq=None, first_samp=0, color=None, event_id=None,
axes=None, equal_spacing=True, show=True, on_missing='raise',
verbose=None):
"""Plot events to get a visual display of the paradigm.
Parameters
----------
events : array, shape (n_events, 3)
The events.
sfreq : float | None
The sample frequency. If None, data will be displayed in samples (not
seconds).
first_samp : int
The index of the first sample. Recordings made on Neuromag systems
number samples relative to the system start (not relative to the
beginning of the recording). In such cases the ``raw.first_samp``
attribute can be passed here. Default is 0.
color : dict | None
Dictionary of event_id integers as keys and colors as values. If None,
colors are automatically drawn from a default list (cycled through if
the number of events exceeds the number of default colors). Color can be any
valid :doc:`matplotlib color <tutorials/colors/colors>`.
event_id : dict | None
Dictionary of event labels (e.g. 'aud_l') as keys and their associated
event_id values. Labels are used to plot a legend. If None, no legend
is drawn.
axes : instance of Axes
The subplot handle.
equal_spacing : bool
Use equal spacing between events in y-axis.
show : bool
Show figure if True.
%(on_missing_events)s
%(verbose)s
Returns
-------
fig : matplotlib.figure.Figure
The figure object containing the plot.
Notes
-----
.. versionadded:: 0.9.0
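Examples
--------
A self-contained sketch with a small synthetic events array; the sampling
frequency and event labels are illustrative only::
>>> import numpy as np
>>> events = np.array([[100, 0, 1], [300, 0, 2], [520, 0, 1]])
>>> fig = plot_events(events, sfreq=1000.,
...                   event_id={'aud': 1, 'vis': 2}) #doctest: +SKIP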
"""
if sfreq is None:
sfreq = 1.0
xlabel = 'Samples'
else:
xlabel = 'Time (s)'
events = np.asarray(events)
if len(events) == 0:
raise ValueError('No events in events array, cannot plot.')
unique_events = np.unique(events[:, 2])
if event_id is not None:
# get labels and unique event ids from event_id dict,
# sorted by value
event_id_rev = {v: k for k, v in event_id.items()}
conditions, unique_events_id = zip(*sorted(event_id.items(),
key=lambda x: x[1]))
keep = np.ones(len(unique_events_id), bool)
for ii, this_event in enumerate(unique_events_id):
if this_event not in unique_events:
msg = f'{this_event} from event_id is not present in events.'
_on_missing(on_missing, msg)
keep[ii] = False
conditions = [cond for cond, k in zip(conditions, keep) if k]
unique_events_id = [id_ for id_, k in zip(unique_events_id, keep) if k]
if len(unique_events_id) == 0:
raise RuntimeError('No usable event IDs found')
for this_event in unique_events:
if this_event not in unique_events_id:
warn('event %s missing from event_id will be ignored'
% this_event)
else:
unique_events_id = unique_events
color = _handle_event_colors(color, unique_events, event_id)
import matplotlib.pyplot as plt
fig = None
if axes is None:
fig = plt.figure()
ax = axes if axes else plt.gca()
unique_events_id = np.array(unique_events_id)
min_event = np.min(unique_events_id)
max_event = np.max(unique_events_id)
max_x = (events[np.in1d(events[:, 2], unique_events_id), 0].max() -
first_samp) / sfreq
handles, labels = list(), list()
for idx, ev in enumerate(unique_events_id):
ev_mask = events[:, 2] == ev
count = ev_mask.sum()
if count == 0:
continue
y = np.full(count, idx + 1 if equal_spacing else events[ev_mask, 2][0])
if event_id is not None:
event_label = '%s (%s)' % (event_id_rev[ev], count)
else:
event_label = 'N=%d' % (count,)
labels.append(event_label)
kwargs = {}
if ev in color:
kwargs['color'] = color[ev]
handles.append(
ax.plot((events[ev_mask, 0] - first_samp) / sfreq,
y, '.', clip_on=False, **kwargs)[0])
if equal_spacing:
ax.set_ylim(0, unique_events_id.size + 1)
ax.set_yticks(1 + np.arange(unique_events_id.size))
ax.set_yticklabels(unique_events_id)
else:
ax.set_ylim([min_event - 1, max_event + 1])
ax.set(xlabel=xlabel, ylabel='Event id', xlim=[0, max_x])
ax.grid(True)
fig = fig if fig is not None else plt.gcf()
# reverse order so that the highest numbers are at the top
# (match plot order)
handles, labels = handles[::-1], labels[::-1]
box = ax.get_position()
factor = 0.8 if event_id is not None else 0.9
ax.set_position([box.x0, box.y0, box.width * factor, box.height])
ax.legend(handles, labels, loc='center left', bbox_to_anchor=(1, 0.5),
fontsize='small')
fig.canvas.draw()
plt_show(show)
return fig
def _get_presser(fig):
"""Get our press callback."""
import matplotlib
callbacks = fig.canvas.callbacks.callbacks['button_press_event']
func = None
for key, val in callbacks.items():
if LooseVersion(matplotlib.__version__) >= '3':
func = val()
else:
func = val.func
if func.__class__.__name__ == 'partial':
break
else:
func = None
assert func is not None
return func
def plot_dipole_amplitudes(dipoles, colors=None, show=True):
"""Plot the amplitude traces of a set of dipoles.
Parameters
----------
dipoles : list of instance of Dipole
The dipoles whose amplitudes should be shown.
colors : list of color | None
Color to plot with each dipole. If None default colors are used.
show : bool
Show figure if True.
Returns
-------
fig : matplotlib.figure.Figure
The figure object containing the plot.
Notes
-----
.. versionadded:: 0.9.0
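Examples
--------
A sketch assuming ``dip`` is an existing :class:`mne.Dipole` (e.g. returned
by :func:`mne.fit_dipole`)::
>>> fig = plot_dipole_amplitudes([dip]) #doctest: +SKIP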
"""
import matplotlib.pyplot as plt
if colors is None:
colors = cycle(_get_color_list())
fig, ax = plt.subplots(1, 1)
xlim = [np.inf, -np.inf]
for dip, color in zip(dipoles, colors):
ax.plot(dip.times, dip.amplitude * 1e9, color=color, linewidth=1.5)
xlim[0] = min(xlim[0], dip.times[0])
xlim[1] = max(xlim[1], dip.times[-1])
ax.set(xlim=xlim, xlabel='Time (s)', ylabel='Amplitude (nAm)')
if show:
fig.show(warn=False)
return fig
def adjust_axes(axes, remove_spines=('top', 'right'), grid=True):
"""Adjust some properties of axes.
Parameters
----------
axes : list
List of axes to process.
remove_spines : list of str
Which axis spines to remove.
grid : bool
Turn grid on (True) or off (False).
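Examples
--------
A minimal sketch on a freshly created Axes (purely illustrative)::
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> adjust_axes([ax], remove_spines=('top', 'right'), grid=True) #doctest: +SKIP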
"""
axes = [axes] if not isinstance(axes, (list, tuple, np.ndarray)) else axes
for ax in axes:
if grid:
ax.grid(zorder=0)
for key in remove_spines:
ax.spines[key].set_visible(False)
def _filter_ticks(lims, fscale):
"""Create approximately spaced ticks between lims."""
if fscale == 'linear':
return None, None # let matplotlib handle it
lims = np.array(lims)
ticks = list()
if lims[1] > 20 * lims[0]:
base = np.array([1, 2, 4])
else:
base = np.arange(1, 11)
for exp in range(int(np.floor(np.log10(lims[0]))),
int(np.floor(np.log10(lims[1]))) + 1):
ticks += (base * (10 ** exp)).tolist()
ticks = np.array(ticks)
ticks = ticks[(ticks >= lims[0]) & (ticks <= lims[1])]
ticklabels = [('%g' if t < 1 else '%d') % t for t in ticks]
return ticks, ticklabels
def _get_flim(flim, fscale, freq, sfreq=None):
"""Get reasonable frequency limits."""
if flim is None:
if freq is None:
flim = [0.1 if fscale == 'log' else 0., sfreq / 2.]
else:
if fscale == 'linear':
flim = [freq[0]]
else:
flim = [freq[0] if freq[0] > 0 else 0.1 * freq[1]]
flim += [freq[-1]]
if fscale == 'log':
if flim[0] <= 0:
raise ValueError('flim[0] must be positive, got %s' % flim[0])
elif flim[0] < 0:
raise ValueError('flim[0] must be non-negative, got %s' % flim[0])
return flim
def _check_fscale(fscale):
"""Check for valid fscale."""
if not isinstance(fscale, str) or fscale not in ('log', 'linear'):
raise ValueError('fscale must be "log" or "linear", got %s'
% (fscale,))
_DEFAULT_ALIM = (-80, 10)
def plot_filter(h, sfreq, freq=None, gain=None, title=None, color='#1f77b4',
flim=None, fscale='log', alim=_DEFAULT_ALIM, show=True,
compensate=False, plot=('time', 'magnitude', 'delay'),
axes=None):
"""Plot properties of a filter.
Parameters
----------
h : dict or ndarray
An IIR dict or 1D ndarray of coefficients (for FIR filter).
sfreq : float
Sample rate of the data (Hz).
freq : array-like or None
The ideal response frequencies to plot (must be in ascending order).
If None (default), do not plot the ideal response.
gain : array-like or None
The ideal response gains to plot.
If None (default), do not plot the ideal response.
title : str | None
The title to use. If None (default), determine the title based
on the type of the system.
color : color object
The color to use (default '#1f77b4').
flim : tuple or None
If not None, the x-axis frequency limits (Hz) to use.
If None, freq will be used. If None (default) and freq is None,
``(0.1, sfreq / 2.)`` will be used.
fscale : str
Frequency scaling to use, can be "log" (default) or "linear".
alim : tuple
The y-axis amplitude limits (dB) to use (default: (-80, 10)).
show : bool
Show figure if True (default).
compensate : bool
If True, compensate for the filter delay (phase will not be shown).
- For linear-phase FIR filters, this visualizes the filter coefficients
assuming that the output will be shifted by ``N // 2``.
- For IIR filters, this changes the filter coefficient display
by filtering backward and forward, and the frequency response
by squaring it.
.. versionadded:: 0.18
plot : list | tuple | str
A list of the requested plots from ``time``, ``magnitude`` and
``delay``. Default is to plot all three filter properties
('time', 'magnitude', 'delay').
.. versionadded:: 0.21.0
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of requested plot types. If instance of
Axes, there must be only one filter property plotted.
Defaults to ``None``.
.. versionadded:: 0.21.0
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the plots.
See Also
--------
mne.filter.create_filter
plot_ideal_filter
Notes
-----
.. versionadded:: 0.14
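Examples
--------
A sketch using an arbitrary 101-tap FIR low-pass; the coefficients are
illustrative only and not produced by :func:`mne.filter.create_filter`::
>>> import numpy as np
>>> h = np.hanning(101)
>>> h /= h.sum()
>>> fig = plot_filter(h, sfreq=1000., flim=(0.1, 500)) #doctest: +SKIP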
"""
from scipy.signal import (
freqz, group_delay, lfilter, filtfilt, sosfilt, sosfiltfilt)
import matplotlib.pyplot as plt
sfreq = float(sfreq)
_check_option('fscale', fscale, ['log', 'linear'])
if isinstance(plot, str):
plot = [plot]
for xi, x in enumerate(plot):
_check_option('plot[%d]' % xi, x, ('magnitude', 'delay', 'time'))
flim = _get_flim(flim, fscale, freq, sfreq)
if fscale == 'log':
omega = np.logspace(np.log10(flim[0]), np.log10(flim[1]), 1000)
else:
omega = np.linspace(flim[0], flim[1], 1000)
xticks, xticklabels = _filter_ticks(flim, fscale)
omega /= sfreq / (2 * np.pi)
if isinstance(h, dict):  # IIR filter (dict with 'sos' or 'b'/'a' coefficients)
if 'sos' in h:
H = np.ones(len(omega), np.complex128)
gd = np.zeros(len(omega))
for section in h['sos']:
this_H = freqz(section[:3], section[3:], omega)[1]
H *= this_H
if compensate:
H *= this_H.conj() # time reversal is freq conj
else:
# Assume the forward-backward delay zeros out, which it
# mostly should
with warnings.catch_warnings(record=True): # singular GD
warnings.simplefilter('ignore')
gd += group_delay((section[:3], section[3:]), omega)[1]
n = estimate_ringing_samples(h['sos'])
delta = np.zeros(n)
delta[0] = 1
if compensate:
delta = np.pad(delta, [(n - 1, 0)], 'constant')
func = sosfiltfilt
gd += (len(delta) - 1) // 2
else:
func = sosfilt
h = func(h['sos'], delta)
else:
H = freqz(h['b'], h['a'], omega)[1]
if compensate:
H *= H.conj()
with warnings.catch_warnings(record=True): # singular GD
warnings.simplefilter('ignore')
gd = group_delay((h['b'], h['a']), omega)[1]
if compensate:
gd += group_delay((h['b'].conj(), h['a'].conj()), omega)[1]
n = estimate_ringing_samples((h['b'], h['a']))
delta = np.zeros(n)
delta[0] = 1
if compensate:
delta = np.pad(delta, [(n - 1, 0)], 'constant')
func = filtfilt
else:
func = lfilter
h = func(h['b'], h['a'], delta)
if title is None:
title = 'SOS (IIR) filter'
if compensate:
title += ' (forward-backward)'
else:
H = freqz(h, worN=omega)[1]
with warnings.catch_warnings(record=True): # singular GD
warnings.simplefilter('ignore')
gd = group_delay((h, [1.]), omega)[1]
title = 'FIR filter' if title is None else title
if compensate:
title += ' (delay-compensated)'
fig = None
if axes is None:
fig, axes = plt.subplots(len(plot), 1)
if isinstance(axes, plt.Axes):
axes = [axes]
elif isinstance(axes, np.ndarray):
axes = list(axes)
if fig is None:
fig = axes[0].get_figure()
if len(axes) != len(plot):
raise ValueError('Length of axes (%d) must be the same as number of '
'requested filter properties (%d)'
% (len(axes), len(plot)))
t = np.arange(len(h))
dlim = np.abs(t).max() / 2.
dlim = [-dlim, dlim]
if compensate:
n_shift = (len(h) - 1) // 2
t -= n_shift
assert t[0] == -t[-1]
gd -= n_shift
t = t / sfreq
gd = gd / sfreq
f = omega * sfreq / (2 * np.pi)
sl = slice(0 if fscale == 'linear' else 1, None, None)
mag = 10 * np.log10(np.maximum((H * H.conj()).real, 1e-20))
if 'time' in plot:
ax_time_idx = np.where([p == 'time' for p in plot])[0][0]
axes[ax_time_idx].plot(t, h, color=color)
axes[ax_time_idx].set(xlim=t[[0, -1]], xlabel='Time (s)',
ylabel='Amplitude', title=title)
# Magnitude
if 'magnitude' in plot:
ax_mag_idx = np.where([p == 'magnitude' for p in plot])[0][0]
axes[ax_mag_idx].plot(f[sl], mag[sl], color=color,
linewidth=2, zorder=4)
if freq is not None and gain is not None:
plot_ideal_filter(freq, gain, axes[ax_mag_idx],
fscale=fscale, show=False)
axes[ax_mag_idx].set(ylabel='Magnitude (dB)', xlabel='', xscale=fscale)
if xticks is not None:
axes[ax_mag_idx].set(xticks=xticks)
axes[ax_mag_idx].set(xticklabels=xticklabels)
axes[ax_mag_idx].set(xlim=flim, ylim=alim, xlabel='Frequency (Hz)',
ylabel='Amplitude (dB)')
# Delay
if 'delay' in plot:
ax_delay_idx = np.where([p == 'delay' for p in plot])[0][0]
axes[ax_delay_idx].plot(f[sl], gd[sl], color=color,
linewidth=2, zorder=4)
# shade nulled regions
for start, stop in zip(*_mask_to_onsets_offsets(mag <= -39.9)):
axes[ax_delay_idx].axvspan(f[start], f[stop - 1],
facecolor='k', alpha=0.05,
zorder=5)
axes[ax_delay_idx].set(xlim=flim, ylabel='Group delay (s)',
xlabel='Frequency (Hz)',
xscale=fscale)
if xticks is not None:
axes[ax_delay_idx].set(xticks=xticks)
axes[ax_delay_idx].set(xticklabels=xticklabels)
axes[ax_delay_idx].set(xlim=flim, ylim=dlim, xlabel='Frequency (Hz)',
ylabel='Delay (s)')
adjust_axes(axes)
tight_layout()
plt_show(show)
return fig
def plot_ideal_filter(freq, gain, axes=None, title='', flim=None, fscale='log',
alim=_DEFAULT_ALIM, color='r', alpha=0.5, linestyle='--',
show=True):
"""Plot an ideal filter response.
Parameters
----------
freq : array-like
The ideal response frequencies to plot (must be in ascending order).
gain : array-like or None
The ideal response gains to plot.
axes : instance of Axes | None
The subplot handle. With None (default), axes are created.
title : str
The title to use (default: '').
flim : tuple or None
If not None, the x-axis frequency limits (Hz) to use.
If None (default), ``freq`` is used.
fscale : str
Frequency scaling to use, can be "log" (default) or "linear".
alim : tuple
If not None (default), the y-axis limits (dB) to use.
color : color object
The color to use (default: 'r').
alpha : float
The alpha to use (default: 0.5).
linestyle : str
The line style to use (default: '--').
show : bool
Show figure if True (default).
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
See Also
--------
plot_filter
Notes
-----
.. versionadded:: 0.14
Examples
--------
Plot a simple ideal band-pass filter::
>>> from mne.viz import plot_ideal_filter
>>> freq = [0, 1, 40, 50]
>>> gain = [0, 1, 1, 0]
>>> plot_ideal_filter(freq, gain, flim=(0.1, 100)) #doctest: +ELLIPSIS
<...Figure...>
"""
import matplotlib.pyplot as plt
my_freq, my_gain = list(), list()
if freq[0] != 0:
raise ValueError('freq should start with DC (zero) and end with '
'Nyquist, but got %s for DC' % (freq[0],))
freq = np.array(freq)
# deal with semilogx problems @ x=0
_check_option('fscale', fscale, ['log', 'linear'])
if fscale == 'log':
freq[0] = 0.1 * freq[1] if flim is None else min(flim[0], freq[1])
flim = _get_flim(flim, fscale, freq)
transitions = list()
for ii in range(len(freq)):
if ii < len(freq) - 1 and gain[ii] != gain[ii + 1]:
transitions += [[freq[ii], freq[ii + 1]]]
my_freq += np.linspace(freq[ii], freq[ii + 1], 20,
endpoint=False).tolist()
my_gain += np.linspace(gain[ii], gain[ii + 1], 20,
endpoint=False).tolist()
else:
my_freq.append(freq[ii])
my_gain.append(gain[ii])
my_gain = 10 * np.log10(np.maximum(my_gain, 10 ** (alim[0] / 10.)))
if axes is None:
axes = plt.subplots(1)[1]
for transition in transitions:
axes.axvspan(*transition, color=color, alpha=0.1)
axes.plot(my_freq, my_gain, color=color, linestyle=linestyle, alpha=0.5,
linewidth=4, zorder=3)
xticks, xticklabels = _filter_ticks(flim, fscale)
axes.set(ylim=alim, xlabel='Frequency (Hz)', ylabel='Amplitude (dB)',
xscale=fscale)
if xticks is not None:
axes.set(xticks=xticks)
axes.set(xticklabels=xticklabels)
axes.set(xlim=flim)
if title:
axes.set(title=title)
adjust_axes(axes)
tight_layout()
plt_show(show)
return axes.figure
def _handle_event_colors(color_dict, unique_events, event_id):
"""Create event-integer-to-color mapping, assigning defaults as needed."""
default_colors = dict(zip(sorted(unique_events), cycle(_get_color_list())))
# warn if not enough colors
if color_dict is None:
if len(unique_events) > len(_get_color_list()):
warn('More events than default colors available. You should pass '
'a list of unique colors.')
else:
custom_colors = dict()
for key, color in color_dict.items():
if key in unique_events: # key was a valid event integer
custom_colors[key] = color
elif key in event_id: # key was an event label
custom_colors[event_id[key]] = color
else: # key not a valid event, warn and ignore
warn('Event ID %s is in the color dict but is not '
'present in events or event_id.' % str(key))
# warn if color_dict is missing any entries
unassigned = sorted(set(unique_events) - set(custom_colors))
if len(unassigned):
unassigned_str = ', '.join(str(e) for e in unassigned)
warn('Color was not assigned for event%s %s. Default colors will '
'be used.' % (_pl(unassigned), unassigned_str))
default_colors.update(custom_colors)
return default_colors
@fill_doc
def plot_csd(csd, info=None, mode='csd', colorbar=True, cmap=None,
n_cols=None, show=True):
"""Plot CSD matrices.
A sub-plot is created for each frequency. If an info object is passed to
the function, different channel types are plotted in different figures.
Parameters
----------
csd : instance of CrossSpectralDensity
The CSD matrix to plot.
%(info)s
Used to split the figure by channel-type, if provided.
By default, the CSD matrix is plotted as a whole.
mode : 'csd' | 'coh'
Whether to plot the cross-spectral density ('csd', the default), or
the coherence ('coh') between the channels.
colorbar : bool
Whether to show a colorbar. Defaults to ``True``.
cmap : str | None
The matplotlib colormap to use. Defaults to None, which uses
matplotlib's default colormap.
n_cols : int | None
CSD matrices are plotted in a grid. This parameter controls how
many matrices to plot side by side before starting a new row. By
default, a number will be chosen to make the grid as square as
possible.
show : bool
Whether to show the figure. Defaults to ``True``.
Returns
-------
fig : list of Figure
The figures created by this function.
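Examples
--------
A sketch assuming ``csd`` is an already-computed
:class:`~mne.time_frequency.CrossSpectralDensity` (e.g. from
:func:`mne.time_frequency.csd_morlet`)::
>>> figs = plot_csd(csd, mode='coh') #doctest: +SKIP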
"""
import matplotlib.pyplot as plt
if mode not in ['csd', 'coh']:
raise ValueError('"mode" should be either "csd" or "coh".')
if info is not None:
info_ch_names = info['ch_names']
sel_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude=[])
sel_mag = pick_types(info, meg='mag', eeg=False, ref_meg=False,
exclude=[])
sel_grad = pick_types(info, meg='grad', eeg=False, ref_meg=False,
exclude=[])
idx_eeg = [csd.ch_names.index(info_ch_names[c])
for c in sel_eeg if info_ch_names[c] in csd.ch_names]
idx_mag = [csd.ch_names.index(info_ch_names[c])
for c in sel_mag if info_ch_names[c] in csd.ch_names]
idx_grad = [csd.ch_names.index(info_ch_names[c])
for c in sel_grad if info_ch_names[c] in csd.ch_names]
indices = [idx_eeg, idx_mag, idx_grad]
titles = ['EEG', 'Magnetometers', 'Gradiometers']
if mode == 'csd':
# The units in which to plot the CSD
units = dict(eeg='µV²', grad='fT²/cm²', mag='fT²')
scalings = dict(eeg=1e12, grad=1e26, mag=1e30)
else:
indices = [np.arange(len(csd.ch_names))]
if mode == 'csd':
titles = ['Cross-spectral density']
# Units and scaling unknown
units = dict()
scalings = dict()
elif mode == 'coh':
titles = ['Coherence']
n_freqs = len(csd.frequencies)
if n_cols is None:
n_cols = int(np.ceil(np.sqrt(n_freqs)))
n_rows = int(np.ceil(n_freqs / float(n_cols)))
figs = []
for ind, title, ch_type in zip(indices, titles, ['eeg', 'mag', 'grad']):
if len(ind) == 0:
continue
fig, axes = plt.subplots(n_rows, n_cols, squeeze=False,
figsize=(2 * n_cols + 1, 2.2 * n_rows))
csd_mats = []
for i in range(len(csd.frequencies)):
cm = csd.get_data(index=i)[ind][:, ind]
if mode == 'csd':
cm = np.abs(cm) * scalings.get(ch_type, 1)
elif mode == 'coh':
# Compute coherence from the CSD matrix
psd = np.diag(cm).real
cm = np.abs(cm) ** 2 / psd[np.newaxis, :] / psd[:, np.newaxis]
csd_mats.append(cm)
vmax = np.max(csd_mats)
for i, (freq, mat) in enumerate(zip(csd.frequencies, csd_mats)):
ax = axes[i // n_cols][i % n_cols]
im = ax.imshow(mat, interpolation='nearest', cmap=cmap, vmin=0,
vmax=vmax)
ax.set_xticks([])
ax.set_yticks([])
if csd._is_sum:
ax.set_title('%.1f-%.1f Hz.' % (np.min(freq),
np.max(freq)))
else:
ax.set_title('%.1f Hz.' % freq)
plt.suptitle(title)
plt.subplots_adjust(top=0.8)
if colorbar:
cb = plt.colorbar(im, ax=[a for ax_ in axes for a in ax_])
if mode == 'csd':
label = u'CSD'
if ch_type in units:
label += u' (%s)' % units[ch_type]
cb.set_label(label)
elif mode == 'coh':
cb.set_label('Coherence')
figs.append(fig)
plt_show(show)
return figs
def plot_chpi_snr(snr_dict, axes=None):
"""Plot time-varying SNR estimates of the HPI coils.
Parameters
----------
snr_dict : dict
The dictionary returned by `~mne.chpi.compute_chpi_snr`. Must have keys
``times``, ``freqs``, ``TYPE_snr``, ``TYPE_power``, and ``TYPE_resid``
(where ``TYPE`` can be ``mag`` or ``grad`` or both).
axes : None | list of matplotlib.axes.Axes
Figure axes in which to draw the SNR, power, and residual plots. The
number of axes should be 3× the number of MEG sensor types present in
``snr_dict``. If ``None`` (the default), a new
`~matplotlib.figure.Figure` is created with the required number of
axes.
Returns
-------
fig : instance of matplotlib.figure.Figure
A figure with subplots for SNR, power, and residual variance,
separately for magnetometers and/or gradiometers (depending on what is
present in ``snr_dict``).
Notes
-----
If you supply a list of existing `~matplotlib.axes.Axes`, then the figure
legend will not be drawn automatically. If you still want it, running
``fig.legend(loc='right', title='cHPI frequencies')`` will recreate it,
though you may also need to manually adjust the margin to make room for it
(e.g., using ``fig.subplots_adjust(right=0.8)``).
.. versionadded:: 0.24
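Examples
--------
A sketch assuming ``raw`` is a loaded :class:`~mne.io.Raw` recording that
contains cHPI information::
>>> import mne
>>> snr_dict = mne.chpi.compute_chpi_snr(raw) #doctest: +SKIP
>>> fig = plot_chpi_snr(snr_dict) #doctest: +SKIP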
"""
import matplotlib.pyplot as plt
valid_keys = list(snr_dict)[2:]
titles = dict(snr='SNR', power='cHPI power', resid='Residual variance')
full_names = dict(mag='magnetometers', grad='gradiometers')
axes_was_none = axes is None
if axes_was_none:
fig, axes = plt.subplots(len(valid_keys), 1, sharex=True)
else:
fig = axes[0].get_figure()
if len(axes) != len(valid_keys):
raise ValueError(f'axes must be a list of {len(valid_keys)} axes, got '
f'length {len(axes)} ({axes}).')
fig.set_size_inches(10, 10)
legend_labels_exist = False
for key, ax in zip(valid_keys, axes):
ch_type, kind = key.split('_')
scaling = 1 if kind == 'snr' else DEFAULTS['scalings'][ch_type]
plot_kwargs = dict(color='k') if kind == 'resid' else dict()
lines = ax.plot(snr_dict['times'], snr_dict[key] * scaling ** 2,
**plot_kwargs)
# the freqs should be the same for all sensor types (and for SNR and
# power subplots), so we only need to label the lines on one axes
# (otherwise we get duplicate legend entries).
if not legend_labels_exist:
for line, freq in zip(lines, snr_dict['freqs']):
line.set_label(f'{freq} Hz')
legend_labels_exist = True
unit = DEFAULTS['units'][ch_type]
unit = f'({unit})' if '/' in unit else unit
set_kwargs = dict(title=f'{titles[kind]}, {full_names[ch_type]}',
ylabel='dB' if kind == 'snr' else f'{unit}²')
if not axes_was_none:
set_kwargs.update(xlabel='Time (s)')
ax.set(**set_kwargs)
if axes_was_none:
ax.set(xlabel='Time (s)')
fig.align_ylabels()
fig.subplots_adjust(left=0.1, right=0.825, bottom=0.075, top=0.95,
hspace=0.7)
fig.legend(loc='right', title='cHPI frequencies')
return fig
|
"""
GroupBunk v.1.2
Leave your Facebook groups quietly
Author: Shine Jayakumar
Github: https://github.com/shine-jayakumar
LICENSE: MIT
"""
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import StaleElementReferenceException
from webdriver_manager.chrome import ChromeDriverManager
import argparse
import logging
import sys
from datetime import datetime
import time
from groupfuncs import *
import os
# suppress webdriver manager logs
os.environ['WDM_LOG_LEVEL'] = '0'
IGNORE_DIV = ['your feed', 'discover', 'your notifications']
FB_GROUP_URL = 'https://www.facebook.com/groups/feed/'
def display_intro():
'''
Displays intro of the script
'''
intro = """
GroupBunk v.1.2
Leave your Facebook groups quietly
Author: Shine Jayakumar
Github: https://github.com/shine-jayakumar
"""
print(intro)
def time_taken(start_time, logger):
'''
Calculates and logs the time elapsed since start_time
'''
end_time = time.time()
logger.info(f"Total time taken: {round(end_time - start_time, 4)} seconds")
def cleanup_and_quit(driver):
'''
Quits driver and exits the script
'''
if driver:
driver.quit()
sys.exit()
start_time = time.time()
# ====================================================
# Argument parsing
# ====================================================
description = "Leave your Facebook groups quietly"
usage = "groupbunk.py username password [-h] [-eg FILE] [-et TIMEOUT] [-sw WAIT] [-gr RETRYCOUNT] [-dg FILE]"
examples="""
Examples:
groupbunk.py [email protected] bobspassword101
groupbunk.py [email protected] bobspassword101 -eg keepgroups.txt
groupbunk.py [email protected] bobspassword101 -et 60 --scrollwait 10 -gr 7
groupbunk.py [email protected] bobspassword101 --dumpgroups mygroup.txt --groupretry 5
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=description,
usage=usage,
epilog=examples,
prog='groupbunk')
# required arguments
parser.add_argument('username', type=str, help='Facebook username')
parser.add_argument('password', type=str, help='Facebook password')
# optional arguments
parser.add_argument('-eg', '--exgroups', type=str, metavar='', help='file with group names to exclude (one group per line)')
parser.add_argument('-et', '--eltimeout', type=int, metavar='', help='max timeout for elements to be loaded', default=30)
parser.add_argument('-sw', '--scrollwait', type=int, metavar='', help='time to wait after each scroll', default=4)
parser.add_argument('-gr', '--groupretry', type=int, metavar='', help='retry count while recapturing group names', default=5)
parser.add_argument('-dg', '--dumpgroups', type=str, metavar='', help='do not leave groups; only dump group names to a file')
parser.add_argument('-v', '--version', action='version', version='%(prog)s v.1.2')
args = parser.parse_args()
# ====================================================
# Setting up logger
# =====================================================
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s:%(name)s:%(lineno)d:%(levelname)s:%(message)s")
file_handler = logging.FileHandler(f'groupbunk_{datetime.now().strftime("%d_%m_%Y__%H_%M_%S")}.log', 'w', 'utf-8')
file_handler.setFormatter(formatter)
stdout_formatter = logging.Formatter("[*] => %(message)s")
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setFormatter(stdout_formatter)
logger.addHandler(file_handler)
logger.addHandler(stdout_handler)
#=======================================================
try:
display_intro()
logger.info("script started")
# loading group names to be excluded
if args.exgroups:
logger.info("Loading group names to be excluded")
excluded_group_names = get_excluded_group_names(args.exgroups)
IGNORE_DIV.extend(excluded_group_names)
options = Options()
# suppresses notifications
options.add_argument("--disable-notifications")
options.add_experimental_option('excludeSwitches', ['enable-logging'])
options.add_argument("--log-level=3")
logger.info("Downloading latest chrome webdriver")
# UNCOMMENT TO SPECIFY DRIVER LOCATION
# driver = webdriver.Chrome("D:/chromedriver/98/chromedriver.exe", options=options)
driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
if not driver:
raise Exception('Unable to download chrome webdriver for your version of Chrome browser')
logger.info("Successfully downloaded chrome webdriver")
wait = WebDriverWait(driver, args.eltimeout)
logger.info(f"Opening FB GROUPS URL: {FB_GROUP_URL}")
driver.get(FB_GROUP_URL)
logger.info("Sending username")
wait.until(EC.visibility_of_element_located((By.ID, 'email'))).send_keys(args.username)
logger.info("Sending password")
driver.find_element(By.ID, 'pass').send_keys(args.password)
logger.info("Clicking on Log In")
wait.until(EC.presence_of_element_located((By.ID, 'loginbutton'))).click()
# get all the links inside divs representing group names
group_links = get_group_link_elements(driver, wait)
if not group_links:
raise Exception("Unable to find links")
no_of_currently_loaded_links = 0
logger.info(f"Initial link count: {len(group_links)-3}")
logger.info("Scrolling down to capture all the links")
# scroll until no new group links are loaded
while len(group_links) > no_of_currently_loaded_links:
no_of_currently_loaded_links = len(group_links)
logger.info(f"Updated link count: {no_of_currently_loaded_links-3}")
scroll_into_view(driver, group_links[no_of_currently_loaded_links-1])
time.sleep(args.scrollwait)
# re-capturing
group_links = get_group_link_elements(driver, wait)
logger.info(f"Total number of links found: {len(group_links)-3}")
# only show the group names and exit
if args.dumpgroups:
logger.info('Only dumping group names to file. Not leaving groups')
logger.info(f"Dumping group names to: {args.dumpgroups}")
dump_groups(group_links, args.dumpgroups)
time_taken(start_time, logger)
cleanup_and_quit(driver)
# first 3 links are for Your feed, Discover, Your notifications
i = 0
save_state = 0
no_of_retries = 0
failed_groups = []
total_groups = len(group_links)
while i < total_groups:
try:
# need only the group name and not Last Active
group_name = group_links[i].text.split('\n')[0]
# if group name not in ignore list
if group_name.lower() not in IGNORE_DIV:
logger.info(f"Leaving group: {group_name}")
link = group_links[i].get_attribute('href')
logger.info(f"Opening group link: {link}")
switch_tab(driver, open_new_tab(driver))
driver.get(link)
if not leave_group(wait):
logger.info('Unable to leave the group. You might not be a member of this group.')
driver.close()
switch_tab(driver, driver.window_handles[0])
else:
if group_name.lower() not in ['your feed', 'discover', 'your notifications']:
logger.info(f"Skipping group : {group_name}")
i += 1
except StaleElementReferenceException:
logger.error('Captured group elements gone stale. Recapturing...')
if no_of_retries > args.groupretry:
logger.error('Reached max number of retry attempts')
break
save_state = i
group_links = get_group_link_elements(driver, wait)
no_of_retries += 1
except Exception as ex:
logger.error(f"Unable to leave group {group_name}. Error: {ex}")
failed_groups.append(group_name)
i += 1
total_no_of_groups = len(group_links)-3
total_no_failed_groups = len(failed_groups)
logger.info(f"Total groups: {total_no_of_groups}")
logger.info(f"No. of groups failed to leave: {total_no_failed_groups}")
logger.info(f"Success percentage: {((total_no_of_groups - total_no_failed_groups)/total_no_of_groups) * 100} %")
if failed_groups:
failed_group_names = ", ".join(failed_groups)
logger.info(f"Failed groups: \n{failed_group_names}")
except Exception as ex:
logger.error(f"Script ended with exception: {ex}")
finally:
time_taken(start_time, logger)
cleanup_and_quit(driver)
|
"""
GroupBunk v.1.2
Leave your Facebook groups quietly
Author: Shine Jayakumar
Github: https://github.com/shine-jayakumar
LICENSE: MIT
"""
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import StaleElementReferenceException
from webdriver_manager.chrome import ChromeDriverManager
import argparse
import logging
import sys
from datetime import datetime
import time
from groupfuncs import *
import os
# suppress webdriver manager logs
os.environ['WDM_LOG_LEVEL'] = '0'
IGNORE_DIV = ['your feed', 'discover', 'your notifications']
FB_GROUP_URL = 'https://www.facebook.com/groups/feed/'
def display_intro():
'''
Displays intro of the script
'''
intro = """
GroupBunk v.1.2
Leave your Facebook groups quietly
Author: Shine Jayakumar
Github: https://github.com/shine-jayakumar
"""
print(intro)
def time_taken(start_time, logger):
'''
Calculates and logs the time elapsed since start_time
'''
end_time = time.time()
logger.info(f"Total time taken: {round(end_time - start_time, 4)} seconds")
def cleanup_and_quit(driver):
'''
Quits driver and exits the script
'''
if driver:
driver.quit()
sys.exit()
start_time = time.time()
# ====================================================
# Argument parsing
# ====================================================
description = "Leave your Facebook groups quietly"
usage = "groupbunk.py username password [-h] [-eg FILE] [-et TIMEOUT] [-sw WAIT] [-gr RETRYCOUNT] [-dg FILE]"
examples="""
Examples:
groupbunk.py [email protected] bobspassword101
groupbunk.py [email protected] bobspassword101 -eg keepgroups.txt
groupbunk.py [email protected] bobspassword101 -et 60 --scrollwait 10 -gr 7
groupbunk.py [email protected] bobspassword101 --dumpgroups mygroup.txt --groupretry 5
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=description,
usage=usage,
epilog=examples,
prog='groupbunk')
# required arguments
parser.add_argument('username', type=str, help='Facebook username')
parser.add_argument('password', type=str, help='Facebook password')
# optional arguments
parser.add_argument('-eg', '--exgroups', type=str, metavar='', help='file with group names to exclude (one group per line)')
parser.add_argument('-et', '--eltimeout', type=int, metavar='', help='max timeout for elements to be loaded', default=30)
parser.add_argument('-sw', '--scrollwait', type=int, metavar='', help='time to wait after each scroll', default=4)
parser.add_argument('-gr', '--groupretry', type=int, metavar='', help='retry count while recapturing group names', default=5)
parser.add_argument('-dg', '--dumpgroups', type=str, metavar='', help='do not leave groups; only dump group names to a file')
parser.add_argument('-v', '--version', action='version', version='%(prog)s v.1.2')
args = parser.parse_args()
# ====================================================
# Setting up logger
# =====================================================
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s:%(name)s:%(lineno)d:%(levelname)s:%(message)s")
file_handler = logging.FileHandler(f'groupbunk_{datetime.now().strftime("%d_%m_%Y__%H_%M_%S")}.log', 'w', 'utf-8')
file_handler.setFormatter(formatter)
stdout_formatter = logging.Formatter("[*] => %(message)s")
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setFormatter(stdout_formatter)
logger.addHandler(file_handler)
logger.addHandler(stdout_handler)
#=======================================================
try:
display_intro()
logger.info("script started")
# loading group names to be excluded
if args.exgroups:
logger.info("Loading group names to be excluded")
excluded_group_names = get_excluded_group_names(args.exgroups)
IGNORE_DIV.extend(excluded_group_names)
options = Options()
# suppresses notifications
options.add_argument("--disable-notifications")
options.add_experimental_option('excludeSwitches', ['enable-logging'])
options.add_argument("--log-level=3")
logger.info("Downloading latest chrome webdriver")
# UNCOMMENT TO SPECIFY DRIVER LOCATION
# driver = webdriver.Chrome("D:/chromedriver/98/chromedriver.exe", options=options)
driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
if not driver:
raise Exception('Unable to download chrome webdriver for your version of Chrome browser')
logger.info("Successfully downloaded chrome webdriver")
wait = WebDriverWait(driver, args.eltimeout)
logger.info(f"Opening FB GROUPS URL: {FB_GROUP_URL}")
driver.get(FB_GROUP_URL)
logger.info("Sending username")
wait.until(EC.visibility_of_element_located((By.ID, 'email'))).send_keys(args.username)
logger.info("Sending password")
driver.find_element(By.ID, 'pass').send_keys(args.password)
logger.info("Clicking on Log In")
wait.until(EC.presence_of_element_located((By.ID, 'loginbutton'))).click()
# get all the links inside divs representing group names
group_links = get_group_link_elements(driver, wait)
if not group_links:
raise Exception("Unable to find links")
no_of_currently_loaded_links = 0
logger.info(f"Initial link count: {len(group_links)-3}")
logger.info("Scrolling down to capture all the links")
# scroll until no new group links are loaded
while len(group_links) > no_of_currently_loaded_links:
no_of_currently_loaded_links = len(group_links)
logger.info(f"Updated link count: {no_of_currently_loaded_links-3}")
scroll_into_view(driver, group_links[no_of_currently_loaded_links-1])
time.sleep(args.scrollwait)
# re-capturing
group_links = get_group_link_elements(driver, wait)
logger.info(f"Total number of links found: {len(group_links)-3}")
# only show the group names and exit
if args.dumpgroups:
logger.info('Only dumping group names to file. Not leaving groups')
logger.info(f"Dumping group names to: {args.dumpgroups}")
dump_groups(group_links, args.dumpgroups)
time_taken(start_time, logger)
cleanup_and_quit(driver)
# first 3 links are for Your feed, Discover, Your notifications
i = 0
save_state = 0
no_of_retries = 0
failed_groups = []
total_groups = len(group_links)
while i < total_groups:
try:
# need only the group name and not Last Active
group_name = group_links[i].text.split('\n')[0]
# if group name not in ignore list
if group_name.lower() not in IGNORE_DIV:
logger.info(f"Leaving group: {group_name}")
link = group_links[i].get_attribute('href')
logger.info(f"Opening group link: {link}")
switch_tab(driver, open_new_tab(driver))
driver.get(link)
if not leave_group(wait):
logger.info('Unable to leave the group. You might not be a member of this group.')
driver.close()
switch_tab(driver, driver.window_handles[0])
else:
if group_name.lower() not in ['your feed', 'discover', 'your notifications']:
logger.info(f"Skipping group : {group_name}")
i += 1
except StaleElementReferenceException:
logger.error('Captured group elements gone stale. Recapturing...')
if no_of_retries > args.groupretry:
logger.error('Reached max number of retry attempts')
break
save_state = i
group_links = get_group_link_elements(driver, wait)
no_of_retries += 1
except Exception as ex:
logger.error(f"Unable to leave group {group_name}. Error: {ex}")
failed_groups.append(group_name)
i += 1
total_no_of_groups = len(group_links)-3
total_no_failed_groups = len(failed_groups)
logger.info(f"Total groups: {total_no_of_groups}")
logger.info(f"No. of groups failed to leave: {total_no_failed_groups}")
logger.info(f"Success percentage: {((total_no_of_groups - total_no_failed_groups)/total_no_of_groups) * 100} %")
if failed_groups:
failed_group_names = ", ".join(failed_groups)
logger.info(f"Failed groups: \n{failed_group_names}")
except Exception as ex:
logger.error(f"Script ended with exception: {ex}")
finally:
time_taken(start_time, logger)
cleanup_and_quit(driver)
|
#!/usr/bin/python3
# **************************************************************************** #
# #
# :::::::: #
# Artsy.py :+: :+: #
# +:+ #
# By: peerdb <[email protected]> +#+ #
# +#+ #
# Created: 2020/11/30 19:16:01 by peerdb #+# #+# #
# Updated: 2020/11/30 19:52:56 by peerdb ######## odam.nl #
# #
# **************************************************************************** #
import sys, time, random, os
from PIL import Image
from GenerateASCII import generate_ascii
if len(sys.argv) >= 2:
imageName = sys.argv[1] + ".gif"
else:
imageName = "thor.gif"
try:
if '../' in imageName:
print('Please only supply images in the "imgs/" folder')
raise FileNotFoundError
im = Image.open("imgs/" + imageName)
except FileNotFoundError:
print(f'Problem opening {"imgs/" + imageName}.\nPlease check your path again.')
exit(1)
while True:
for frame in range(im.n_frames):
im.seek(frame)
generate_ascii(im)
time.sleep(4.0 / im.n_frames)
|
#!/usr/bin/python3
# **************************************************************************** #
# #
# :::::::: #
# Artsy.py :+: :+: #
# +:+ #
# By: peerdb <[email protected]> +#+ #
# +#+ #
# Created: 2020/11/30 19:16:01 by peerdb #+# #+# #
# Updated: 2020/11/30 19:52:56 by peerdb ######## odam.nl #
# #
# **************************************************************************** #
import sys, time, random, os
from PIL import Image
from GenerateASCII import generate_ascii
if len(sys.argv) >= 2:
imageName = sys.argv[1] + ".gif"
else:
imageName = "thor.gif"
try:
if '../' in imageName:
print('Please only supply images in the "imgs/" folder')
raise FileNotFoundError
im = Image.open("imgs/" + imageName)
except FileNotFoundError:
print(f'Problem opening {"imgs/" + imageName}.\nPlease check your path again.')
exit(1)
while True:
for frame in range(im.n_frames):
im.seek(frame)
generate_ascii(im)
time.sleep(4.0 / im.n_frames)
|
# MIT License
#
# Copyright (c) 2020 Genesis Cloud Ltd. <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Authors:
# Oz Tiram <[email protected]>
"""
An example script to show how to start a Genesis Cloud GPU instance
with custom user data to install the NVIDIA GPU driver.
Grab your API key from the UI and save it in a safe place.
Export it on the shell before running this script:
$ export GENESISCLOUD_API_KEY=secretkey
"""
import os
import textwrap
import time
import subprocess as sp
from genesiscloud.client import Client, INSTANCE_TYPES
def simple_startup_script():
"""see the documentation of cloud init"""
return textwrap.dedent("""
#cloud-config
hostname: mytestubuntu
runcmd:
- [ "apt", "install", "-y", "vim" ]
""")
def get_startup_script():
return """#!/bin/bash
set -eux
IS_INSTALLED=false
NVIDIA_SHORT_VERSION=430
manual_fetch_install() {
__nvidia_full_version="430_430.50-0ubuntu2"
for i in $(seq 1 5)
do
echo "Connecting to http://archive.ubuntu.com site for $i time"
if curl -s --head --request GET http://archive.ubuntu.com/ubuntu/pool/restricted/n/nvidia-graphics-drivers-"${NVIDIA_SHORT_VERSION}" | grep "HTTP/1.1" > /dev/null ;
then
echo "Connected to http://archive.ubuntu.com. Start downloading and installing the NVIDIA driver..."
__tempdir="$(mktemp -d)"
apt-get install -y --no-install-recommends "linux-headers-$(uname -r)" dkms
wget -P "${__tempdir}" http://archive.ubuntu.com/ubuntu/pool/restricted/n/nvidia-graphics-drivers-${NVIDIA_SHORT_VERSION}/nvidia-kernel-common-${__nvidia_full_version}_amd64.deb
wget -P "${__tempdir}" http://archive.ubuntu.com/ubuntu/pool/restricted/n/nvidia-graphics-drivers-${NVIDIA_SHORT_VERSION}/nvidia-kernel-source-${__nvidia_full_version}_amd64.deb
wget -P "${__tempdir}" http://archive.ubuntu.com/ubuntu/pool/restricted/n/nvidia-graphics-drivers-${NVIDIA_SHORT_VERSION}/nvidia-dkms-${__nvidia_full_version}_amd64.deb
dpkg -i "${__tempdir}"/nvidia-kernel-common-${__nvidia_full_version}_amd64.deb "${__tempdir}"/nvidia-kernel-source-${__nvidia_full_version}_amd64.deb "${__tempdir}"/nvidia-dkms-${__nvidia_full_version}_amd64.deb
wget -P "${__tempdir}" http://archive.ubuntu.com/ubuntu/pool/restricted/n/nvidia-graphics-drivers-${NVIDIA_SHORT_VERSION}/nvidia-utils-${__nvidia_full_version}_amd64.deb
wget -P "${__tempdir}" http://archive.ubuntu.com/ubuntu/pool/restricted/n/nvidia-graphics-drivers-${NVIDIA_SHORT_VERSION}/libnvidia-compute-${__nvidia_full_version}_amd64.deb
dpkg -i "${__tempdir}"/nvidia-utils-${__nvidia_full_version}_amd64.deb "${__tempdir}"/libnvidia-compute-${__nvidia_full_version}_amd64.deb
IS_INSTALLED=true
rm -r "${__tempdir}"
break
fi
sleep 2
done
}
apt_fetch_install() {
add-apt-repository -s -u -y restricted
# Ubuntu has only a single version in the repository marked as "latest" of
# this series.
for _ in $(seq 1 5)
do
if apt-get install -y --no-install-recommends nvidia-utils-${NVIDIA_SHORT_VERSION} libnvidia-compute-${NVIDIA_SHORT_VERSION} \
nvidia-kernel-common-${NVIDIA_SHORT_VERSION} \
nvidia-kernel-source-${NVIDIA_SHORT_VERSION} \
nvidia-dkms-${NVIDIA_SHORT_VERSION} \
"linux-headers-$(uname -r)" dkms; then
IS_INSTALLED=true
break
fi
sleep 2
done
}
main() {
apt-get update
if grep xenial /etc/os-release; then
manual_fetch_install
else
apt_fetch_install
fi
# remove the module if it is inserted, blacklist it
rmmod nouveau || echo "nouveau kernel module not loaded ..."
echo "blacklist nouveau" > /etc/modprobe.d/nouveau.conf
# log insertion of the nvidia module
# this should always succeed on customer instances
if modprobe -vi nvidia; then
nvidia-smi
modinfo nvidia
gpu_found=true
else
gpu_found=false
fi
if [ "${IS_INSTALLED}" = true ]; then
echo "NVIDIA driver has been successfully installed."
else
echo "NVIDIA driver has NOT been installed."
fi
if [ "${gpu_found}" ]; then
echo "NVIDIA GPU device is found and ready"
else
echo "WARNING: NVIDIA GPU device is not found or is failed"
fi
}
main
"""
def create_instance():
client = Client(os.getenv("GENESISCLOUD_API_KEY"))
# before we continue to create objects, we check that we can communicate
# with the API, if the connect method does not succeed it will throw an
# error and the script will terminate
if client.connect():
pass
# To create an instance you will need an SSH public key.
# Upload it via the Web UI; you can then find it by name as shown below.
# replace this to match your key
SSHKEYNAME = 'YourKeyName'
# genesiscloud.client.Resource.find methods return generators - that is,
# they are lazy by default.
sshkey_gen = client.SSHKeys.find({"name": SSHKEYNAME})
sshkey = list(sshkey_gen)[0]
# You need to tell the client which OS should be used for your instance
# One can use a snapshot or a base-os to create a new instance
ubuntu_18 = [image for image in client.Images.find({"name": 'Ubuntu 18.04'})][0]
# choose the most simple instance type
# to see the instance properties, use
# list(INSTANCE_TYPES.items())[0]
#
# ('vcpu-4_memory-12g_disk-80g_nvidia1080ti-1',
# {'vCPUs': 4, 'RAM': 12, 'Disk': 80, 'GPU': 1})
instace_type = list(INSTANCE_TYPES.keys())[0]
# To create an instance use Instances.create
# You must pass an SSH key to SSH into the machine. Currently, only one
# SSH key is supported. If you need more, use the command
# `ssh-import-id-gh oz123`
# it can fetch public keys from github.com/oz123.keys
# *Obviously* __replace__ my user name with YOURS or anyone you TRUST.
# You should put this in the user_data script. You can add this in the
# text block that the function `get_startup_script` returns.
# NOTE:
# you can also create an instance with SSH password enabled, but you should
# prefer SSH key authentication. If you choose to use password, you should
# not pass ssh_keys
my_instance = client.Instances.create(
name="demo",
hostname="demo",
ssh_keys=[sshkey.id], # comment this to enable password
image=ubuntu_18.id,
type=instace_type,
metadata={"startup_script":
simple_startup_script()},
#password="yourSekretPassword#12!"
)
# my_instance is a dictionary containing information about the instance
# that was just created.
print(my_instance)
while my_instance['status'] != 'active':
time.sleep(1)
my_instance = client.Instances.get(my_instance.id)
print(f"{my_instance["status"]}\r", end="")
print("")
# yay! the instance is active
# let's ssh to the public IP of the instance
public_ip = my_instance.public_ip
print(f"The ssh address of the Instance is: {public_ip}")
# wait for ssh to become available; this returns a non-zero exit code
# as long as the ssh connection isn't available
while sp.run(
("ssh -l ubuntu -o StrictHostKeyChecking=accept-new "
"-o ConnectTimeout=50 "
f"{public_ip} hostname"), shell=True).returncode:
time.sleep(1)
print("Congratulations! You genesiscloud instance has been created!")
print("You can ssh to it with:")
print(f"ssh -l ubuntu {public_ip}")
print("Some interesting commands to try at first:")
print("cloud-init stats # if this is still running, NVIDIA driver is still"
" installing")
print("use the following to see cloud-init output in real time:")
print("sudo tail -f /var/log/cloud-init-output.log")
return my_instance
def destroy(instance_id):
# finally destroy this instance when you no longer need it
client = Client(os.getenv("GENESISCLOUD_API_KEY"))
client.Instances.delete(id=instance_id)
if __name__ == "__main__":
instance = create_instance()
instance_id = instance['id']
# destroy(instance_id)
|
# MIT License
#
# Copyright (c) 2020 Genesis Cloud Ltd. <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Authors:
# Oz Tiram <[email protected]>
"""
An example script to show how to start a Genesis Cloud GPU instance
with custom user data to install the NVIDIA GPU driver.
Grab your API key from the UI and save it in a safe place.
Export it on the shell before running this script:
$ export GENESISCLOUD_API_KEY=secretkey
"""
import os
import textwrap
import time
import subprocess as sp
from genesiscloud.client import Client, INSTANCE_TYPES
def simple_startup_script():
"""see the documentation of cloud init"""
return textwrap.dedent("""
#cloud-config
hostname: mytestubuntu
runcmd:
- [ "apt", "install", "-y", "vim" ]
""")
def get_startup_script():
return """#!/bin/bash
set -eux
IS_INSTALLED=false
NVIDIA_SHORT_VERSION=430
manual_fetch_install() {
__nvidia_full_version="430_430.50-0ubuntu2"
for i in $(seq 1 5)
do
echo "Connecting to http://archive.ubuntu.com site for $i time"
if curl -s --head --request GET http://archive.ubuntu.com/ubuntu/pool/restricted/n/nvidia-graphics-drivers-"${NVIDIA_SHORT_VERSION}" | grep "HTTP/1.1" > /dev/null ;
then
echo "Connected to http://archive.ubuntu.com. Start downloading and installing the NVIDIA driver..."
__tempdir="$(mktemp -d)"
apt-get install -y --no-install-recommends "linux-headers-$(uname -r)" dkms
wget -P "${__tempdir}" http://archive.ubuntu.com/ubuntu/pool/restricted/n/nvidia-graphics-drivers-${NVIDIA_SHORT_VERSION}/nvidia-kernel-common-${__nvidia_full_version}_amd64.deb
wget -P "${__tempdir}" http://archive.ubuntu.com/ubuntu/pool/restricted/n/nvidia-graphics-drivers-${NVIDIA_SHORT_VERSION}/nvidia-kernel-source-${__nvidia_full_version}_amd64.deb
wget -P "${__tempdir}" http://archive.ubuntu.com/ubuntu/pool/restricted/n/nvidia-graphics-drivers-${NVIDIA_SHORT_VERSION}/nvidia-dkms-${__nvidia_full_version}_amd64.deb
dpkg -i "${__tempdir}"/nvidia-kernel-common-${__nvidia_full_version}_amd64.deb "${__tempdir}"/nvidia-kernel-source-${__nvidia_full_version}_amd64.deb "${__tempdir}"/nvidia-dkms-${__nvidia_full_version}_amd64.deb
wget -P "${__tempdir}" http://archive.ubuntu.com/ubuntu/pool/restricted/n/nvidia-graphics-drivers-${NVIDIA_SHORT_VERSION}/nvidia-utils-${__nvidia_full_version}_amd64.deb
wget -P "${__tempdir}" http://archive.ubuntu.com/ubuntu/pool/restricted/n/nvidia-graphics-drivers-${NVIDIA_SHORT_VERSION}/libnvidia-compute-${__nvidia_full_version}_amd64.deb
dpkg -i "${__tempdir}"/nvidia-utils-${__nvidia_full_version}_amd64.deb "${__tempdir}"/libnvidia-compute-${__nvidia_full_version}_amd64.deb
IS_INSTALLED=true
rm -r "${__tempdir}"
break
fi
sleep 2
done
}
apt_fetch_install() {
add-apt-repository -s -u -y restricted
# Ubuntu ships only a single version of this driver series in the
# repository, marked as "latest".
for _ in $(seq 1 5)
do
if apt-get install -y --no-install-recommends nvidia-utils-${NVIDIA_SHORT_VERSION} libnvidia-compute-${NVIDIA_SHORT_VERSION} \
nvidia-kernel-common-${NVIDIA_SHORT_VERSION} \
nvidia-kernel-source-${NVIDIA_SHORT_VERSION} \
nvidia-dkms-${NVIDIA_SHORT_VERSION} \
"linux-headers-$(uname -r)" dkms; then
IS_INSTALLED=true
break
fi
sleep 2
done
}
main() {
apt-get update
if grep xenial /etc/os-release; then
manual_fetch_install
else
apt_fetch_install
fi
# remove the module if it is inserted, blacklist it
rmmod nouveau || echo "nouveau kernel module not loaded ..."
echo "blacklist nouveau" > /etc/modprobe.d/nouveau.conf
# log insertion of the nvidia module
# this should always succeed on customer instances
if modprobe -vi nvidia; then
nvidia-smi
modinfo nvidia
gpu_found=true
else
gpu_found=false
fi
if [ "${IS_INSTALLED}" = true ]; then
echo "NVIDIA driver has been successfully installed."
else
echo "NVIDIA driver has NOT been installed."
fi
if [ "${gpu_found}" ]; then
echo "NVIDIA GPU device is found and ready"
else
echo "WARNING: NVIDIA GPU device is not found or is failed"
fi
}
main
"""
def create_instance():
client = Client(os.getenv("GENESISCLOUD_API_KEY"))
# before we continue to create objects, we check that we can communicate
# with the API, if the connect method does not succeed it will throw an
# error and the script will terminate
if client.connect():
pass
# To create an instance you will need an SSH public key.
# Upload it via the Web UI; you can then look it up by name with SSHKeys.find.
# Replace this to match your key name.
SSHKEYNAME = 'YourKeyName'
# genesiscloud.client.Resource.find methods return generators - that is,
# they are lazy by default.
sshkey_gen = client.SSHKeys.find({"name": SSHKEYNAME})
sshkey = list(sshkey_gen)[0]
# You need to tell the client which OS should be used for your instance
# One can use a snapshot or a base-os to create a new instance
ubuntu_18 = [image for image in client.Images.find({"name": 'Ubuntu 18.04'})][0]
# choose the most simple instance type
# to see the instance properties, use
# list(INSTANCE_TYPES.items())[0]
#
# ('vcpu-4_memory-12g_disk-80g_nvidia1080ti-1',
# {'vCPUs': 4, 'RAM': 12, 'Disk': 80, 'GPU': 1})
instance_type = list(INSTANCE_TYPES.keys())[0]
# To create an instance use Instances.create
# You must pass an SSH key to SSH into the machine. Currently, only one
# SSH key is supported. If you need more, use the command
# `ssh-import-id-gh oz123`
# which can fetch public keys from github.com/oz123.keys
# *Obviously* __replace__ my user name with YOURS or anyone you TRUST.
# You should put this in the user_data script, e.g. in the text block that
# the function `get_startup_script` returns.
# NOTE:
# you can also create an instance with SSH password authentication enabled,
# but you should prefer SSH key authentication. If you choose to use a
# password, you should not pass ssh_keys.
my_instance = client.Instances.create(
name="demo",
hostname="demo",
ssh_keys=[sshkey.id], # comment this to enable password
image=ubuntu_18.id,
type=instance_type,
metadata={"startup_script":
simple_startup_script()},
#password="yourSekretPassword#12!"
)
# my_instance is a dictionary containing information about the instance
# that was just created.
print(my_instance)
while my_instance['status'] != 'active':
time.sleep(1)
my_instance = client.Instances.get(my_instance['id'])
print(f"{my_instance['status']}\r", end="")
print("")
# yay! the instance is active
# let's ssh to the public IP of the instance
public_ip = my_instance['public_ip']
print(f"The ssh address of the Instance is: {public_ip}")
# wait for ssh to become available, this returns exit code other
# than 0 as long the ssh connection isn't available
while sp.run(
("ssh -l ubuntu -o StrictHostKeyChecking=accept-new "
"-o ConnectTimeout=50 "
f"{public_ip} hostname"), shell=True).returncode:
time.sleep(1)
print("Congratulations! You genesiscloud instance has been created!")
print("You can ssh to it with:")
print(f"ssh -l ubuntu {public_ip}")
print("Some interesting commands to try at first:")
print("cloud-init stats # if this is still running, NVIDIA driver is still"
" installing")
print("use the following to see cloud-init output in real time:")
print("sudo tail -f /var/log/cloud-init-output.log")
return my_instance
def destroy(instance_id):
# finally, destroy this instance when you no longer need it
client = Client(os.getenv("GENESISCLOUD_API_KEY"))
client.Instances.delete(id=instance_id)
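# --- Illustrative lifecycle sketch (not part of the original example) ---
# It only reuses the create_instance() and destroy() helpers defined above;
# the try/finally wrapper and the dict access via instance['id'] mirror the
# usage in the __main__ block below and are assumptions about typical usage.
def example_lifecycle():
    instance = create_instance()
    try:
        # ... use the instance here, e.g. ssh in and run your GPU workload ...
        pass
    finally:
        # always release the (billed) GPU instance when you are done with it
        destroy(instance['id'])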
if __name__ == "__main__":
instance = create_instance()
instance_id = instance['id']
# destroy(instance_id)
|
from __future__ import division
import numpy as np
from openmdao.api import ExplicitComponent
from openmdao.api import Group
class PowerSplit(ExplicitComponent):
"""
A power split mechanism for mechanical or electrical power.
Inputs
------
power_in : float
Power fed to the splitter. (vector, W)
power_rating : float
Maximum rated power of the split mechanism. (scalar, W)
power_split_fraction : float
If ``'rule'`` is set to ``'fraction'``, sets the fraction of input power directed
to Output A (minus losses). (vector, dimensionless)
power_split_amount : float
If ``'rule'`` is set to ``'fixed'``, sets the amount of input power sent to Output A
(minus losses). (vector, W)
Outputs
-------
power_out_A : float
Power sent to first output (vector, W)
power_out_B : float
Power sent to second output (vector, W)
heat_out : float
Waste heat produced (vector, W)
component_cost : float
Nonrecurring cost of the component (scalar, USD)
component_weight : float
Weight of the component (scalar, kg)
component_sizing_margin : float
Equal to 1 when fed full rated power (vector, dimensionless)
Options
-------
num_nodes : int
Number of analysis points to run (sets vec length; default 1)
rule : str
Power split control rule to use; either ``'fixed'`` where a set
amount of power is sent to Output A or ``'fraction'`` where a
fraction of the total power is sent to Output A
efficiency : float
Component efficiency (default 1)
weight_inc : float
Weight per unit rated power
(default 0, kg/W)
weight_base : float
Base weight
(default 0, kg)
cost_inc : float
Nonrecurring cost per unit power
(default 0, USD/W)
cost_base : float
Base cost
(default 0 USD)
"""
def initialize(self):
# define control rules
self.options.declare('num_nodes', default=1, desc='Number of flight/control conditions')
self.options.declare('rule', default='fraction',
desc='Control strategy - fraction or fixed power')
self.options.declare('efficiency', default=1., desc='Efficiency (dimensionless)')
self.options.declare('weight_inc', default=0., desc='kg per input watt')
self.options.declare('weight_base', default=0., desc='kg base weight')
self.options.declare('cost_inc', default=0., desc='$ cost per input watt')
self.options.declare('cost_base', default=0., desc='$ cost base')
def setup(self):
nn = self.options['num_nodes']
self.add_input('power_in', units='W',
desc='Input shaft power or incoming electrical load', shape=(nn,))
self.add_input('power_rating', val=99999999, units='W', desc='Split mechanism power rating')
rule = self.options['rule']
if rule == 'fraction':
self.add_input('power_split_fraction', val=0.5,
desc='Fraction of power to output A', shape=(nn,))
elif rule == 'fixed':
self.add_input('power_split_amount', units='W',
desc='Raw amount of power to output A', shape=(nn,))
else:
msg = 'Specify either "fraction" or "fixed" as power split control rule'
raise ValueError(msg)
eta = self.options['efficiency']
weight_inc = self.options['weight_inc']
weight_base = self.options['weight_base']
cost_inc = self.options['cost_inc']
cost_base = self.options['cost_base']
self.add_output('power_out_A', units='W', desc='Output power or load to A', shape=(nn,))
self.add_output('power_out_B', units='W', desc='Output power or load to B', shape=(nn,))
self.add_output('heat_out', units='W', desc='Waste heat out', shape=(nn,))
self.add_output('component_cost', units='USD', desc='Splitter component cost')
self.add_output('component_weight', units='kg', desc='Splitter component weight')
self.add_output('component_sizing_margin', desc='Fraction of rated power', shape=(nn,))
if rule == 'fraction':
self.declare_partials(['power_out_A', 'power_out_B'],
['power_in', 'power_split_fraction'],
rows=range(nn), cols=range(nn))
elif rule == 'fixed':
self.declare_partials(['power_out_A', 'power_out_B'],
['power_in', 'power_split_amount'],
rows=range(nn), cols=range(nn))
self.declare_partials('heat_out', 'power_in', val=(1 - eta) * np.ones(nn),
rows=range(nn), cols=range(nn))
self.declare_partials('component_cost', 'power_rating', val=cost_inc)
self.declare_partials('component_weight', 'power_rating', val=weight_inc)
self.declare_partials('component_sizing_margin', 'power_in',
rows=range(nn), cols=range(nn))
self.declare_partials('component_sizing_margin', 'power_rating')
def compute(self, inputs, outputs):
nn = self.options['num_nodes']
rule = self.options['rule']
eta = self.options['efficiency']
weight_inc = self.options['weight_inc']
weight_base = self.options['weight_base']
cost_inc = self.options['cost_inc']
cost_base = self.options['cost_base']
if rule == 'fraction':
outputs['power_out_A'] = inputs['power_in'] * inputs['power_split_fraction'] * eta
outputs['power_out_B'] = inputs['power_in'] * (1 - inputs['power_split_fraction']) * eta
elif rule == 'fixed':
# check to make sure enough power is available
# if inputs['power_in'] < inputs['power_split_amount']:
not_enough_idx = np.where(inputs['power_in'] < inputs['power_split_amount'])
po_A = np.zeros(nn)
po_B = np.zeros(nn)
po_A[not_enough_idx] = inputs['power_in'][not_enough_idx] * eta
po_B[not_enough_idx] = np.zeros(nn)[not_enough_idx]
# else:
enough_idx = np.where(inputs['power_in'] >= inputs['power_split_amount'])
po_A[enough_idx] = inputs['power_split_amount'][enough_idx] * eta
po_B[enough_idx] = (inputs['power_in'][enough_idx] -
inputs['power_split_amount'][enough_idx]) * eta
outputs['power_out_A'] = po_A
outputs['power_out_B'] = po_B
outputs['heat_out'] = inputs['power_in'] * (1 - eta)
outputs['component_cost'] = inputs['power_rating'] * cost_inc + cost_base
outputs['component_weight'] = inputs['power_rating'] * weight_inc + weight_base
outputs['component_sizing_margin'] = inputs['power_in'] / inputs['power_rating']
def compute_partials(self, inputs, J):
nn = self.options['num_nodes']
rule = self.options['rule']
eta = self.options['efficiency']
if rule == 'fraction':
J['power_out_A', 'power_in'] = inputs['power_split_fraction'] * eta
J['power_out_A', 'power_split_fraction'] = inputs['power_in'] * eta
J['power_out_B', 'power_in'] = (1 - inputs['power_split_fraction']) * eta
J['power_out_B', 'power_split_fraction'] = -inputs['power_in'] * eta
elif rule == 'fixed':
not_enough_idx = np.where(inputs['power_in'] < inputs['power_split_amount'])
enough_idx = np.where(inputs['power_in'] >= inputs['power_split_amount'])
# if inputs['power_in'] < inputs['power_split_amount']:
Jpo_A_pi = np.zeros(nn)
Jpo_A_ps = np.zeros(nn)
Jpo_B_pi = np.zeros(nn)
Jpo_B_ps = np.zeros(nn)
Jpo_A_pi[not_enough_idx] = eta * np.ones(nn)[not_enough_idx]
Jpo_A_ps[not_enough_idx] = np.zeros(nn)[not_enough_idx]
Jpo_B_pi[not_enough_idx] = np.zeros(nn)[not_enough_idx]
Jpo_B_ps[not_enough_idx] = np.zeros(nn)[not_enough_idx]
# else:
Jpo_A_ps[enough_idx] = eta * np.ones(nn)[enough_idx]
Jpo_A_pi[enough_idx] = np.zeros(nn)[enough_idx]
Jpo_B_ps[enough_idx] = -eta * np.ones(nn)[enough_idx]
Jpo_B_pi[enough_idx] = eta * np.ones(nn)[enough_idx]
J['power_out_A', 'power_in'] = Jpo_A_pi
J['power_out_A', 'power_split_amount'] = Jpo_A_ps
J['power_out_B', 'power_in'] = Jpo_B_pi
J['power_out_B', 'power_split_amount'] = Jpo_B_ps
J['component_sizing_margin', 'power_in'] = 1 / inputs['power_rating']
J['component_sizing_margin', 'power_rating'] = - (inputs['power_in'] /
inputs['power_rating'] ** 2)
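# --- Illustrative usage sketch (not part of the original component file) ---
# A minimal OpenMDAO model with a single PowerSplit using the 'fraction' rule;
# the subsystem name, input values and efficiency are made-up example numbers.
def _power_split_example():
    from openmdao.api import Problem
    prob = Problem()
    prob.model.add_subsystem('split',
                             PowerSplit(num_nodes=3, rule='fraction', efficiency=0.97),
                             promotes=['*'])
    prob.setup()
    prob['power_in'] = np.array([100e3, 120e3, 90e3])          # W
    prob['power_split_fraction'] = np.array([0.5, 0.6, 0.4])
    prob.run_model()
    # power_out_A + power_out_B + heat_out should sum back to power_in
    return prob['power_out_A'], prob['power_out_B'], prob['heat_out']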
class FlowSplit(ExplicitComponent):
"""
Split incoming flow from one inlet into two outlets at a fractional ratio.
Inputs
------
mdot_in : float
Mass flow rate of incoming fluid (vector, kg/s)
mdot_split_fraction : float
Fraction of incoming mass flow directed to output A, must be in
range 0-1 inclusive (vector, dimensionless)
Outputs
-------
mdot_out_A : float
Mass flow rate directed to first output (vector, kg/s)
mdot_out_B : float
Mass flow rate directed to second output (vector, kg/s)
Options
-------
num_nodes : int
Number of analysis points to run (sets vec length; default 1)
"""
def initialize(self):
self.options.declare('num_nodes', default=1, desc='Number of analysis points')
def setup(self):
nn = self.options['num_nodes']
rng = np.arange(0, nn)
self.add_input('mdot_in', units='kg/s', shape=(nn,))
self.add_input('mdot_split_fraction', units=None, shape=(nn,), val=0.5)
self.add_output('mdot_out_A', units='kg/s', shape=(nn,))
self.add_output('mdot_out_B', units='kg/s', shape=(nn,))
self.declare_partials(['mdot_out_A'], ['mdot_in', 'mdot_split_fraction'], rows=rng, cols=rng)
self.declare_partials(['mdot_out_B'], ['mdot_in', 'mdot_split_fraction'], rows=rng, cols=rng)
def compute(self, inputs, outputs):
if np.any(inputs['mdot_split_fraction'] < 0) or np.any(inputs['mdot_split_fraction'] > 1):
raise RuntimeWarning(f"mdot_split_fraction of {inputs["mdot_split_fraction"]} has at least one element out of range [0, 1]")
outputs['mdot_out_A'] = inputs['mdot_in'] * inputs['mdot_split_fraction']
outputs['mdot_out_B'] = inputs['mdot_in'] * (1 - inputs['mdot_split_fraction'])
def compute_partials(self, inputs, J):
J['mdot_out_A', 'mdot_in'] = inputs['mdot_split_fraction']
J['mdot_out_A', 'mdot_split_fraction'] = inputs['mdot_in']
J['mdot_out_B', 'mdot_in'] = 1 - inputs['mdot_split_fraction']
J['mdot_out_B', 'mdot_split_fraction'] = - inputs['mdot_in']
class FlowCombine(ExplicitComponent):
"""
Combines two incoming flows into a single outgoing flow and does a weighted average
of their temperatures based on the mass flow rate of each to compute the outlet temp.
Inputs
------
mdot_in_A : float
Mass flow rate of fluid from first inlet, should be nonnegative (vector, kg/s)
mdot_in_B : float
Mass flow rate of fluid from second inlet, should be nonnegative (vector, kg/s)
T_in_A : float
Temperature of fluid from first inlet (vector, K)
T_in_B : float
Temperature of fluid from second inlet (vector, K)
Outputs
-------
mdot_out : float
Outgoing fluid mass flow rate (vector, kg/s)
T_out : float
Outgoing fluid temperature (vector, K)
Options
-------
num_nodes : int
Number of analysis points (scalar, default 1)
"""
def initialize(self):
self.options.declare('num_nodes', default=1, desc='Number of analysis points')
def setup(self):
nn = self.options['num_nodes']
rng = np.arange(0, nn)
self.add_input('mdot_in_A', units='kg/s', shape=(nn,))
self.add_input('mdot_in_B', units='kg/s', shape=(nn,))
self.add_input('T_in_A', units='K', shape=(nn,))
self.add_input('T_in_B', units='K', shape=(nn,))
self.add_output('mdot_out', units='kg/s', shape=(nn,))
self.add_output('T_out', units='K', shape=(nn,))
self.declare_partials(['mdot_out'], ['mdot_in_A', 'mdot_in_B'], rows=rng, cols=rng)
self.declare_partials(['T_out'], ['mdot_in_A', 'mdot_in_B', 'T_in_A', 'T_in_B'], rows=rng, cols=rng)
def compute(self, inputs, outputs):
mdot_A = inputs['mdot_in_A']
mdot_B = inputs['mdot_in_B']
outputs['mdot_out'] = mdot_A + mdot_B
# Weighted average of temperatures for output temperature
outputs['T_out'] = (mdot_A * inputs['T_in_A'] + mdot_B * inputs['T_in_B']) / (mdot_A + mdot_B)
def compute_partials(self, inputs, J):
nn = self.options['num_nodes']
J['mdot_out', 'mdot_in_A'] = np.ones((nn,))
J['mdot_out', 'mdot_in_B'] = np.ones((nn,))
mdot_A = inputs['mdot_in_A']
mdot_B = inputs['mdot_in_B']
mdot = mdot_A + mdot_B
T_A = inputs['T_in_A']
T_B = inputs['T_in_B']
J['T_out', 'mdot_in_A'] = (mdot * T_A - mdot_A * T_A - mdot_B * T_B) / (mdot**2)
J['T_out', 'mdot_in_B'] = (mdot * T_B - mdot_A * T_A - mdot_B * T_B) / (mdot**2)
J['T_out', 'T_in_A'] = mdot_A / mdot
J['T_out', 'T_in_B'] = mdot_B / mdot
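# --- Quick numerical check (added for illustration, arbitrary values) ---
# It reproduces the mixing rule used in FlowCombine.compute above: the outlet
# temperature is the mass-flow-weighted average of the two inlet temperatures.
def _flow_combine_check():
    mdot_A, mdot_B = np.array([2.0]), np.array([1.0])   # kg/s
    T_A, T_B = np.array([300.0]), np.array([330.0])     # K
    T_out = (mdot_A * T_A + mdot_B * T_B) / (mdot_A + mdot_B)
    assert np.allclose(T_out, 310.0)                     # (2*300 + 1*330) / 3
    return T_out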
|
from __future__ import division
import numpy as np
from openmdao.api import ExplicitComponent
from openmdao.api import Group
class PowerSplit(ExplicitComponent):
"""
A power split mechanism for mechanical or electrical power.
Inputs
------
power_in : float
Power fed to the splitter. (vector, W)
power_rating : float
Maximum rated power of the split mechanism. (scalar, W)
power_split_fraction : float
If ``'rule'`` is set to ``'fraction'``, sets the fraction of input power directed
to Output A (minus losses). (vector, dimensionless)
power_split_amount : float
If ``'rule'`` is set to ``'fixed'``, sets the amount of input power sent to Output A
(minus losses). (vector, W)
Outputs
-------
power_out_A : float
Power sent to first output (vector, W)
power_out_B : float
Power sent to second output (vector, W)
heat_out : float
Waste heat produced (vector, W)
component_cost : float
Nonrecurring cost of the component (scalar, USD)
component_weight : float
Weight of the component (scalar, kg)
component_sizing_margin : float
Equal to 1 when fed full rated power (vector, dimensionless)
Options
-------
num_nodes : int
Number of analysis points to run (sets vec length; default 1)
rule : str
Power split control rule to use; either ``'fixed'`` where a set
amount of power is sent to Output A or ``'fraction'`` where a
fraction of the total power is sent to Output A
efficiency : float
Component efficiency (default 1)
weight_inc : float
Weight per unit rated power
(default 0, kg/W)
weight_base : float
Base weight
(default 0, kg)
cost_inc : float
Nonrecurring cost per unit power
(default 0, USD/W)
cost_base : float
Base cost
(default 0 USD)
"""
def initialize(self):
# define control rules
self.options.declare('num_nodes', default=1, desc='Number of flight/control conditions')
self.options.declare('rule', default='fraction',
desc='Control strategy - fraction or fixed power')
self.options.declare('efficiency', default=1., desc='Efficiency (dimensionless)')
self.options.declare('weight_inc', default=0., desc='kg per input watt')
self.options.declare('weight_base', default=0., desc='kg base weight')
self.options.declare('cost_inc', default=0., desc='$ cost per input watt')
self.options.declare('cost_base', default=0., desc='$ cost base')
def setup(self):
nn = self.options['num_nodes']
self.add_input('power_in', units='W',
desc='Input shaft power or incoming electrical load', shape=(nn,))
self.add_input('power_rating', val=99999999, units='W', desc='Split mechanism power rating')
rule = self.options['rule']
if rule == 'fraction':
self.add_input('power_split_fraction', val=0.5,
desc='Fraction of power to output A', shape=(nn,))
elif rule == 'fixed':
self.add_input('power_split_amount', units='W',
desc='Raw amount of power to output A', shape=(nn,))
else:
msg = 'Specify either "fraction" or "fixed" as power split control rule'
raise ValueError(msg)
eta = self.options['efficiency']
weight_inc = self.options['weight_inc']
weight_base = self.options['weight_base']
cost_inc = self.options['cost_inc']
cost_base = self.options['cost_base']
self.add_output('power_out_A', units='W', desc='Output power or load to A', shape=(nn,))
self.add_output('power_out_B', units='W', desc='Output power or load to B', shape=(nn,))
self.add_output('heat_out', units='W', desc='Waste heat out', shape=(nn,))
self.add_output('component_cost', units='USD', desc='Splitter component cost')
self.add_output('component_weight', units='kg', desc='Splitter component weight')
self.add_output('component_sizing_margin', desc='Fraction of rated power', shape=(nn,))
if rule == 'fraction':
self.declare_partials(['power_out_A', 'power_out_B'],
['power_in', 'power_split_fraction'],
rows=range(nn), cols=range(nn))
elif rule == 'fixed':
self.declare_partials(['power_out_A', 'power_out_B'],
['power_in', 'power_split_amount'],
rows=range(nn), cols=range(nn))
self.declare_partials('heat_out', 'power_in', val=(1 - eta) * np.ones(nn),
rows=range(nn), cols=range(nn))
self.declare_partials('component_cost', 'power_rating', val=cost_inc)
self.declare_partials('component_weight', 'power_rating', val=weight_inc)
self.declare_partials('component_sizing_margin', 'power_in',
rows=range(nn), cols=range(nn))
self.declare_partials('component_sizing_margin', 'power_rating')
def compute(self, inputs, outputs):
nn = self.options['num_nodes']
rule = self.options['rule']
eta = self.options['efficiency']
weight_inc = self.options['weight_inc']
weight_base = self.options['weight_base']
cost_inc = self.options['cost_inc']
cost_base = self.options['cost_base']
if rule == 'fraction':
outputs['power_out_A'] = inputs['power_in'] * inputs['power_split_fraction'] * eta
outputs['power_out_B'] = inputs['power_in'] * (1 - inputs['power_split_fraction']) * eta
elif rule == 'fixed':
# check to make sure enough power is available
# if inputs['power_in'] < inputs['power_split_amount']:
not_enough_idx = np.where(inputs['power_in'] < inputs['power_split_amount'])
po_A = np.zeros(nn)
po_B = np.zeros(nn)
po_A[not_enough_idx] = inputs['power_in'][not_enough_idx] * eta
po_B[not_enough_idx] = np.zeros(nn)[not_enough_idx]
# else:
enough_idx = np.where(inputs['power_in'] >= inputs['power_split_amount'])
po_A[enough_idx] = inputs['power_split_amount'][enough_idx] * eta
po_B[enough_idx] = (inputs['power_in'][enough_idx] -
inputs['power_split_amount'][enough_idx]) * eta
outputs['power_out_A'] = po_A
outputs['power_out_B'] = po_B
outputs['heat_out'] = inputs['power_in'] * (1 - eta)
outputs['component_cost'] = inputs['power_rating'] * cost_inc + cost_base
outputs['component_weight'] = inputs['power_rating'] * weight_inc + weight_base
outputs['component_sizing_margin'] = inputs['power_in'] / inputs['power_rating']
def compute_partials(self, inputs, J):
nn = self.options['num_nodes']
rule = self.options['rule']
eta = self.options['efficiency']
if rule == 'fraction':
J['power_out_A', 'power_in'] = inputs['power_split_fraction'] * eta
J['power_out_A', 'power_split_fraction'] = inputs['power_in'] * eta
J['power_out_B', 'power_in'] = (1 - inputs['power_split_fraction']) * eta
J['power_out_B', 'power_split_fraction'] = -inputs['power_in'] * eta
elif rule == 'fixed':
not_enough_idx = np.where(inputs['power_in'] < inputs['power_split_amount'])
enough_idx = np.where(inputs['power_in'] >= inputs['power_split_amount'])
# if inputs['power_in'] < inputs['power_split_amount']:
Jpo_A_pi = np.zeros(nn)
Jpo_A_ps = np.zeros(nn)
Jpo_B_pi = np.zeros(nn)
Jpo_B_ps = np.zeros(nn)
Jpo_A_pi[not_enough_idx] = eta * np.ones(nn)[not_enough_idx]
Jpo_A_ps[not_enough_idx] = np.zeros(nn)[not_enough_idx]
Jpo_B_pi[not_enough_idx] = np.zeros(nn)[not_enough_idx]
Jpo_B_ps[not_enough_idx] = np.zeros(nn)[not_enough_idx]
# else:
Jpo_A_ps[enough_idx] = eta * np.ones(nn)[enough_idx]
Jpo_A_pi[enough_idx] = np.zeros(nn)[enough_idx]
Jpo_B_ps[enough_idx] = -eta * np.ones(nn)[enough_idx]
Jpo_B_pi[enough_idx] = eta * np.ones(nn)[enough_idx]
J['power_out_A', 'power_in'] = Jpo_A_pi
J['power_out_A', 'power_split_amount'] = Jpo_A_ps
J['power_out_B', 'power_in'] = Jpo_B_pi
J['power_out_B', 'power_split_amount'] = Jpo_B_ps
J['component_sizing_margin', 'power_in'] = 1 / inputs['power_rating']
J['component_sizing_margin', 'power_rating'] = - (inputs['power_in'] /
inputs['power_rating'] ** 2)
class FlowSplit(ExplicitComponent):
"""
Split incoming flow from one inlet into two outlets at a fractional ratio.
Inputs
------
mdot_in : float
Mass flow rate of incoming fluid (vector, kg/s)
mdot_split_fraction : float
Fraction of incoming mass flow directed to output A, must be in
range 0-1 inclusive (vector, dimensionless)
Outputs
-------
mdot_out_A : float
Mass flow rate directed to first output (vector, kg/s)
mdot_out_B : float
Mass flow rate directed to second output (vector, kg/s)
Options
-------
num_nodes : int
Number of analysis points to run (sets vec length; default 1)
"""
def initialize(self):
self.options.declare('num_nodes', default=1, desc='Number of analysis points')
def setup(self):
nn = self.options['num_nodes']
rng = np.arange(0, nn)
self.add_input('mdot_in', units='kg/s', shape=(nn,))
self.add_input('mdot_split_fraction', units=None, shape=(nn,), val=0.5)
self.add_output('mdot_out_A', units='kg/s', shape=(nn,))
self.add_output('mdot_out_B', units='kg/s', shape=(nn,))
self.declare_partials(['mdot_out_A'], ['mdot_in', 'mdot_split_fraction'], rows=rng, cols=rng)
self.declare_partials(['mdot_out_B'], ['mdot_in', 'mdot_split_fraction'], rows=rng, cols=rng)
def compute(self, inputs, outputs):
if np.any(inputs['mdot_split_fraction'] < 0) or np.any(inputs['mdot_split_fraction'] > 1):
raise RuntimeWarning(f"mdot_split_fraction of {inputs['mdot_split_fraction']} has at least one element out of range [0, 1]")
outputs['mdot_out_A'] = inputs['mdot_in'] * inputs['mdot_split_fraction']
outputs['mdot_out_B'] = inputs['mdot_in'] * (1 - inputs['mdot_split_fraction'])
def compute_partials(self, inputs, J):
J['mdot_out_A', 'mdot_in'] = inputs['mdot_split_fraction']
J['mdot_out_A', 'mdot_split_fraction'] = inputs['mdot_in']
J['mdot_out_B', 'mdot_in'] = 1 - inputs['mdot_split_fraction']
J['mdot_out_B', 'mdot_split_fraction'] = - inputs['mdot_in']
class FlowCombine(ExplicitComponent):
"""
Combines two incoming flows into a single outgoing flow and does a weighted average
of their temperatures based on the mass flow rate of each to compute the outlet temp.
Inputs
------
mdot_in_A : float
Mass flow rate of fluid from first inlet, should be nonnegative (vector, kg/s)
mdot_in_B : float
Mass flow rate of fluid from second inlet, should be nonnegative (vector, kg/s)
T_in_A : float
Temperature of fluid from first inlet (vector, K)
T_in_B : float
Temperature of fluid from second inlet (vector, K)
Outputs
-------
mdot_out : float
Outgoing fluid mass flow rate (vector, kg/s)
T_out : float
Outgoing fluid temperature (vector, K)
Options
-------
num_nodes : int
Number of analysis points (scalar, default 1)
"""
def initialize(self):
self.options.declare('num_nodes', default=1, desc='Number of analysis points')
def setup(self):
nn = self.options['num_nodes']
rng = np.arange(0, nn)
self.add_input('mdot_in_A', units='kg/s', shape=(nn,))
self.add_input('mdot_in_B', units='kg/s', shape=(nn,))
self.add_input('T_in_A', units='K', shape=(nn,))
self.add_input('T_in_B', units='K', shape=(nn,))
self.add_output('mdot_out', units='kg/s', shape=(nn,))
self.add_output('T_out', units='K', shape=(nn,))
self.declare_partials(['mdot_out'], ['mdot_in_A', 'mdot_in_B'], rows=rng, cols=rng)
self.declare_partials(['T_out'], ['mdot_in_A', 'mdot_in_B', 'T_in_A', 'T_in_B'], rows=rng, cols=rng)
def compute(self, inputs, outputs):
mdot_A = inputs['mdot_in_A']
mdot_B = inputs['mdot_in_B']
outputs['mdot_out'] = mdot_A + mdot_B
# Weighted average of temperatures for output temperature
outputs['T_out'] = (mdot_A * inputs['T_in_A'] + mdot_B * inputs['T_in_B']) / (mdot_A + mdot_B)
def compute_partials(self, inputs, J):
nn = self.options['num_nodes']
J['mdot_out', 'mdot_in_A'] = np.ones((nn,))
J['mdot_out', 'mdot_in_B'] = np.ones((nn,))
mdot_A = inputs['mdot_in_A']
mdot_B = inputs['mdot_in_B']
mdot = mdot_A + mdot_B
T_A = inputs['T_in_A']
T_B = inputs['T_in_B']
J['T_out', 'mdot_in_A'] = (mdot * T_A - mdot_A * T_A - mdot_B * T_B) / (mdot**2)
J['T_out', 'mdot_in_B'] = (mdot * T_B - mdot_A * T_A - mdot_B * T_B) / (mdot**2)
J['T_out', 'T_in_A'] = mdot_A / mdot
J['T_out', 'T_in_B'] = mdot_B / mdot
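# --- Illustrative partials check (not part of the original component file) ---
# Sketch of spot-checking the analytic derivatives declared above against
# finite differences; the subsystem name and input values are assumptions.
def _check_flow_split_partials():
    from openmdao.api import Problem
    prob = Problem()
    prob.model.add_subsystem('split', FlowSplit(num_nodes=2), promotes=['*'])
    prob.setup()
    prob['mdot_in'] = np.array([1.0, 2.0])
    prob['mdot_split_fraction'] = np.array([0.3, 0.7])
    prob.run_model()
    # compare analytic partials from compute_partials() with finite differences
    return prob.check_partials(method='fd', compact_print=True)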
|
from enum import Enum
import gym
import numpy as np
from gym import spaces
from gym.utils import seeding
class Action(Enum):
decrease_attention = 0
increase_attention = 1
access_detector = 2
isolate_node = 3
forget_node = 4
class State(Enum):
healthy = 0
infected = 1
class MalwareEnv(gym.Env):
"""
Observations:
Type: Box(2)
Num Observation Min Max
0 Attention Level 0.05 1.0
1 Malware Rate 0.0 1.0
Actions:
Type: Discrete(5)
Num Action
0 Decrease attention
1 Increase attention
2 Access detector
3 Isolate node
4 Forget node
Reward:
Reward of -0.1 is awarded for accessing detector.
Reward of -0.2 is awarded for decreasing attention.
Reward of -0.8 is awarded for increasing attention.
Reward of 1 is awarded for isolation of infected node.
Reward of 1 is awarded for forgetting healthy node.
Reward of -1 is awarded for isolation of healthy node.
Reward of -1 is awarded for forgetting infected node.
Starting State:
Attention level is set to the minimum attention level (0.05).
Actual node state is set to either 'healthy' or 'infected'.
Episode Termination:
Node is either isolated or forgotten.
Episode length is greater than 100.
"""
def __init__(self, malware_prob: float = 0.9, seed: int = 100, log: bool = False):
self.min_attention = 0.05
self.max_attention = 1.0
self.min_rate = 0.0
self.max_rate = 1.0
self.attention_inc = 0.05
self.low = np.array([self.min_attention, self.min_rate], dtype=np.float32)
self.high = np.array([self.max_attention, self.max_rate], dtype=np.float32)
self.action_space = spaces.Discrete(5)
self.observation_space = spaces.Box(self.low, self.high, dtype=np.float32)
self.malware_prob = malware_prob
self.log = log
# (attention, malware_rate, health)
self.state = (None, None, None)
self.latest_action = None
self.actions = []
self.seed(seed)
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def start_step_policy(self, observation):
attention, malware_rate = observation
if attention > self.min_attention:
return Action.access_detector.value
return Action.increase_attention.value
def step(self, action):
if isinstance(action, np.ndarray):
action = np.argmax(action)
assert self.action_space.contains(action), f"{action} ({type(action)}) invalid"
action = Action(action)
if self.log:
self.actions.append(action)
attention, malware_rate, health = self.state
st = State(health)
if action == Action.decrease_attention:
attention = max(self.min_attention, attention - self.attention_inc)
if action == Action.increase_attention:
attention = min(self.max_attention, attention + self.attention_inc)
if action == Action.access_detector:
# Accessing a detector changes malware rate.
#
# When the node is healthy, there is a `1 - malware_prob` probability
# to observe malware. And malware rate depends on the attention level.
#
# Throw a "dice" in order to calculate the malware rate.
prob = self.np_random.uniform()
T = (1 - self.malware_prob) if st == State.healthy else self.malware_prob
mu = np.average([0, attention])
# sigma = 0.2
malware_rate = 0 if prob > T else self.np_random.normal(mu, 0.01)
malware_rate = max(self.min_rate, malware_rate)
malware_rate = min(self.max_rate, malware_rate)
# Agent does not observe the node health directly, only through
# malware rate.
self.state = np.array([attention, malware_rate, health])
self.latest_action = action
observation = np.array([attention, malware_rate])
reward = self.compute_reward(health, action)
done = action in {Action.isolate_node, Action.forget_node}
return observation, reward, done, {} # {"state": self.state}
def compute_reward(self, health, action):
if action == Action.decrease_attention:
return -0.2
if action == Action.increase_attention:
return -0.8
if action == Action.access_detector:
return -0.1
if action == Action.isolate_node:
return 1 * (health * 2 - 1)
if action == Action.forget_node:
return -1 * (health * 2 - 1)
return 0
def reset(self):
# The node is either healthy (0) or infected (1). When the node is infected,
# the agent observes malware requests depending on the attention level.
health = self.np_random.choice([0, 1])
attention = self.min_attention
malware_rate = 0
self.state = np.array([attention, malware_rate, health])
return np.array([attention, malware_rate])
def render(self, mode="human"):
attention, malware_rate, infected = self.state
print(f"\tattention: {attention} - malware rate: {malware_rate}", end=" - ")
print(f"health: {"infected" if infected else "healthy"}", end=" - ")
print(f"action: {self.latest_action}")
def close(self):
pass
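# --- Illustrative rollout sketch (not part of the environment itself) ---
# Runs one episode with uniformly random actions; the 100-step cap matches the
# episode length mentioned in the docstring, which the environment does not
# enforce internally, so it is applied here.
def _random_rollout(max_steps=100):
    env = MalwareEnv(log=True)
    obs = env.reset()
    total_reward = 0.0
    for _ in range(max_steps):
        action = env.action_space.sample()
        obs, reward, done, _info = env.step(action)
        total_reward += reward
        if done:
            break
    return total_reward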
|
from enum import Enum
import gym
import numpy as np
from gym import spaces
from gym.utils import seeding
class Action(Enum):
decrease_attention = 0
increase_attention = 1
access_detector = 2
isolate_node = 3
forget_node = 4
class State(Enum):
healthy = 0
infected = 1
class MalwareEnv(gym.Env):
"""
Observations:
Type: Box(2)
Num Observation Min Max
0 Attention Level 0.05 1.0
1 Malware Rate 0.0 1.0
Actions:
Type: Discrete(5)
Num Action
0 Decrease attention
1 Increase attention
2 Access detector
3 Isolate node
4 Forget node
Reward:
Reward of -0.1 is awarded for accessing detector.
Reward of -0.2 is awarded for decreasing attention.
Reward of -0.8 is awarded for increasing attention.
Reward of 1 is awarded for isolation of infected node.
Reward of 1 is awarded for forgetting healthy node.
Reward of -1 is awarded for isolation of healthy node.
Reward of -1 is awarded for forgetting infected node.
Starting State:
Attention level is set to the minimum attention level (0.05).
Actual node state is set to either 'healthy' or 'infected'.
Episode Termination:
Node is either isolated or forgotten.
Episode length is greater than 100.
"""
def __init__(self, malware_prob: float = 0.9, seed: int = 100, log: bool = False):
self.min_attention = 0.05
self.max_attention = 1.0
self.min_rate = 0.0
self.max_rate = 1.0
self.attention_inc = 0.05
self.low = np.array([self.min_attention, self.min_rate], dtype=np.float32)
self.high = np.array([self.max_attention, self.max_rate], dtype=np.float32)
self.action_space = spaces.Discrete(5)
self.observation_space = spaces.Box(self.low, self.high, dtype=np.float32)
self.malware_prob = malware_prob
self.log = log
# (attention, malware_rate, health)
self.state = (None, None, None)
self.latest_action = None
self.actions = []
self.seed(seed)
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def start_step_policy(self, observation):
attention, malware_rate = observation
if attention > self.min_attention:
return Action.access_detector.value
return Action.increase_attention.value
def step(self, action):
if isinstance(action, np.ndarray):
action = np.argmax(action)
assert self.action_space.contains(action), f"{action} ({type(action)}) invalid"
action = Action(action)
if self.log:
self.actions.append(action)
attention, malware_rate, health = self.state
st = State(health)
if action == Action.decrease_attention:
attention = max(self.min_attention, attention - self.attention_inc)
if action == Action.increase_attention:
attention = min(self.max_attention, attention + self.attention_inc)
if action == Action.access_detector:
# Accessing a detector changes malware rate.
#
# When the node is healthy, there is a `1 - malware_prob` probability
# to observe malware. And malware rate depends on the attention level.
#
# Throw a "dice" in order to calculate the malware rate.
prob = self.np_random.uniform()
T = (1 - self.malware_prob) if st == State.healthy else self.malware_prob
mu = np.average([0, attention])
# sigma = 0.2
malware_rate = 0 if prob > T else self.np_random.normal(mu, 0.01)
malware_rate = max(self.min_rate, malware_rate)
malware_rate = min(self.max_rate, malware_rate)
# Agent does not observe the node health directly, only through
# malware rate.
self.state = np.array([attention, malware_rate, health])
self.latest_action = action
observation = np.array([attention, malware_rate])
reward = self.compute_reward(health, action)
done = action in {Action.isolate_node, Action.forget_node}
return observation, reward, done, {} # {"state": self.state}
def compute_reward(self, health, action):
if action == Action.decrease_attention:
return -0.2
if action == Action.increase_attention:
return -0.8
if action == Action.access_detector:
return -0.1
if action == Action.isolate_node:
return 1 * (health * 2 - 1)
if action == Action.forget_node:
return -1 * (health * 2 - 1)
return 0
def reset(self):
# The node is either healthy (0) or infected (1). When the node is infected,
# the agent observes malware requests depending on the attention level.
health = self.np_random.choice([0, 1])
attention = self.min_attention
malware_rate = 0
self.state = np.array([attention, malware_rate, health])
return np.array([attention, malware_rate])
def render(self, mode="human"):
attention, malware_rate, infected = self.state
print(f"\tattention: {attention} - malware rate: {malware_rate}", end=" - ")
print(f"health: {'infected' if infected else 'healthy'}", end=" - ")
print(f"action: {self.latest_action}")
def close(self):
pass
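# --- Illustrative heuristic episode (added for demonstration only) ---
# Drives the environment with its own start_step_policy() heuristic: raise
# attention once, then keep querying the detector. It never isolates or
# forgets the node, so the loop relies on the step cap to terminate.
def _heuristic_episode(max_steps=100):
    env = MalwareEnv()
    obs = env.reset()
    rewards = []
    for _ in range(max_steps):
        action = env.start_step_policy(obs)
        obs, reward, done, _info = env.step(action)
        rewards.append(reward)
        if done:
            break
    return rewards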
|
# Owner(s): ["oncall: fx"]
import builtins
import contextlib
import copy
import functools
import inspect
import math
import numbers
import operator
import os
import pickle
import sys
import torch
import traceback
import typing
import types
import warnings
import unittest
from math import sqrt
from torch.multiprocessing import Process
from torch.testing import FileCheck
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests
import torch.utils._pytree as pytree
import torch.fx._pytree as fx_pytree
from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH, CodeGen
from torch.fx.node import Target, Argument, _format_arg
from torch.fx.passes import shape_prop
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx.operator_schemas import get_signature_for_torch_op
from copy import deepcopy
from collections import namedtuple
from torch.fx.proxy import TraceError
from torch.fx._compatibility import _BACK_COMPAT_OBJECTS, _MARKED_WITH_COMATIBLITY
from fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401
from fx.test_dce_pass import TestDCE # noqa: F401
from fx.test_fx_const_fold import TestConstFold # noqa: F401
from fx.test_fx_param_shape_control_flow import TestConstParamShapeInControlFlow # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import AnnotationsTest # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import TypeCheckerTest # noqa: F401
from typing import Any, Callable, Dict, NamedTuple, List, Optional, Set, Tuple, Union
from torch.testing._internal.common_utils import (
IS_FBCODE,
IS_MACOS,
IS_WINDOWS,
TEST_WITH_ROCM,
find_library_location,
run_tests,
)
from torch.testing._internal.jit_utils import JitTestCase
from fx.named_tup import MyNamedTup
try:
from torchvision import models as torchvision_models
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class SimpleTest(torch.nn.Module):
def forward(self, x):
return torch.relu(x + 3.0)
def a_non_torch_leaf(a, b):
return a + b
# Used for test_autowrap_function. Autowrapped functions need to be global
def fx_int(x: float) -> int:
return int(x)
def fx_int_x2(x: float) -> int:
return int(x) * 2
# used in test_pytree. It's all the way out here because pickling a GraphModule
# that uses Point errors out if Point is local to the function
Point = namedtuple('Point', ['x', 'y'])
# Test wrap() passing both a function name as well as a function
# directly
def a_lifted_leaf(a, b):
return a[0] + a[1] + b
wrap('a_lifted_leaf')
# Test wrapping twice doesn't break anything
wrap('a_lifted_leaf')
def a_lifted_leaf2(a, b):
return a[0] + a[1] + b
wrap(a_lifted_leaf2)
wrap('len')
wrap('getattr')
def wrapped_named_tup(p1, *, p2):
return p1.x + p2.y
wrap(wrapped_named_tup)
@wrap
def wrapped_via_decorator(a):
return a + 1
wrap('wrapped_with_submodule')
def wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d):
return batchnorm1d(x)
real_wrapped_via_decorator = wrapped_via_decorator
real_a_lifed_leaf = a_lifted_leaf
real_a_lifed_leaf2 = a_lifted_leaf2
_sqrt = sqrt
wrap('wrapper_fn')
def wrapper_fn(x):
return torch.foo(x)
class Pair(NamedTuple):
x : torch.Tensor
y : torch.Tensor
def _custom_fx_repr_fn(self) -> str:
return f"Pair(x={_format_arg(self.x)}, y={_format_arg(self.y)})"
# for testing pytrees
class Foo(object): # noqa: B209
def __init__(self, a, b):
self.a = a
self.b = b
class TestFX(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature-flagged.
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
if not (TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS):
lib_file_path = find_library_location('libtorchbind_test.so')
torch.ops.load_library(str(lib_file_path))
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):
"""Check that an nn.Module's results match the GraphModule version
for a given set of args/kwargs.
"""
kwargs = kwargs if kwargs else {}
ref_outs = m(*args, **kwargs)
gm = symbolic_trace(m)
gm.graph.lint()
test_outs = gm(*args, **kwargs)
self.assertEqual(ref_outs, test_outs)
def test_graph_module(self):
class MySub(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.w + x
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin = torch.nn.Linear(4, 3)
self.sub_mod = MySub()
self.w = torch.nn.Parameter(torch.rand(3))
def forward(self, A, B, c):
t = torch.sigmoid(A) + self.lin(c)
return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))
m = MyModule()
gm = symbolic_trace(m)
ms = torch.jit.script(gm)
class M2(torch.nn.Module):
def forward(self, A):
m, idx = torch.max(A, 0)
return m + 1, idx + 1
m2 = M2()
gm2 = symbolic_trace(m2)
class T(torch.nn.Module):
def forward(self, A, b=4, *args, c=5, **kwargs):
x = A + 1 + args[0] + kwargs['3']
return x
t = T()
symbolic_trace(t)
# test for issue described at https://github.com/pytorch/pytorch/issues/63883
class M3(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
m3 = M3()
gm3 = symbolic_trace(m3)
new_instance = gm3.__new__(type(gm3))
new_instance.__init__(gm3, gm3.graph)
x = torch.randn(5, 3)
torch.testing.assert_allclose(new_instance(x), torch.relu(x))
def test_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(torch.sin(x + y), gm(x, y))
def test_args_kwargs(self):
class T(torch.nn.Module):
def forward(self, *args, **kwargs):
x = args[0] + kwargs['foo']
return x
t = T()
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_args_kwargs_no_self(self):
class T(torch.nn.Module):
def forward(*args, **kwargs): # noqa: B902
self = args[0]
return torch.relu(args[1])
t = T()
with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'):
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_fx_shifts(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x << 3, x >> 3
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_fx_and_or(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x & x, x | x
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_dict(self):
class MyDictMod(torch.nn.Module):
def forward(self, d):
return d['3'].relu(), {'4' : d['3'].neg()}
input_dict = {'3': torch.rand(3, 4)}
m = MyDictMod()
self.checkGraphModule(m, (input_dict,))
def test_matmul_tracing(self):
const = torch.randn(3)
def matmul_f(x):
return x @ const
mod = symbolic_trace(matmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), matmul_f(inp))
def rmatmul_f(x):
return const @ x
mod = symbolic_trace(rmatmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), rmatmul_f(inp))
def test_disallow_override(self):
# Custom delegate to disallow in-place tensor operations
class NoMutableCallTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
name = target if isinstance(target, str) else torch.typename(target)
if name[-1] == '_':
raise RuntimeError('In-place operations are not supported')
return super().create_node(kind, target, args, kwargs, name)
# Test method
class MyInplaceMod(torch.nn.Module):
def forward(self, x):
x.add_(3.0)
return x
m = MyInplaceMod()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m)
# Test free function
class MyInplaceMod2(torch.nn.Module):
def forward(self, x):
torch.log_(x)
return x
m2 = MyInplaceMod2()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m2)
# Test symbolic node as an arg
class MyInplaceMod3(torch.nn.Module):
def forward(self, x):
y = torch.ones(3, 4)
y.add_(x)
return x
m3 = MyInplaceMod3()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m3)
def test_leaf_module(self):
# Custom delegate to make it so that there are no leaf modules, everything
# should get traced through
class NoLeafModulesTracer(Tracer):
def is_leaf_module(self, m, qualname):
return False
class MyReluMod(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x)
mrm = MyReluMod()
sym = NoLeafModulesTracer().trace(mrm)
for node in sym.nodes:
self.assertNotEqual(node.op, 'call_module')
sym.lint()
def test_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf2', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_wrapped_via_decorator(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(m).transform()
self.assertIn('wrapped_via_decorator', transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
m = symbolic_trace(M())
self.assertIn("wrapped_with_submodule", m.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), m(input))
def test_wrapped_retrace(self):
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
retraced = symbolic_trace(m)
self.assertIn('wrapped_via_decorator', retraced.code)
self.assertEqual(retraced(0), 1)
def test_graph_edit_with_proxy(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
gm.graph.lint()
self.assertEqual(gm(3, 4), 14)
def test_graph_unique_names(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
seen_names : Set[str] = set()
for node in gm.graph.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_stack_traces(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
tracer = torch.fx.Tracer()
tracer.record_stack_traces = True
graph = tracer.trace(M())
# saving the original list because we will insert new nodes as a part of a test
orig_graph_nodes = list(graph.nodes)
for node in orig_graph_nodes:
if node.op == 'output':
continue
self.assertTrue(node.stack_trace is not None)
assert 'test_fx.py' in node.stack_trace
# verify that copying the node does not lose the stack trace
new_node = graph.node_copy(node)
self.assertTrue(new_node.stack_trace is not None)
assert 'test_fx.py' in new_node.stack_trace
def test_graph_unique_names_manual(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1')
c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
graph2 = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
graph2.graph_copy(graph, val_map)
seen_names : Set[str] = set()
for node in graph2.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_unpack(self):
class M(torch.nn.Module):
def forward(self, a, b):
c, d = a
return c + d + b
a = (torch.rand(1), torch.rand(1))
b = torch.rand(1)
m = M()
self.checkGraphModule(m, (a, b))
def test_native_callable(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
raise unittest.SkipTest("non-portable load_library call used in test")
# This test exercises the case where we use FX to translate from Python
# code to some native callable object
#
# For the purposes of testing, we use ElementwiseInterpreter defined
# in test_custom_class.cpp.
#
# We test that we can
# 1) Construct a native callable from FX IR
# 2) Construct a drop-in replacement module that delegates to the
# native callable rather than the original code
# 3) Run both the original code and native callable wrapper with
# equivalent results
# 4) TorchScript compile the native callable wrapper and confirm
# equivalent results with the reference
# 5) TorchScript serialize and deserialize the native callable
# and confirm equivalent results with the reference
# We use this simple Module as a reference computation
class MySimpleMod(torch.nn.Module):
def forward(self, x):
return 3.0 * x + x
msm = MySimpleMod()
# This is what a lowering pass might look like: a function that takes
# a valid nn.Module, symbolically traces it, lowers the Module to some
# representation, and wraps that representation up into another
# nn.Module instance that handles dispatch to the compiled/lowered code.
def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:
# ===== Stage 1: Symbolic trace the module =====
mod = symbolic_trace(orig_mod)
# ===== Stage 2: Lower GraphModule representation to the C++
# interpreter's instruction format ======
instructions = []
constant_idx = 0
constants = {}
fn_input_names = []
target_to_name = {
operator.add : "add",
operator.mul : "mul"
}
output_node : Optional[Node] = None
# For each instruction, create a triple
# (instruction_name : str, inputs : List[str], output : str)
# to feed into the C++ interpreter
for n in mod.graph.nodes:
target, args, out_name = n.target, n.args, n.name
assert len(n.kwargs) == 0, "kwargs currently not supported"
if n.op == 'placeholder':
# Placeholders specify function argument names. Save these
# for later when we generate the wrapper GraphModule
fn_input_names.append(target)
elif n.op == 'call_function':
assert target in target_to_name, "Unsupported call target " + target
arg_names = []
for arg in args:
if not isinstance(arg, Node):
# Pull out constants. These constants will later be
# fed to the interpreter C++ object via add_constant()
arg_name = f'constant_{constant_idx}'
constants[arg_name] = torch.tensor(
[arg] if isinstance(arg, numbers.Number) else arg)
arg_names.append(arg_name)
constant_idx += 1
else:
arg_names.append(arg.name)
instructions.append((target_to_name[target], arg_names, out_name))
elif n.op == 'output':
if output_node is not None:
raise RuntimeError('Multiple output nodes!')
output_node = n
else:
raise RuntimeError('Unsupported opcode ' + n.op)
interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()
# Load constants
for k, v in constants.items():
interpreter.add_constant(k, v)
# Specify names for positional input arguments
interpreter.set_input_names(fn_input_names)
# Load instructions
interpreter.set_instructions(instructions)
# Specify name for single output
assert isinstance(output_node.args[0], torch.fx.Node)
interpreter.set_output_name(output_node.args[0].name)
# ===== Stage 3: Create a wrapper GraphModule around the interpreter =====
class WrapperModule(torch.nn.Module):
def __init__(self, interpreter):
super().__init__()
self.interpreter = interpreter
wrapper = WrapperModule(interpreter)
# Create a graph that: 1) Takes function arguments 2) Invokes the interpreter
# 3) Returns the specified return value
# FIXME: The following code could be greatly simplified by symbolic_trace'ing
# the wrapper with a Tracer that considers the Wrapper instance a root
# module, however, I can't get `__call__` exposed on TorchBind classes
# without it messing up Python `hasattr` for some reason. More digging
# into CPython's implementation of hasattr is probably in order...
graph = torch.fx.Graph()
# Add placeholders for fn inputs
placeholder_nodes = []
for name in fn_input_names:
placeholder_nodes.append(graph.create_node('placeholder', name))
# Get the interpreter object
interpreter_node = graph.create_node('get_attr', 'interpreter')
# Add a node to call the interpreter instance
output_node = graph.create_node(
op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))
# Register output
graph.output(output_node)
graph.lint()
# Return final GraphModule!!!
return GraphModule(wrapper, graph)
# Lower GraphModule to C++ interpreter
lowered = lower_to_elementwise_interpreter(msm)
# Compare correctness with original module
x = torch.rand(3, 4)
ref_out = msm(x)
test_out = lowered(x)
torch.testing.assert_close(test_out, ref_out)
# Test TorchScript compilation
scripted_lowered = torch.jit.script(lowered)
script_out = scripted_lowered(x)
torch.testing.assert_close(script_out, ref_out)
# Test TorchScript ser/de
import_copy = self.getExportImportCopy(scripted_lowered)
imported_out = import_copy(x)
torch.testing.assert_close(imported_out, ref_out)
def test_reserved_getattr(self):
"""Ensure that we do not name any nodes with a reserved builtin like `getattr`"""
class M(torch.nn.Module):
def forward(self, a):
return a.foo.bar.baz
m = M()
m_g = symbolic_trace(m)
m_g.graph.lint()
for node in m_g.graph.nodes:
self.assertTrue(node.name != "getattr")
def test_trace_buffer_slice(self):
bs, d_hid = 10, 23
class ExampleCode(torch.nn.Module):
def __init__(self):
super().__init__()
self.mm_param = torch.nn.Parameter(torch.randn(d_hid, d_hid))
self.mm_param2 = torch.nn.Parameter(torch.randn(d_hid, d_hid))
self.lin = torch.nn.Linear(d_hid, d_hid)
self.register_buffer('buffer', torch.randn(bs + 100, d_hid))
def forward(self, x):
x = torch.mm(x, self.mm_param)
skip_connection = x
x = torch.relu(x)
x = torch.mm(x, self.mm_param) + self.buffer[:x.shape[0]]
x = self.lin(x)
x = torch.relu(x)
x = x + skip_connection
x = torch.mm(x, self.mm_param2)
x = self.lin(x)
return x
ec = ExampleCode()
traced = torch.fx.symbolic_trace(ec)
x = torch.randn(bs, d_hid)
torch.testing.assert_allclose(ec(x), traced(x))
def test_node_tagging(self):
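# A Tracer subclass can attach custom metadata to every Node it creates by
# overriding create_node(); the loop below checks the tag survives tracing.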
class TaggingTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
n = super().create_node(kind, target, args, kwargs, name)
n.tag = 'foo'
return n
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = TaggingTracer().trace(m)
g.lint()
for n in g.nodes:
self.assertTrue(hasattr(n, 'tag'))
self.assertEqual(n.tag, 'foo')
def test_tensor_attribute(self):
class TensorAttribute(torch.nn.Module):
def __init__(self):
super().__init__()
self.tensor = torch.rand(3, 4)
def forward(self, x):
return torch.nn.functional.linear(x, self.tensor)
ta = TensorAttribute()
traced = symbolic_trace(ta)
traced(torch.rand(4, 4))
class WrapperForQualname(torch.nn.Module):
def __init__(self):
super().__init__()
self.ta = TensorAttribute()
def forward(self, x):
return torch.nn.functional.linear(x, self.ta.tensor)
wfq = WrapperForQualname()
traced2 = symbolic_trace(wfq)
traced2.graph.lint()
traced2(torch.rand(4, 4))
def test_tensor_attribute_coalesced(self):
def count_attrs(fx_module):
targets = set()
for node in fx_module.graph.nodes:
if node.op == 'get_attr':
targets.add(node.target)
return len(targets)
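# The same tensor object captured twice should coalesce into a single
# get_attr target; two distinct tensor objects should produce two targets.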
val = torch.tensor(5)
def f(x):
return x + val + val
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 1)
val2 = torch.tensor(5)
def f(x):
val = torch.tensor(5)
return x + val + val2
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 2)
def test_symbolic_trace_sequential(self):
class Simple(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
seq = torch.nn.Sequential(
Simple(),
Simple(),
Simple()
)
traced = symbolic_trace(seq)
traced.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(traced(x), seq(x))
def test_tensor_constant(self):
class ConstTensor(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.linear(x, torch.zeros(3, 4))
ct = ConstTensor()
traced = symbolic_trace(ct)
traced.graph.lint()
traced(torch.rand(4, 4))
def test_pickle_graphmodule(self):
class Nested(torch.nn.Module):
def __init__(self):
super().__init__()
self.st = torch.nn.Linear(4, 4)
def forward(self, x):
return self.st(x)
n = Nested()
traced = symbolic_trace(n)
traced.graph.lint()
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(loaded(x), traced(x))
def test_pickle_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(loaded(x, y), gm(x, y))
def test_all_input_nodes(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.placeholder('x')
b : torch.fx.Node = graph.call_module('linear_mod', args=(a,))
c : torch.fx.Node = graph.get_attr('y_attr')
d : torch.fx.Node = graph.call_function(operator.add, args=(b, c))
e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0))
graph.output(e)
graph.lint()
self.assertEqual(b.all_input_nodes, [a])
self.assertEqual(c.all_input_nodes, [])
self.assertEqual(d.all_input_nodes, [b, c])
self.assertEqual(e.all_input_nodes, [d])
def test_deepcopy_graphmodule_with_transform(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
def transform(traced):
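# graph_copy() copies every node of the source graph into new_graph and
# returns the value that corresponded to the old graph's output; that value
# is then fed through an extra `neg` call before being registered as output.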
new_graph = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_value = new_graph.graph_copy(traced.graph, val_map)
relu_out = new_graph.create_node(
op='call_method', target='neg', args=(output_value,), kwargs={})
new_graph.output(relu_out)
return GraphModule(traced, new_graph)
transformed = transform(traced)
transformed.graph.lint()
copied = copy.deepcopy(transformed)
self.assertNotEqual(id(type(transformed)), id(type(copied)))
x = torch.randn(3, 4)
self.assertEqual(copied(x), transformed(x))
def test_deepcopy_with_submods_params(self):
class Bar(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
def forward(self, x):
return torch.relu(x) + self.param
class Baz(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.bar = Bar()
def forward(self, x):
return self.bar(x) - self.param
baz = Baz()
traced = symbolic_trace(baz)
traced.graph.lint()
copied = copy.deepcopy(traced)
copied.graph.lint()
def test_deepcopy_graph_with_tracer_cls(self):
class TestTracer(Tracer):
def is_leaf_module(self, module, name):
return True
g = Graph(tracer_cls=TestTracer)
x = g.placeholder("x")
g.output(x)
h = copy.deepcopy(g)
self.assertIsNotNone(h._tracer_cls)
self.assertTrue(g._tracer_cls == h._tracer_cls)
def test_unpack_list_better_error(self):
class SomeArgs(torch.nn.Module):
def forward(self, a, b):
return torch.rand(3, 4)
class UnpacksList(torch.nn.Module):
def __init__(self):
super().__init__()
self.sa = SomeArgs()
def forward(self, x : list):
return self.sa(*x)
ul = UnpacksList()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ul)
def test_unpack_dict_better_error(self):
class SomeKwargs(torch.nn.Module):
def forward(self, x=3, y=4):
return torch.rand(3, 4)
class UnpacksDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.sk = SomeKwargs()
def forward(self, x : dict):
return self.sk(**x)
ud = UnpacksDict()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ud)
def test_pretty_print_targets(self):
# Test that Graph pretty-print prints friendly name for targets
# in `operator` and `builtins`
class SomeMod(torch.nn.Module):
def forward(self, x):
return torch.add(x.foo + x.bar, 3.0)
traced = symbolic_trace(SomeMod())
graph_str = str(traced.graph)
self.assertIn('builtins.getattr', graph_str)
self.assertIn('operator.add', graph_str)
self.assertIn('torch.add', graph_str)
def test_pretty_print_node(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.param: torch.nn.Parameter = torch.nn.Parameter(
torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x: torch.Tensor, y: int = 2):
return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0)
traced = symbolic_trace(M())
all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes])
FileCheck().check("x").check("placeholder") \
.check("y").check("placeholder") \
.check("getitem").check("call_function") \
.check("param").check("get_attr") \
.check("add").check("call_function") \
.check("linear").check("call_module") \
.check("clamp").check("call_method") \
.run(all_formatted)
def test_script_tensor_constant(self):
# TorchScript seems to ignore attributes that start with `__`.
# We used to call anonymous Tensor values `__tensor_constant*`, but
# they were getting ignored by script. Now they're called
# `_tensor_constant*`
class IHaveATensorConstant(torch.nn.Module):
def forward(self, x):
return x + torch.rand(3, 4)
traced = torch.fx.symbolic_trace(IHaveATensorConstant())
torch.jit.script(traced)
def test_autowrap_functions(self):
class AutowrapFnTest(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2)
class AutowrapFnTest2(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2) + fx_int_x2(x.shape[0] / 2)
# Check function(s) are wrapped
# `int` would normally throw a TypeError as argument can't be `Proxy`
tracer = Tracer(autowrap_functions=(fx_int,))
graph = tracer.trace(AutowrapFnTest())
traced = GraphModule(tracer.root, graph, 'test')
tracer_2 = Tracer(autowrap_functions=(fx_int, fx_int_x2))
tracer_2.trace(AutowrapFnTest2())
# Test scriptability
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(4)), 2)
def test_torch_fx_len(self):
class FXLenTest(torch.nn.Module):
def forward(self, x):
return len(x)
traced = symbolic_trace(FXLenTest())
self.assertEqual(traced(torch.rand(3, 4)), 3)
# Test scriptability
scripted = torch.jit.script(FXLenTest())
self.assertEqual(scripted(torch.rand(3)), 3)
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(3)), 3)
# Test non-proxy len
class FXLenTest2(torch.nn.Module):
def __init__(self):
super().__init__()
self.l = [3, 4, 5]
def forward(self, x):
return x + len(self.l)
traced2 = symbolic_trace(FXLenTest2())
inp = torch.rand(3, 4)
self.assertEqual(traced2(inp), inp + 3.0)
self.assertIs(len, builtins.len)
def test_torch_fx_getattr(self):
class FXGetattrTest(torch.nn.Module):
def forward(self, x):
return getattr(x, 'nonexistent_attr', torch.Tensor([2, 3]))
traced = symbolic_trace(FXGetattrTest())
self.assertEqual(traced(torch.rand(3, 4)), torch.Tensor([2, 3]))
def test_sqrt(self):
class Sqrt1(torch.nn.Module):
def forward(self, x):
return sqrt(x.size(0))
class Sqrt2(torch.nn.Module):
def forward(self, x):
return math.sqrt(x.size(0))
class Sqrt3(torch.nn.Module):
def forward(self, x):
return x + math.sqrt(2) + sqrt(2)
self.checkGraphModule(Sqrt1(), [torch.zeros(8)])
self.checkGraphModule(Sqrt2(), [torch.zeros(8)])
self.checkGraphModule(Sqrt3(), [torch.zeros(8)])
self.assertIs(sqrt, _sqrt)
self.assertIs(math.sqrt, _sqrt)
def test_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
out = gm(input)
self.assertEqual(out, ref_out)
def test_pickle_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
self.assertEqual(loaded(input), gm(input))
def test_pretty_print(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
printed = str(traced)
assert 'SimpleTest()' in printed
assert 'torch.relu' in printed
def test_pretty_print_graph(self):
class KwargPrintTest(torch.nn.Module):
def forward(self, x):
return torch.squeeze(x + 3.0, dim=2)
st = KwargPrintTest()
traced = symbolic_trace(st)
traced.graph.lint()
stringed = str(traced.graph)
for s in ['args', 'kwargs', '#users']:
assert s in stringed
def test_custom_proxy_type(self):
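# TensorPair is a plain class here: tracing records x.add / s.mul as
# call_method nodes on Proxies, and the real methods run only when the
# resulting GraphModule is called with concrete TensorPair inputs.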
class TensorPair:
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair(x : TensorPair, y : TensorPair):
s = x.add(y)
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair(x, y)
traced = symbolic_trace(use_tensor_pair)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_type_literal(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_literal(x : TensorPair):
s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3)))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair_literal(x)
traced = symbolic_trace(use_tensor_pair_literal)
traced_out = traced(x)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_dynamic_value(self):
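# ProxyableClassMeta makes TensorPair construction traceable: building
# TensorPair(y, y) from a Proxy argument is recorded as a call_function
# node in the graph instead of being executed eagerly during tracing.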
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor):
s = x.add(TensorPair(y, y))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = torch.randn(5, 3)
ref_out = use_tensor_pair_ctor(x, y)
traced = symbolic_trace(use_tensor_pair_ctor)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_input_dependent_control_flow(self):
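# Because ZeroTensor construction is recorded as a node rather than executed
# during tracing, the data-dependent branch in __init__ only runs at call
# time with real tensors.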
class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, inp):
if inp.sum() == 0:
self.is_zero = True
self.tensor = torch.tensor([])
else:
self.is_zero = False
self.tensor = inp
def add(self, other):
if self.is_zero:
return ZeroTensor(other.tensor)
elif other.is_zero:
return self
def use_zero_tensor(x : torch.Tensor, y : torch.Tensor):
return ZeroTensor(x + y)
x, y = torch.randn(5, 3), torch.randn(5, 3)
ref_out = use_zero_tensor(x, y)
traced = symbolic_trace(use_zero_tensor)
traced_out = traced(x, y)
self.assertEqual(traced_out.is_zero, ref_out.is_zero)
self.assertEqual(traced_out.tensor, ref_out.tensor)
def test_graph_fns(self):
g = Graph()
a = g.placeholder('a')
b = g.call_module('linear', (a,))
c = g.get_attr('bias')
d = g.call_method('add', (b, c))
e = g.call_function(torch.sin, (d,))
g.output(e)
mod = torch.nn.Module()
mod.linear = torch.nn.Linear(3, 4)
mod.bias = torch.rand(4)
gm = GraphModule(mod, g)
gm.graph.lint()
input = torch.rand(3)
r = gm(input)
ref = torch.sin(mod.linear(input) + mod.bias)
self.assertEqual(r, ref)
def test_remove_uses(self):
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu)
g.erase_node(neg)
self.assertTrue(neg not in relu.users)
def test_remove_uses_with_custom_filter(self):
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu, lambda x: x != neg)
self.assertTrue(neg in relu.users)
def test_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(3, 4)
symbolic_trace(eb)
def test_pickle_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(10, 3, mode='sum')
traced = symbolic_trace(eb)
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.LongTensor([0, 4])
self.assertEqual(loaded(input, offsets), traced(input, offsets))
def test_return_tuple(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return (x, x + x)
original = M()
traced = symbolic_trace(original)
self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1)))
def test_construct_root_dict(self):
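# GraphModule also accepts a dict mapping qualified names to modules/tensors
# as its root, from which it builds the corresponding module hierarchy.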
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
linear_mod : torch.nn.Module = torch.nn.Linear(3, 4)
add_param : torch.Tensor = torch.rand(3, 4)
gm : torch.fx.GraphModule = torch.fx.GraphModule(
{'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph)
gm.graph.lint()
assert 'self.foo.bar.baz' in gm.code
x : torch.Tensor = torch.rand(3, 3)
out : torch.Tensor = gm(x)
ref_out : torch.Tensor = linear_mod(x) + add_param
self.assertEqual(out, ref_out)
def test_symbolic_trace_assert(self):
class AssertsTensorShape(torch.nn.Module):
def forward(self, x):
torch._assert(x.shape[1] > 4, "assert_foobar")
return x
m = AssertsTensorShape()
# verify traceability
traced = symbolic_trace(m)
# verify assertion on traced model works correctly at runtime
traced(torch.rand(4, 5))
with self.assertRaisesRegex(AssertionError, "assert_foobar"):
traced(torch.rand(4, 3))
# verify the symbolically traced module is scriptable
ms = torch.jit.script(m)
with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"):
ms(torch.rand(4, 3))
def test_fx_create_arg(self):
class CustomArgObject:
def __init__(self, x, y):
self.x = x
self.y = y
def __fx_create_arg__(self, tracer: torch.fx.Tracer):
return tracer.create_node(
"call_function",
CustomArgObject,
args=(
tracer.create_arg(self.x),
tracer.create_arg(self.y),
),
kwargs={},
)
class HasCustomArgObjectWhenLeaf(torch.nn.Module):
def forward(self, o: CustomArgObject):
# Not normally traceable; good reason to make
# this module a leaf.
for x in o.x:
o.y += x
return o.y
class Root(torch.nn.Module):
def __init__(self):
super().__init__()
self.inner = HasCustomArgObjectWhenLeaf()
def forward(self, x, y):
o = CustomArgObject(x, y)
return self.inner(o)
class CreateArgTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is HasCustomArgObjectWhenLeaf
m = Root()
graph = CreateArgTracer().trace(m)
gm = torch.fx.GraphModule(m, graph)
assert "CustomArgObject(" in gm.code
def test_trace_fn_constant(self):
some_constant = torch.rand(3, 4)
def add_const(x):
return some_constant + x
traced = symbolic_trace(add_const)
input = torch.rand(3, 4)
self.assertEqual(traced(input), add_const(input))
def test_copy_no_remap(self):
traced = symbolic_trace(SimpleTest())
g = traced.graph
copied = torch.fx.Graph()
for node in g.nodes:
copied.node_copy(node)
with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'):
copied.lint()
def test_wrong_topo(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
nodes = list(graph.nodes)
nodes[3].append(nodes[2])
with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'):
graph.lint()
def test_wrong_target_type(self):
graph : torch.fx.Graph = torch.fx.Graph()
with self.assertRaises(ValueError):
n = torch.fx.Node(graph=graph, name='foo', op='call_function', target='foo',
args=(), kwargs={})
def test_example_shape_prop(self):
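# ShapeProp interprets the module on an example input and records shape,
# dtype, stride and memory-format metadata in each node's meta['tensor_meta'].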
class TestCase(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr = torch.randn(3, 4)
self.submod = torch.nn.Linear(4, 4)
def forward(self, x):
return torch.neg(self.submod(x.relu() + self.attr))
tc = TestCase()
tc_traced = symbolic_trace(tc)
ref_out = tc_traced(torch.rand(3, 4))
shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4))
# Make sure we're testing all opcodes
opcodes = set()
output_shape : Optional[torch.Size] = None
output_stride : Optional[Tuple[int, ...]] = None
for node in tc_traced.graph.nodes:
opcodes.add(node.op)
if node.op == 'output':
output_shape = node.args[0].meta['tensor_meta'].shape
output_stride = node.args[0].meta['tensor_meta'].stride
self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',
'call_module', 'output']))
# Test shape propagation and make sure results match actual
self.assertEqual(output_shape, ref_out.shape)
self.assertEqual(output_stride, ref_out.stride())
def test_shape_prop_layout(self):
class ConvTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv2d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
# contiguous layout
test_mod = ConvTest()
traced = symbolic_trace(test_mod)
x = torch.randn(5, 5, 224, 224)
shape_prop.ShapeProp(traced).propagate(x)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced.graph.nodes))
x_channels_last = x.contiguous(memory_format=torch.channels_last)
traced.to(memory_format=torch.channels_last)
shape_prop.ShapeProp(traced).propagate(x_channels_last)
for node in traced.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last)
def test_shape_prop_aggregate(self):
class ReturnTwo(torch.nn.Module):
def forward(self, x):
return (3, torch.sum(x))
class UnderTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.rt = ReturnTwo()
def forward(self, x):
return self.rt(x)
ut = UnderTest()
class RTTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is ReturnTwo
graph = RTTracer().trace(ut)
mod = torch.fx.GraphModule(ut, graph)
shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4))
for node in mod.graph.nodes:
if node.op == 'call_module':
assert 'tensor_meta' in node.meta
tensor_meta = node.meta['tensor_meta']
assert tensor_meta[0] == 3
assert tensor_meta[1].shape == torch.Size([])
def test_shape_prop_layout_3d(self):
class ConvTest3d(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv3d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
test_mod_3d = ConvTest3d()
traced_3d = symbolic_trace(test_mod_3d)
x_3d = torch.randn(5, 5, 224, 224, 15)
shape_prop.ShapeProp(traced_3d).propagate(x_3d)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced_3d.graph.nodes))
x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d)
traced_3d.to(memory_format=torch.channels_last_3d)
shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d)
for node in traced_3d.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d)
def test_interpreter(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
interpreter = Interpreter(gm)
input = torch.randn(3, 4)
self.assertEqual(interpreter.run(input), gm(input))
self.assertEqual(interpreter.run(input), m(input))
def test_interpreter_run_node_override(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
class RunNodeInterpreter(Interpreter):
def __init__(self, module):
super().__init__(module)
def run_node(self, n : Node) -> Any:
result = super().run_node(n)
n.cached_value = result
return result
input = torch.randn(3, 4)
RunNodeInterpreter(gm).run(input)
for node in gm.graph.nodes:
assert hasattr(node, 'cached_value')
def test_interpreter_onthefly_swap(self):
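# Overriding call_function / call_method on an Interpreter substitutes ops
# at execution time without mutating the traced graph itself.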
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(target, args, kwargs)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
self.assertEqual(result, torch.neg(input).sigmoid())
def test_interpreter_partial_eval(self):
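# Pre-seeding `initial_env` with a result for the linear node makes the
# Interpreter skip that node and continue execution from the cached value.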
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
gm = torch.fx.symbolic_trace(MyModule())
interp = Interpreter(gm)
env = {}
for node in gm.graph.nodes:
if node.op == 'call_module' and node.target == 'linear':
env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0
break
assert len(env) == 1
x = torch.randn(3, 4)
result = interp.run(x, initial_env=env)
self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0))
def test_interpreter_star_args(self):
def with_star_args(x, *args):
return x + args[0]
gm = torch.fx.symbolic_trace(with_star_args)
interp = Interpreter(gm)
result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4))
self.assertEqual(result, torch.ones(3, 4) * 2.0)
@skipIfNoTorchVision
def test_interpreter_noop_resnet18(self):
rn18 = torchvision_models.resnet18()
transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform()
inp = torch.randn(5, 3, 224, 224)
self.assertEqual(transformed(inp), rn18(inp))
@skipIfNoTorchVision
def test_interpreter_gc_values(self):
rn18 = torchvision_models.resnet18()
interp = Interpreter(symbolic_trace(rn18))
inp = torch.rand(5, 3, 224, 224)
out = interp.run(inp)
env_key_names = set(n.name for n in interp.env.keys())
self.assertEqual(env_key_names, set(['output']))
def test_interpreter_default_args(self):
class Model(torch.nn.Module):
def forward(self, x, y=3.14159):
return x + y
model = Model()
gm = torch.fx.symbolic_trace(model)
interp = Interpreter(gm)
x = torch.randn(5, 3)
out = interp.run(x)
torch.testing.assert_allclose(out, x + 3.14159)
def test_interpreter_not_enough_args(self):
class Model(torch.nn.Module):
def forward(self, x, y):
return x + y
model = Model()
gm = torch.fx.symbolic_trace(model)
interp = Interpreter(gm)
x = torch.randn(5, 3)
with self.assertRaisesRegex(RuntimeError,
'Expected positional argument for parameter y, but one was not passed in'):
out = interp.run(x)
def test_transformer_noop(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_transformer_op_swap(self):
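# Unlike the Interpreter-based swap above, Transformer produces a new
# GraphModule with the sigmoid/neg substitution baked into its graph.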
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapXformer(Transformer):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(target, args, kwargs)
transformed = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(transformed(input), torch.neg(input).sigmoid())
def test_transformer_multi_outputs(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
x = x + self.param
out = self.linear(x)
return x, out
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_fn_type_annotations(self):
class Foo(torch.nn.Module):
def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]:
return {'a': p.x + p.y + z + i}
foo_scripted = torch.jit.script(Foo())
foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
fxed = symbolic_trace(Foo())
fxed_scripted = torch.jit.script(fxed)
fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
def test_fn_type_annotation_empty(self):
def forward(a : List[torch.Tensor]):
return a[0]
torch.jit.script(symbolic_trace(forward))
def test_wrapped_method(self):
def wrap_with_relu(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
return torch.relu(fn(*args, **kwargs))
return wrapper
class Foo(torch.nn.Module):
@wrap_with_relu
def forward(self, x, w):
return torch.matmul(x, w)
f = Foo()
traced = symbolic_trace(f)
x, w = torch.rand(3, 4), torch.rand(4, 4)
self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes))
def test_empty_graph_codegen(self):
graph = torch.fx.Graph()
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(gm(), None)
def test_sequential(self):
m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1))
gm = torch.fx.symbolic_trace(m)
gm_copy = copy.deepcopy(gm)
def test_ctx_mgr(self):
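# forward() decorated with a (no-op) context manager should still trace
# and match eager execution.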
@contextlib.contextmanager
def do_nothing():
yield
class M(torch.nn.Module):
def __init__(self):
super().__init__()
@do_nothing()
def forward(self, x):
return torch.relu(x)
m = M()
self.checkGraphModule(m, (torch.rand(3, 4),))
def test_typename_print(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,),
type_expr=List[float])
output : torch.fx.Node = graph.output(b)
self.assertTrue('typing.List[float]' in str(graph))
def test_layout(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return torch.empty_like(x, layout=torch.strided, pin_memory=False).fill_(0)
traced = symbolic_trace(M())
x = torch.rand(5, 9, 3, 4)
self.assertEqual(traced(x), torch.zeros_like(x))
def test_ellipsis(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
return x + y[:, 1:10, ...]
traced = symbolic_trace(M())
x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4)
self.assertEqual(traced(x, y), x + y[:, 1:10, ...])
def test_inf_nan(self):
class FooMod(torch.nn.Module):
def forward(self, x):
return x + float('inf'), x + float('-inf'), x + float('nan')
fm = FooMod()
self.checkGraphModule(fm, (torch.rand(3, 4),))
def test_inf_nan_kwds(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf')
c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan')
graph.output((b, c))
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
x = torch.rand(3, 4)
self.assertEqual(gm(x), (x + float('inf'), x + float('nan')))
def test_deepcopy_recursion_depth(self):
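# Deep-copying a graph whose node chain is longer than the recursion limit
# must not raise RecursionError, and every user/use relationship must map
# onto the corresponding nodes of the copy.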
depth = sys.getrecursionlimit() + 20
g = torch.fx.Graph()
x = g.placeholder('x')
for i in range(depth):
x = g.call_function(torch.relu, (x,))
g.output(x)
copied_graph = copy.deepcopy(g)
val_map = {}
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
val_map[orig_node] = new_node
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
orig_users = set(orig_node.users.keys())
orig_users_equiv = set(val_map[u] for u in orig_users)
new_users = set(new_node.users.keys())
self.assertEqual(orig_users_equiv, new_users)
@skipIfNoTorchVision
def test_replace_uses(self):
rn18 = torchvision_models.resnet18()
class LowerReluTracer(torch.fx.Tracer):
def is_leaf_module(self, m : torch.nn.Module, qualname : str):
if isinstance(m, torch.nn.ReLU):
return False
return super().is_leaf_module(m, qualname)
rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18))
to_erase = []
for node in rn18_traced.graph.nodes:
if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]:
kwargs = node.kwargs.copy()
# Neg doesn't have in-place
kwargs.pop('inplace')
with rn18_traced.graph.inserting_before(node):
new_node = rn18_traced.graph.call_function(
the_function=torch.neg, args=node.args, kwargs=kwargs)
node.replace_all_uses_with(replace_with=new_node)
to_erase.append(node)
for node in to_erase:
rn18_traced.graph.erase_node(node)
def test_replace_input(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.replace_input_with(x, y)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input_x = torch.randn(33, 44)
input_y = torch.randn(11, 22)
self.assertEqual(gm(input_x, input_y), torch.relu(input_y))
def test_insertion_point(self):
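# Nodes created inside graph.inserting_before(b) are placed immediately
# before b, so the new neg node ends up feeding relu.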
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
with graph.inserting_before(b):
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_update_args_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_arg(0, y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_update_kwargs_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x})
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_kwarg('input', y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_immutable_list_pytree_ops(self):
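# immutable_list / immutable_dict are registered as pytree containers, so
# flatten/unflatten round-trips preserve both the values and the type.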
rand_tensor = torch.randn(5, 3)
l = immutable_list([3, [rand_tensor, 42]])
flattened, spec = pytree.tree_flatten(l)
assert flattened == [3, rand_tensor, 42]
unflattened = pytree.tree_unflatten(flattened, spec)
assert unflattened == l
assert isinstance(unflattened, immutable_list)
def test_immutable_dict_pytree_ops(self):
rand_tensor = torch.randn(5, 3)
d = immutable_dict({'a': 3, 'b': [rand_tensor, 42]})
flattened, spec = pytree.tree_flatten(d)
assert flattened == [3, rand_tensor, 42]
unflattened = pytree.tree_unflatten(flattened, spec)
assert unflattened == d
assert isinstance(unflattened, immutable_dict)
def test_move_before(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
b.prepend(neg)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_prepend_self(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.prepend(b)
x.append(b)
self.assertEqual(len(graph.nodes), 3)
def test_erase_node_error(self):
st = SimpleTest()
traced = symbolic_trace(st)
for node in traced.graph.nodes:
# Test deleting with uses both in another Node and at the output
if node.target in [operator.add, torch.relu]:
with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'):
traced.graph.erase_node(node)
def test_copy_it(self):
d = immutable_dict([(3, 4), (5, 6)])
l = immutable_list([(3, 4), (5, 6)])
self.assertEqual(d, deepcopy(d))
self.assertEqual(l, deepcopy(l))
def test_get_torch_func_signature(self):
for key in dir(torch):
obj = getattr(torch, key)
if callable(obj):
schemas = get_signature_for_torch_op(obj)
def test_find_uses(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
y = torch.relu(x)
z = x + x
u = torch.neg(x)
graph.output((y + z + u).node)
graph.lint()
users_of_x = x.node.users
self.assertEqual(len(users_of_x), 3)
expected_ops = set(['relu', 'add', 'neg'])
for use in users_of_x:
assert any(use.name.startswith(prefix) for prefix in expected_ops)
def test_inline_graph(self):
class InlineInto(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class ToInline(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
inline_into = symbolic_trace(InlineInto())
to_inline = symbolic_trace(ToInline())
combined_graph = torch.fx.Graph()
output_node = combined_graph.graph_copy(inline_into.graph, {})
input_node = list(to_inline.graph.nodes)[0]
assert input_node and input_node.op == 'placeholder'
val_map = {input_node : output_node}
output = combined_graph.graph_copy(to_inline.graph, val_map)
combined_graph.output(output)
combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph)
input = torch.rand(3, 4)
self.assertEqual(combined_module(input), input.relu().neg())
def test_multi_insert_point(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
relu = torch.relu(x)
with graph.inserting_before(relu.node):
y = torch.neg(x)
z = torch.tanh(y)
graph.output((relu.node, z.node))
graph.lint()
expected_ops = ['x', 'neg', 'tanh', 'relu']
for node, expected in zip(graph.nodes, expected_ops):
assert expected in node.name
def test_reassign_args_kwargs_uses(self):
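# Assigning to node.args rewrites the node in place and keeps the users
# bookkeeping of the argument nodes up to date, as verified below.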
graph = torch.fx.Graph()
x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y'))
z = x + y
zed = z + z + z
graph.output(zed.node)
graph.lint()
# zed = z + z + z -> zed = z + z + x
zed.node.args = (zed.node.args[0], x.node)
self.assertEqual(list(x.node.users.keys()), [z.node, zed.node])
# z = x + y -> z = y + y
z.node.args = (y.node, y.node)
self.assertEqual(list(x.node.users.keys()), [zed.node])
def test_trace_function(self):
def foo(x, y):
return torch.relu(x) + y
x, y = torch.randn(3, 4), torch.randn(3, 4)
self.checkGraphModule(foo, (x, y))
def test_trace_dict_int_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[int, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({42: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
traced_graph = MyTracer().trace(CallsModWithDict())
def test_trace_dict_proxy_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[torch.Tensor, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({x: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'):
traced_graph = MyTracer().trace(CallsModWithDict())
def test_module_deepcopy_edit_nodes(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
traced1 = symbolic_trace(Foo())
copied = copy.deepcopy(traced1)
for node in copied.graph.nodes:
if node.target == torch.relu:
node.target = torch.neg
copied.recompile()
traced1.recompile()
x = torch.randn(15, 15)
torch.testing.assert_allclose(traced1(x), torch.relu(x))
torch.testing.assert_allclose(copied(x), torch.neg(x))
def test_direct_param_use(self):
class TransposeTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.b = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.b
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = TransposeTest()
def forward(self, x):
return self.a.b, self.a.b.t(), self.a.b.view(12)
traced = torch.fx.symbolic_trace(Foo())
assert(all('constant' not in node.target for node in traced.graph.nodes))
def test_single_default_arg(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1):
return y
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
def test_multiple_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1, z=2):
return y + z
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
self.checkGraphModule(m, (3, 4))
def test_regular_and_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y=1):
return x + y
m = M()
self.checkGraphModule(m, (2,))
self.checkGraphModule(m, (2, 3))
def test_string_literal_return(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self):
return "foo"
m = M()
self.checkGraphModule(m, ())
def test_namedtuple_return_qualname(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return MyNamedTup(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), MyNamedTup(input, input))
def test_update_args_kwargs_yells_at_you(self):
symtraced = symbolic_trace(SimpleTest())
node = next(iter(symtraced.graph.nodes))
with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'):
node.__update_args_kwargs((), {})
def test_torchbind_class_attribute_in_fx(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping")
class FooBar1234(torch.nn.Module):
def __init__(self):
super(FooBar1234, self).__init__()
self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"])
def forward(self):
return self.f.top()
m = FooBar1234()
self.checkGraphModule(m, ())
def test_torchbind_class_attribute_in_fx_tensor_arg(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping")
class FooBar2341(torch.nn.Module):
def __init__(self):
super(FooBar2341, self).__init__()
self.f = torch.classes._TorchScriptTesting._ReLUClass()
def forward(self, x):
return self.f.run(x)
m = FooBar2341()
traced = symbolic_trace(m)
input = torch.randn(3, 4)
self.assertEqual(traced(input), m(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_script_method_trace(self):
class Scripted(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class Holder(torch.nn.Module):
def __init__(self):
super().__init__()
self.s = torch.jit.script(Scripted())
def forward(self, x):
return self.s(x)
h = Holder()
traced = symbolic_trace(h)
input = torch.randn(3, 4)
self.assertEqual(traced(input), h(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_namedtuple_return_trace(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return Pair(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), Pair(input, input))
def test_named_tuple_inlined(self):
class NamedTupMod(torch.nn.Module):
def forward(self, inp):
return wrapped_named_tup(Pair(inp, 1.2), p2=Pair(3.4, inp))
m = NamedTupMod()
input = torch.rand(3, 4)
ref = m(input)
traced = symbolic_trace(m)
res = traced(input)
self.assertEqual(ref, res)
# Check Pair NamedTuple works when inlined into the function call.
ph = call_func = None
for node in traced.graph.nodes:
if node.op == "placeholder":
ph = node
elif node.op == "call_function" and node.target == wrapped_named_tup:
node.update_arg(0, Pair(ph, 1.2))
node.update_kwarg("p2", Pair(3.4, ph))
call_func = node
break
self.assertTrue(call_func is not None)
self.assertTrue(isinstance(call_func.args[0], Pair))
self.assertTrue(isinstance(call_func.kwargs["p2"], Pair))
self.assertEqual(_format_arg(call_func.args[0]), "Pair(x=%inp, y=1.2)")
self.assertEqual(_format_arg(call_func.kwargs["p2"]), "Pair(x=3.4, y=%inp)")
traced.graph.eliminate_dead_code()
traced.recompile()
res = traced(input)
self.assertEqual(ref, res)
def test_return_type_exists(self):
class ReturnTypeModule(torch.nn.Module):
def other(self, x: List[str]) -> List[str]:
return x
def forward(self, x: List[str]) -> List[str]:
return self.other(x)
traced = symbolic_trace(ReturnTypeModule())
self.assertIn("-> typing_List[str]", traced._code)
scripted = torch.jit.script(traced)
self.assertIn("-> List[str]", scripted.code)
def getitem_inner(self):
class GetItemBase(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer('pe', torch.randn(8, 8))
class GetItem1(GetItemBase):
def forward(self, x):
return self.pe[:, :x.size(0)]
class GetItem2(GetItemBase):
def forward(self, x):
return self.pe[x.size(0)]
class GetItem3(GetItemBase):
def forward(self, x):
return self.pe[4] # fx creates `self._tensor_constant0` here
self.checkGraphModule(GetItem1(), [torch.zeros(4)])
self.checkGraphModule(GetItem2(), [torch.zeros(4)])
self.checkGraphModule(GetItem3(), [torch.zeros(4)])
@unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1",
"Will be checked in test_getitem_subproc")
def test_getitem(self):
self.getitem_inner()
def test_getitem_subproc(self):
# need to run this test in a subproc to work around:
# https://github.com/pytorch/pytorch/issues/50710
proc = Process(target=run_getitem_target)
proc.start()
proc.join()
self.assertEqual(proc.exitcode, 0)
def test_user_friendly_call_provenance_with_function(self):
def fn(x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(fn)
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'fn.forward'"):
scripted = torch.jit.script(traced)
def test_user_friendly_call_provenance_with_module(self):
class M(torch.nn.Module):
def forward(self, x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(M())
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'M.forward'"):
scripted = torch.jit.script(traced)
def test_snake_case(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.activations = torch.nn.ModuleDict([
["snake_case", torch.nn.ReLU()],
["PascalCase", torch.nn.LeakyReLU()],
["ALL_CAPS", torch.nn.PReLU()]
])
def forward(self, x):
a = self.activations["snake_case"](x)
b = self.activations["PascalCase"](x)
c = self.activations["ALL_CAPS"](x)
return a, b, c
traced = symbolic_trace(M())
check = [
("activations_snake_case", "activations.snake_case"),
("activations_pascal_case", "activations.PascalCase"),
("activations_all_caps", "activations.ALL_CAPS")
]
i = 0
for node in traced.graph.nodes:
if node.op == "placeholder" or node.op == "output":
continue
name = check[i][0]
target = check[i][1]
self.assertEqual(name, node.name)
self.assertEqual(target, node.target)
i += 1
self.assertEqual(i, 3)
def test_no_mutation(self):
from torch.fx.immutable_collections import immutable_list
x = immutable_list([3, 4])
with self.assertRaisesRegex(NotImplementedError, "new_args"):
x[0] = 4
def test_partial_trace(self):
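# Tracing with concrete_args specializes the graph on the given value and
# inserts torch._assert guards, so calling the specialized module with a
# different value fails at runtime.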
class Foo(torch.nn.Module):
def forward(self, x, y):
if y:
return 2 * x
else:
return x
mod = Foo()
mod_true = symbolic_trace(mod, concrete_args={'y': True})
mod_false = symbolic_trace(mod, concrete_args={'y': False})
self.assertEqual(mod_true(3, True), 6)
print(mod_true.code)
assert(any([i.target == torch._assert for i in mod_true.graph.nodes]))
with self.assertRaises(AssertionError):
mod_true(3, False)
self.assertEqual(mod_false(3, False), 3)
with self.assertRaises(AssertionError):
mod_false(3, True)
def f_higher(a, f):
return f(a)
nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2})
self.assertEqual(nf(3, lambda x: x * 2), 6)
def test_custom_traceback_raised_when_exception_source_is_graphmodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.W = torch.nn.Parameter(torch.randn(5))
def forward(self, x):
return torch.dot(self.W, x)
traced = torch.fx.symbolic_trace(M())
out = [n for n in traced.graph.nodes if n.op == "output"][-1]
with traced.graph.inserting_before(out):
relu_out = traced.graph.call_method(method_name='relu',
args=(out.args[0],))
out.args = (relu_out,)
traced.recompile()
with self.capture_stderr() as captured:
with self.assertRaises(TypeError):
traced(5)
self.assertRegex(captured[0],
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(3, 4)
def forward(self, x):
return self.linear(x)
traced = torch.fx.symbolic_trace(M())
# Do not change this to `capture_stderr` or another context
# manager without ensuring that the output is as expected
try:
traced(torch.rand(5, 5))
except RuntimeError:
captured = traceback.format_exc()
self.assertNotRegex(captured,
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_graph_module_replicate_for_dp(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
gm = torch.fx.symbolic_trace(Foo())
x = torch.randn(5, 3)
out = gm(x)
replica = gm._replicate_for_data_parallel()
out_replica = replica(x)
torch.testing.assert_allclose(out_replica, out)
def test_ast_rewriter_rewrites_assert(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_rewrites_assert_with_message(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z, "msg"
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_throw_out_variant(self):
def foo(x):
y = torch.rand_like(x)
torch.sigmoid(x, out=y)
return y
class MyTracer(torch.fx.Tracer):
check_mutable_operations = True
tracer = MyTracer()
with self.assertRaisesRegex(RuntimeError, 'mutable operation aten::sigmoid.out'):
traced_graph = tracer.trace(foo)
def test_ast_rewriter_reassigns_submodules(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.bn = torch.nn.BatchNorm2d(100)
def forward(self, x: torch.Tensor):
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf((4, y), 3)
+ a_lifted_leaf((3, 4), 5)
+ a_lifted_leaf((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_ast_rewriter_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf2((4, y), 3)
+ a_lifted_leaf2((3, 4), 5)
+ a_lifted_leaf2((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf2", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_profiler_ranges_side_effect(self):
g = torch.fx.Graph()
handle = g.call_function(torch.ops.profiler._record_function_enter, ('test_range',))
g.call_function(torch.ops.profiler._record_function_exit, (handle,))
g.output(None)
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()),
[torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
)
g.eliminate_dead_code()
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()),
[torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
)
def test_ast_rewriter_wrapped_via_decorator(self):
class F(torch.nn.Module):
def forward(self, x):
return wrapped_via_decorator(x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(F())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(traced).transform()
self.assertIn("wrapped_via_decorator", transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_with_submodule", traced.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), traced(input))
def test_submodule_manipulation_API(self):
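# Exercises the GraphModule submodule-manipulation API: add_submodule,
# delete_submodule, get_submodule / get_parameter / get_buffer, the
# get_attr insertion warning, and delete_all_unused_submodules.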
class C(torch.nn.Module):
def __init__(self):
super(C, self).__init__()
self.conv = torch.nn.Conv2d(16, 33, 3, stride=2)
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.conv(torch.cat([self.param, x]))
class B(torch.nn.Module):
def __init__(self):
super(B, self).__init__()
self.linear = torch.nn.Linear(100, 200)
self.register_buffer("buf", torch.randn(2, 3))
self.net_c = C()
def forward(self, x):
return self.linear(torch.cat([self.buf, self.net_c(x)]))
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
self.net_b = B()
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.net_b(x) + self.param
a = symbolic_trace(A())
a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2))
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1]
with a.graph.inserting_before(conv):
with warnings.catch_warnings(record=True) as w:
dropout = a.graph.call_module(module_name="net_b.net_c.dropout",
args=conv.args)
self.assertEqual(len(w), 0)
conv.replace_all_uses_with(dropout)
a.graph.erase_node(conv)
a.recompile()
def module_exists(gm: GraphModule, path: str) -> bool:
return any(path == name for name, _ in gm.named_modules())
def parameter_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_parameters())
and any(path == name for name in gm.state_dict().keys()))
def buffer_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_buffers())
and any(path == name for name in gm.state_dict().keys()))
# Test that we added the "dropout" submodule
self.assertTrue(module_exists(a, "net_b.net_c.dropout"))
# Test `get_submodule` with an added submodule
self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout"))
# Test that the "conv" submodule is still there
self.assertTrue(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with an original module
self.assertIsNotNone(a.get_submodule("net_b.net_c.conv"))
# Test that the "conv" node is NOT still there
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"]
self.assertEqual(conv, [])
a.delete_submodule("net_b.net_c.conv")
# Test that the "conv" submodule is now gone
self.assertFalse(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with a deleted submodule
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`conv`"):
self.assertIsNone(a.get_submodule("net_b.net_c.conv"))
# Test `get_attr` warnings
cat = [n for n in a.graph.nodes if n.target == torch.cat][-1]
with a.graph.inserting_before(cat):
with warnings.catch_warnings(record=True) as w:
param = a.graph.get_attr(qualified_name="net_b.net_c.param")
self.assertEqual(len(w), 0)
with self.assertWarnsRegex(UserWarning, "Attempted to "
"insert a get_attr Node with no "
"underlying reference in the "
"owning GraphModule"):
bad_param = a.graph.get_attr(qualified_name="net_b.param")
a.graph.erase_node(bad_param)
cat.args = (*cat.args, param)
a.recompile()
a.graph.lint()
# Test `get_parameter`
a.get_parameter("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "is not an "
"nn.Parameter"):
a.get_parameter("net_b.buf")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`param`"):
a.get_parameter("net_b.param")
# Test `get_buffer`
a.get_buffer("net_b.buf")
with self.assertRaisesRegex(AttributeError, "is not a "
"buffer"):
a.get_buffer("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`buf`"):
a.get_buffer("net_b.net_c.buf")
# Test non-nested attributes
a.get_submodule("")
a.get_parameter("param")
# Insert some unused submodules
a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2))
a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100))
# Garbage collection
a.delete_all_unused_submodules()
# Test that all the unused submodules are gone
self.assertFalse(module_exists(a, "net_b.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.rnn"))
self.assertFalse(module_exists(a, "batch_norm_2d"))
# Test that we didn't delete any unused Parameters or buffers
self.assertTrue(parameter_exists(a, "net_b.net_c.param"))
self.assertTrue(buffer_exists(a, "net_b.buf"))
a.graph.lint()
def test_delete_unused_submodules_leaf(self):
class SubModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(10, 10)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.linear(x)
x = self.relu(x)
return x
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.submod = SubModule()
def forward(self, x):
x = self.submod(x)
return x
model = Model()
class MyCustomTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return module_qualified_name == "submod"
inputs = torch.randn(1, 10)
traced_graph = MyCustomTracer().trace(model)
gm2 = torch.fx.GraphModule(model, traced_graph)
gm2.delete_all_unused_submodules()
torch.testing.assert_allclose(gm2(inputs), model(inputs))
def test_tracing_graphmodules_as_leaf_submodules(self):
class A(torch.nn.Module):
def forward(self, t):
return t + t
class B(torch.nn.Module):
def __init__(self):
super(type(self), self).__init__()
self.calling = False
self.called = False
def forward(self, t):
if self.calling:
return t - t
else:
return t + t
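# __call__ is overridden to record that it ran and to set `calling`, so forward()
# takes the `t - t` branch whenever B is invoked through __call__.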
def __call__(self, *args):
self.called = True
self.calling = True
return super(type(self), self).__call__(*args)
self.calling = False
class M(torch.nn.Module):
def __init__(self, a, b):
super().__init__()
self.a = a
self.b = b
def forward(self, t):
x = self.a(t)
y = self.b(t)
return x + y
class LeafTracer(Tracer):
def is_leaf_module(self, module, name):
return True
class LeafTracerNotB(Tracer):
def is_leaf_module(self, module, name):
return False if "b" in name else True
# Recompile calls added "for fun", since they
# chain __call__ wrappers.
#
# Test: B as a regular, non-leaf module
#
a = symbolic_trace(A())
a.recompile()
m = M(a, B())
graph = LeafTracerNotB().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is not treated as leaf.
self.assertFalse(hasattr(gm, "b"))
# Test assert custom __call__ on submodule b was honored.
match = [
n
for n in gm.graph.nodes
if n.op == "call_function" and n.target == operator.sub
]
self.assertTrue(len(match) == 1)
#
# Test: B as a regular, leaf module
# symbolic_trace should only patch torch.nn.Module.__call__,
# which means B.__call__ should still execute
#
a = symbolic_trace(A())
a.recompile()
b = B()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is leaf:
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
# Test b.__call__ was run
self.assertTrue(b.called)
self.assertTrue(gm.get_submodule("b").called)
#
# Test: B as GraphModule leaf
# __call__ not honored since symbolic_trace directly invokes forward()
#
a = symbolic_trace(A())
a.recompile()
b = symbolic_trace(B())
b.recompile()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer("my_buff", torch.rand(3, 4))
self.register_parameter(
"my_param", torch.nn.Parameter(torch.rand(3, 4))
)
def forward(self, x):
return x + self.my_buff + self.my_param
mod = MyModule()
mod_traced = symbolic_trace(mod)
# Create new GraphModule based on original, either w/ dict or root module.
orig_buff = mod_traced.get_buffer("my_buff")
orig_param = mod_traced.get_parameter("my_param")
mod_traced_new = GraphModule(
{"my_buff": orig_buff, "my_param": orig_param} if use_dict_init else mod,
mod_traced.graph,
)
# Check that both my_buff and my_param are found and the same.
try:
new_buff = mod_traced_new.get_buffer("my_buff")
except Exception:
self.fail("Did not find my_buff")
self.assertEqual(orig_buff, new_buff)
try:
new_param = mod_traced_new.get_parameter("my_param")
except Exception:
self.fail("Did not find my_param")
self.assertEqual(orig_param, new_param)
x = torch.rand(3, 4)
orig_out = mod_traced(x)
submodules_out = mod_traced_new(x)
self.assertEqual(orig_out, submodules_out)
def test_graph_module_init_buffer_param_copied_dict_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=True)
def test_graph_module_init_buffer_param_copied_mod_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=False)
def test_annotations_with_no_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor:
return a(x[0])
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor':
return a(x)[0]
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
@unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature "
"`annotations` is not defined in Python <3.7")
def test_annotation_with_future(self):
try:
import fx.test_future # noqa: F401
finally:
del sys.modules["__future__"]
def test_annotations_empty_tuple(self):
class Foo(torch.nn.Module):
def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]):
return "foo"
traced = torch.fx.symbolic_trace(Foo())
x = ()
y = ("bar", ())
traced(x, y)
FileCheck().check("_Tuple[()]") \
.check("typing_Tuple[str,typing_Tuple[()]]") \
.run(traced.code)
scripted = torch.jit.script(traced)
scripted(x, y)
FileCheck().check("Tuple[()]") \
.check("Tuple[str, Tuple[()]]") \
.run(scripted.code)
@unittest.skipIf(IS_WINDOWS, "Python Windows bug? https://bugs.python.org/issue45108")
def test_assert(self):
def f(x):
assert x > 1
return x + 1
try:
torch.fx.proxy.TracerBase.trace_asserts = True
traced = symbolic_trace(f)
finally:
torch.fx.proxy.TracerBase.trace_asserts = False
self.assertEqual(f(2), traced(2))
with self.assertRaises(AssertionError):
traced(0)
def test_pytree(self):
def f_sum(x):
return sum(x)
def f_sum_dict(x):
out = 0
for k, v in x.items():
out += v
return out
def f_dict_list_map(x):
new_dict = {}
for k, v in x.items():
new_dict[k] = [i + 1 for i in v]
return new_dict
def f_dict_add(x):
return x['a'] + sum(x['z'])
def f_namedtuple_add(x):
return x.x + x.y
pytree._register_pytree_node(
Foo,
lambda x: ([x.a, x.b], None),
lambda x, _: Foo(x[0], x[1]),
)
fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b])
def f_custom(x):
return x.a + x.b
def f_custom_dict(x):
return f_sum_dict(x.a) + x.b
def f_return_custom(x):
return Foo(x.b, x.a)
tests = [
(f_sum, [PH, PH, PH]),
(f_sum, []),
(f_sum_dict, {'a': PH, 'b': PH, 'c': PH}),
(f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}),
(f_dict_list_map, {5: (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': []}),
(f_custom, Foo(PH, PH)),
(f_custom, Foo(PH, 3)),
(f_custom_dict, Foo({'a': PH, 'b': PH}, PH)),
# (f_return_custom, Foo(PH, PH)), # Don't currently support output pytrees
(f_namedtuple_add, Point(PH, PH)),
]
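# For each (fn, input-structure) pair: materialize PH leaves as random tensors, trace with
# concrete_args, and check output equality, placeholder/tree_flatten_spec handling,
# retracing, and pickling.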
def verify_pytree(f, inp):
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
num_flat_args = len([i == PH for i in pytree.tree_flatten(inp)[0]])
orig_out = f(val)
nf = symbolic_trace(f, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
bare_fx.graph.set_codegen(CodeGen())
bare_fx.recompile()
self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(val))), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
nf = symbolic_trace(nf)
self.assertEqual(nf(val), orig_out)
assert "tree_flatten_spec" not in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1)
nf = symbolic_trace(nf, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
pickled = pickle.dumps(nf)
nf = pickle.loads(pickled)
self.assertEqual(nf(val), orig_out)
for f, inp in tests:
verify_pytree(f, inp)
def test_pytree_concrete(self):
def f(b, a):
if b:
return a['a']
else:
return a['z']
inp = {'a': {'a': PH, 'z': PH}, 'b': True}
nf = symbolic_trace(f, concrete_args=inp)
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
self.assertEqual(nf(**val), f(**val))
nf = symbolic_trace(nf)
self.assertEqual(nf(**val), f(**val))
def test_custom_codegen(self):
class ListCodeGen(CodeGen):
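# Custom CodeGen: the generated forward() takes one list argument and unpacks it into
# the traced free variables; process_inputs expects call sites to pass that single list.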
def gen_fn_def(self, free_vars, maybe_return_annotation):
lst_unpack = f"""
def forward(self, args_list: List[torch.Tensor]){maybe_return_annotation}:
{', '.join(free_vars)} = args_list"""
return lst_unpack
def additional_globals(self):
return [('List', typing.List)]
def process_inputs(self, *inputs):
assert(len(inputs) == 1)
return inputs[0]
def f(a, b):
return a + b
nf = symbolic_trace(f)
vals = [torch.randn(3), torch.randn(3)]
self.assertEqual(nf(*vals), f(*vals))
nf.graph.set_codegen(ListCodeGen())
nf.recompile()
bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
bare_fx.graph.set_codegen(CodeGen())
bare_fx.recompile()
self.assertEqual(nf(vals), f(*vals))
self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(vals))), f(*vals))
ts_f = torch.jit.script(nf)
self.assertEqual(nf(vals), ts_f(vals))
def test_imul_code_print(self):
graph = torch.fx.Graph()
a = graph.placeholder("a")
b = graph.placeholder("b")
graph.call_function(operator.imul, (a, b), {})
graph.output(a)
gm = torch.fx.GraphModule({}, graph)
gm.recompile()
self.assertEqual(gm(2, 3), 6)
self.assertIn("a *= b", gm.code)
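# Module-level helper: temporarily adds Tensor.__getitem__ to the wrapped-methods patch
# list, runs TestFX().getitem_inner(), and then restores the list.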
def run_getitem_target():
from torch.fx._symbolic_trace import _wrapped_methods_to_patch
_wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
try:
TestFX().getitem_inner()
finally:
_wrapped_methods_to_patch.pop()
class TestOperatorSignatures(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
@onlyCPU
@ops(op_db, allowed_dtypes=(torch.float,))
def test_get_torch_func_signature_exhaustive(self, device, dtype, op):
if not isinstance(op.op, types.BuiltinFunctionType):
raise unittest.SkipTest("This path doesn't work on Python functions")
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
schemas = get_signature_for_torch_op(op.op)
if not schemas:
raise RuntimeError('No Schemas Returned')
for sample_input in sample_inputs_itr:
# Iterate through overloads until we hit a match. If we exit this
# loop via `else`, we haven't found a match
for schema in schemas:
try:
bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs)
bound_args.apply_defaults()
op(*bound_args.args, **bound_args.kwargs)
break
except TypeError as e:
pass
else:
raise RuntimeError(f'Did not match any schemas for op {op.name}!')
class TestFXAPIBackwardCompatibility(JitTestCase):
def setUp(self):
self.maxDiff = None
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def _fn_to_stable_annotation_str(self, obj):
"""
Unfortunately we have to serialize function signatures manually since
serialization for `inspect.Signature` objects is not stable across
python versions
"""
fn_name = torch.typename(obj)
signature = inspect.signature(obj)
sig_str = f'{fn_name}{signature}'
arg_strs = []
for k, v in signature.parameters.items():
maybe_type_annotation = f': {self._annotation_type_to_stable_str(v.annotation, sig_str)}'\
if v.annotation is not inspect.Signature.empty else ''
def default_val_str(val):
if isinstance(val, (tuple, list)):
str_pieces = ['(' if isinstance(val, tuple) else '[']
str_pieces.append(', '.join(default_val_str(v) for v in val))
if isinstance(val, tuple) and len(str_pieces) == 2:
str_pieces.append(',')
str_pieces.append(')' if isinstance(val, tuple) else ']')
return ''.join(str_pieces)
# Need to fix up some default value strings.
# First case: modules. Default module `repr` contains the FS path of the module.
# Don't leak that
if isinstance(val, types.ModuleType):
return f'<module {val.__name__}>'
# Second case: callables. Callables (such as lambdas) encode their address in
# their string repr. Don't do that
if callable(val):
return f'<function {val.__name__}>'
return str(val)
if v.default is not inspect.Signature.empty:
default_val_str = default_val_str(v.default) if not isinstance(v.default, str) else f"'{v.default}'"
maybe_default = f' = {default_val_str}'
else:
maybe_default = ''
maybe_stars = ''
if v.kind == inspect.Parameter.VAR_POSITIONAL:
maybe_stars = '*'
elif v.kind == inspect.Parameter.VAR_KEYWORD:
maybe_stars = '**'
arg_strs.append(f'{maybe_stars}{k}{maybe_type_annotation}{maybe_default}')
return_annot = f' -> {self._annotation_type_to_stable_str(signature.return_annotation, sig_str)}'\
if signature.return_annotation is not inspect.Signature.empty else ''
return f'{fn_name}({", ".join(arg_strs)}){return_annot}'
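# Render a type annotation as a string that is stable across Python versions
# (forward refs, trivially-mapped types, and parameterized generics are handled explicitly).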
def _annotation_type_to_stable_str(self, t, sig_str):
if t is inspect.Signature.empty:
return ''
# Forward ref
if isinstance(t, str):
return f"'{t}'"
if hasattr(typing, 'ForwardRef') and isinstance(t, typing.ForwardRef):
return t.__forward_arg__
if hasattr(typing, '_ForwardRef') and isinstance(t, typing._ForwardRef):
return t.__forward_arg__
trivial_mappings = {
str : 'str',
int : 'int',
float: 'float',
bool: 'bool',
torch.dtype: 'torch.dtype',
torch.Tensor: 'torch.Tensor',
torch.device: 'torch.device',
torch.memory_format: 'torch.memory_format',
slice: 'slice',
torch.nn.Module: 'torch.nn.modules.module.Module',
torch.fx.Graph : 'torch.fx.graph.Graph',
torch.fx.Node : 'torch.fx.node.Node',
torch.fx.Proxy : 'torch.fx.proxy.Proxy',
torch.fx.node.Target : 'torch.fx.node.Target',
torch.fx.node.Argument : 'torch.fx.node.Argument',
torch.fx.graph.PythonCode : 'torch.fx.graph.PythonCode',
torch.fx.graph_module.GraphModule: 'torch.fx.graph_module.GraphModule',
torch.fx.subgraph_rewriter.Match: 'torch.fx.subgraph_rewriter.Match',
Ellipsis : '...',
typing.Any: 'Any',
type(None): 'NoneType',
None: 'None',
typing.Iterator: 'Iterator',
}
mapping = trivial_mappings.get(t, None)
if mapping:
return mapping
# Handle types with contained types
contained = getattr(t, '__args__', None) or []
# Callables contain a bare List for arguments
contained = t if isinstance(t, list) else contained
# Python 3.8 puts type vars into __args__ for unbound types such as Dict
if all(isinstance(ct, typing.TypeVar) for ct in contained):
contained = []
contained_type_annots = [self._annotation_type_to_stable_str(ct, sig_str) for ct in contained]
contained_type_str = f'[{", ".join(contained_type_annots)}]' if len(contained_type_annots) > 0 else ''
origin = getattr(t, '__origin__', None)
if origin is None:
# Unbound types don't have `__origin__` in some Python versions, so fix that up here.
origin = t if t in {typing.Tuple, typing.Union, typing.Dict, typing.List, typing.Type, typing.Callable} else origin
if origin in {tuple, typing.Tuple}:
return f'Tuple{contained_type_str}'
if origin in {typing.Union}:
# Annoying hack to detect Optional
if len(contained) == 2 and (contained[0] is type(None)) ^ (contained[1] is type(None)):
not_none_param = contained[0] if contained[0] is not type(None) else contained[1]
return f'Optional[{self._annotation_type_to_stable_str(not_none_param, sig_str)}]'
return f'Union{contained_type_str}'
if origin in {dict, typing.Dict}:
return f'Dict{contained_type_str}'
if origin in {list, typing.List}:
return f'List{contained_type_str}'
if origin in {type, typing.Type}:
return f'Type{contained_type_str}'
if isinstance(t, typing.Callable):
if len(contained) > 0 and contained[0] is not Ellipsis:
return f'Callable[[{", ".join(contained_type_annots[:-1])}], {contained_type_annots[-1]}]'
else:
return f'Callable{contained_type_str}'
raise RuntimeError(f'Unrecognized type {t} used in BC-compatible type signature {sig_str}.'
f'Please add support for this type and confirm with the '
f'FX team that your signature change is valid.')
def test_function_back_compat(self):
"""
Test backward compatibility for function signatures with
@compatibility(is_backward_compatible=True). Currently this checks for
exact signature matches, which may lead to false positives. If this
becomes too annoying, we can refine this check to actually parse out
the saved schema strings and check if the change is truly backward-
incompatible.
"""
signature_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if not isinstance(obj, type):
signature_strs.append(self._fn_to_stable_annotation_str(obj))
signature_strs.sort()
try:
self.assertExpected('\n'.join(signature_strs), 'fx_backcompat_function_signatures')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX function that has been marked " \
f"as backwards-compatible has experienced a signature change. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_class_member_back_compat(self):
"""
Test backward compatibility for members of classes with
@compatibility(is_backward_compatible=True). Currently this checks for
exact matches on the publicly visible members of the class.
"""
class_method_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if isinstance(obj, type):
public_members = [name for name in obj.__dict__ if not name.startswith('_')]
class_method_strs.append(f'{torch.typename(obj)} {sorted(public_members)}')
class_method_strs.sort()
try:
self.assertExpected('\n'.join(class_method_strs), 'fx_backcompat_class_members')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX class that has been marked " \
f"as backwards-compatible has experienced change in its public members. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_public_api_surface(self):
non_back_compat_objects = {}
def check_symbols_have_bc_designation(m, prefix):
if not m.__name__.startswith('torch.fx'):
return
if m.__name__.startswith('torch.fx.experimental'):
return
for k, v in m.__dict__.items():
if v is m:
continue
if k.startswith('_'):
continue
if isinstance(v, types.ModuleType):
check_symbols_have_bc_designation(v, prefix + [k])
elif isinstance(v, type) or isinstance(v, types.FunctionType):
if v not in _MARKED_WITH_COMATIBLITY:
non_back_compat_objects.setdefault(v)
check_symbols_have_bc_designation(torch.fx, ['torch', 'fx'])
check_symbols_have_bc_designation(torch.fx.passes, ['torch', 'fx', 'passes'])
non_back_compat_strs = [torch.typename(obj) for obj in non_back_compat_objects.keys()]
# Only want objects in torch.fx
non_back_compat_strs = [
s for s in non_back_compat_strs if s.startswith('torch.fx') and not s.startswith('torch.fx.experimental')]
# Only want objects in public namespaces
non_back_compat_strs = [
s for s in non_back_compat_strs if all(not atom.startswith('_') for atom in s.split('.'))]
non_back_compat_strs.sort()
if len(non_back_compat_strs) != 0:
raise AssertionError(f"Public FX API(s) {non_back_compat_strs} introduced but not given a "
f"backwards-compatibility classification! Please decorate these "
f"API(s) with `@torch.fx._compatibility.compatibility` to specify "
f"BC guarantees.")
class TestFunctionalTracing(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
IGNORE_FUNCS = ("has_torch_function", "has_torch_function_unary",
"has_torch_function_variadic", "handle_torch_function",
"boolean_dispatch")
TO_PATCH = {"has_torch_function": None,
"has_torch_function_unary": None,
"has_torch_function_variadic": None}
BUILT_IN_FUNC = (AssertionError, "")
PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable")
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default")
ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$")
CONTROL_FLOW = (TraceError, r"symbolically traced variables cannot be used as inputs to control flow")
INTERPOLATE_ARGS_CONFLICT = (ValueError, r"only one of size or scale_factor should be defined")
MUTABLE = (RuntimeError, r"Tried to trace mutable operation")
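# Map from torch.nn.functional name to the (exception type, message regex) expected
# when symbolic_trace is applied to it.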
UNTRACEABLE_FUNCTIONALS = {
"adaptive_avg_pool1d": BUILT_IN_FUNC,
"avg_pool1d": BUILT_IN_FUNC,
"avg_pool2d": BUILT_IN_FUNC,
"avg_pool3d": BUILT_IN_FUNC,
"bilinear": BUILT_IN_FUNC,
"celu_": BUILT_IN_FUNC,
"channel_shuffle": BUILT_IN_FUNC,
"native_channel_shuffle": BUILT_IN_FUNC,
"conv1d": BUILT_IN_FUNC,
"conv2d": BUILT_IN_FUNC,
"conv3d": BUILT_IN_FUNC,
"conv_tbc": BUILT_IN_FUNC,
"conv_transpose1d": BUILT_IN_FUNC,
"conv_transpose2d": BUILT_IN_FUNC,
"conv_transpose3d": BUILT_IN_FUNC,
"cosine_similarity": BUILT_IN_FUNC,
"elu_": BUILT_IN_FUNC,
"gelu": BUILT_IN_FUNC,
"hardshrink": BUILT_IN_FUNC,
"hardtanh_": BUILT_IN_FUNC,
"leaky_relu_": BUILT_IN_FUNC,
"linear": BUILT_IN_FUNC,
"logsigmoid": BUILT_IN_FUNC,
"one_hot": BUILT_IN_FUNC,
"pairwise_distance": BUILT_IN_FUNC,
"pdist": BUILT_IN_FUNC,
"pixel_shuffle": BUILT_IN_FUNC,
"pixel_unshuffle": BUILT_IN_FUNC,
"prelu": BUILT_IN_FUNC,
"relu_": BUILT_IN_FUNC,
"rrelu_": BUILT_IN_FUNC,
"selu_": BUILT_IN_FUNC,
"softplus": BUILT_IN_FUNC,
"softshrink": BUILT_IN_FUNC,
"threshold_": BUILT_IN_FUNC,
"adaptive_avg_pool2d": LEN_ERROR,
"adaptive_avg_pool3d": LEN_ERROR,
"adaptive_max_pool2d_with_indices": LEN_ERROR,
"adaptive_max_pool3d_with_indices": LEN_ERROR,
"instance_norm": CONTROL_FLOW,
"pad": LEN_ERROR,
"adaptive_max_pool1d": PROXY_ITERABLE,
"adaptive_max_pool2d": PROXY_ITERABLE,
"adaptive_max_pool3d": PROXY_ITERABLE,
"fractional_max_pool2d": PROXY_ITERABLE,
"fractional_max_pool3d": PROXY_ITERABLE,
"max_pool1d": PROXY_ITERABLE,
"max_pool2d": PROXY_ITERABLE,
"max_pool3d": PROXY_ITERABLE,
"group_norm": PROXY_ITERATED,
"lp_pool2d": PROXY_ITERATED,
"max_unpool1d": PROXY_ITERATED,
"max_unpool2d": PROXY_ITERATED,
"max_unpool3d": PROXY_ITERATED,
"adaptive_max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"layer_norm": ARG_TYPE_MISMATCH,
"lp_pool1d": ARG_TYPE_MISMATCH,
"affine_grid": CONTROL_FLOW,
"alpha_dropout": CONTROL_FLOW,
"batch_norm": CONTROL_FLOW,
"binary_cross_entropy": CONTROL_FLOW,
"binary_cross_entropy_with_logits": CONTROL_FLOW,
"celu": CONTROL_FLOW,
"cosine_embedding_loss": CONTROL_FLOW,
"cross_entropy": CONTROL_FLOW,
"ctc_loss": CONTROL_FLOW,
"dropout": CONTROL_FLOW,
"dropout2d": CONTROL_FLOW,
"dropout3d": CONTROL_FLOW,
"elu": CONTROL_FLOW,
"embedding": CONTROL_FLOW,
"embedding_bag": CONTROL_FLOW,
"feature_alpha_dropout": CONTROL_FLOW,
"fold": CONTROL_FLOW,
"gaussian_nll_loss": CONTROL_FLOW,
"glu": CONTROL_FLOW,
"grid_sample": CONTROL_FLOW,
"gumbel_softmax": CONTROL_FLOW,
"hardsigmoid": CONTROL_FLOW,
"hardswish": CONTROL_FLOW,
"hardtanh": CONTROL_FLOW,
"hinge_embedding_loss": CONTROL_FLOW,
"huber_loss": CONTROL_FLOW,
"interpolate": CONTROL_FLOW,
"kl_div": CONTROL_FLOW,
"l1_loss": CONTROL_FLOW,
"leaky_relu": CONTROL_FLOW,
"local_response_norm": CONTROL_FLOW,
"margin_ranking_loss": CONTROL_FLOW,
"max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"mse_loss": CONTROL_FLOW,
"multi_head_attention_forward": CONTROL_FLOW,
"multi_margin_loss": CONTROL_FLOW,
"multilabel_margin_loss": CONTROL_FLOW,
"multilabel_soft_margin_loss": CONTROL_FLOW,
"nll_loss": CONTROL_FLOW,
"poisson_nll_loss": CONTROL_FLOW,
"relu": CONTROL_FLOW,
"relu6": CONTROL_FLOW,
"rrelu": CONTROL_FLOW,
"selu": CONTROL_FLOW,
"silu": CONTROL_FLOW,
"mish": CONTROL_FLOW,
"smooth_l1_loss": CONTROL_FLOW,
"soft_margin_loss": CONTROL_FLOW,
"threshold": CONTROL_FLOW,
"triplet_margin_loss": CONTROL_FLOW,
"triplet_margin_with_distance_loss": CONTROL_FLOW,
"unfold": CONTROL_FLOW,
"upsample": CONTROL_FLOW,
"upsample_bilinear": INTERPOLATE_ARGS_CONFLICT,
"upsample_nearest": INTERPOLATE_ARGS_CONFLICT,
"normalize" : MUTABLE,
}
# List of nn.functional functions that take Tensor inputs but lack type annotations
FUNCTIONALS_WITHOUT_ANNOTATION = (
"adaptive_max_pool1d",
"adaptive_max_pool2d",
"adaptive_max_pool3d",
"fractional_max_pool2d",
"fractional_max_pool3d",
"max_pool1d",
"max_pool2d",
"max_pool3d",
"gaussian_nll_loss",
"upsample",
"upsample_bilinear",
"upsample_nearest",
)
# Inconsistent behavior between Python 3.8 and other Python versions:
# - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED`
# - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same
# internal exception above
# Use the following map to override the expected exception for Python 3.8
UNTRACEABLE_FUNCTIONALS_PY38 = {
"adaptive_max_pool1d": PROXY_ITERATED,
"adaptive_max_pool2d": PROXY_ITERATED,
"adaptive_max_pool3d": PROXY_ITERATED,
"fractional_max_pool2d": PROXY_ITERATED,
"fractional_max_pool3d": PROXY_ITERATED,
"max_pool1d": PROXY_ITERATED,
"max_pool2d": PROXY_ITERATED,
"max_pool3d": PROXY_ITERATED,
"group_norm": LEN_ERROR
}
@classmethod
def _get_functional(cls):
functional_list = []
for f in dir(torch.nn.functional):
if not f.islower():
continue
# Ignore internal functions
if f.startswith('_'):
continue
# Ignore supporting functions
if f in cls.IGNORE_FUNCS:
continue
fn = getattr(torch.nn.functional, f)
# Ignore non-callable objects such as modules
if not isinstance(fn, Callable):
continue
if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION:
try:
sig = inspect.signature(fn)
has_tensor_arg = False
for arg, param in sig.parameters.items():
if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor):
has_tensor_arg = True
if not has_tensor_arg:
continue
# inspect.signature raises ValueError when the object has no signature or is unsupported
except ValueError:
pass
functional_list.append((f, fn))
return functional_list
@classmethod
def generate_test_func(cls, func_name, fn):
def functional_test(self):
if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \
sys.version_info >= (3, 8) and sys.version_info < (3, 10):
exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
elif func_name in self.UNTRACEABLE_FUNCTIONALS:
exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
else:
symbolic_trace(fn)
return functional_test
@classmethod
def generate_tests(cls):
functional_list = cls._get_functional()
for func_name, fn in functional_list:
test_name = "test_nn_functional_" + func_name
functional_test = cls.generate_test_func(func_name, fn)
setattr(cls, test_name, functional_test)
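# Stub out the has_torch_function* helpers to always return False while these tests run;
# the originals are saved in TO_PATCH and restored in tearDownClass.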
@classmethod
def setUpClass(cls):
def no(*args, **kwargs):
return False
for name in cls.TO_PATCH.keys():
cls.TO_PATCH[name] = getattr(torch.nn.functional, name)
setattr(torch.nn.functional, name, no)
@classmethod
def tearDownClass(cls):
for name in cls.TO_PATCH.keys():
setattr(torch.nn.functional, name, cls.TO_PATCH[name])
TestFunctionalTracing.generate_tests()
instantiate_device_type_tests(TestOperatorSignatures, globals())
@skipIfNoTorchVision
class TestVisionTracing(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
INCONSISTENT_TYPE = (
RuntimeError,
r"Return value was annotated as having type __torch__.torchvision.models[.\w]+ but is actually of type Tensor"
)
UNTRACEABLE_MODELS = {
"fasterrcnn_resnet50_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_320_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_fpn": PROXY_ITERATED,
"maskrcnn_resnet50_fpn": PROXY_ITERATED,
"keypointrcnn_resnet50_fpn": PROXY_ITERATED,
"retinanet_resnet50_fpn": PROXY_ITERATED,
}
UNSCRIPTABLE_MODELS = {
"googlenet": INCONSISTENT_TYPE,
"inception_v3": INCONSISTENT_TYPE,
}
output_transform = {
"fcn_resnet50": lambda x: x["out"],
"fcn_resnet101": lambda x: x["out"],
"deeplabv3_resnet50": lambda x: x["out"],
"deeplabv3_resnet101": lambda x: x["out"],
"deeplabv3_mobilenet_v3_large": lambda x: x["out"],
"lraspp_mobilenet_v3_large": lambda x: x["out"],
"fasterrcnn_resnet50_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1],
"maskrcnn_resnet50_fpn": lambda x: x[1],
"keypointrcnn_resnet50_fpn": lambda x: x[1],
"retinanet_resnet50_fpn": lambda x: x[1],
}
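# Each generated test traces a torchvision model, compares eager vs. traced outputs, and
# checks whether the traced module can be scripted with torch.jit.script.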
@classmethod
def generate_test_fn(cls, name, model_fn, x, kwargs):
def run_test(self):
model = model_fn(**kwargs)
model = model.eval()
if name in self.UNTRACEABLE_MODELS:
exc, err = self.UNTRACEABLE_MODELS[name]
with self.assertRaisesRegex(exc, err):
graph = symbolic_trace(model)
else:
out_transform = self.output_transform.get(name, lambda x: x)
graph : torch.fx.GraphModule = symbolic_trace(model)
a = out_transform(model(x))
b = out_transform(graph(x))
self.assertEqual(a, b)
if name in self.UNSCRIPTABLE_MODELS:
exc, err = self.UNSCRIPTABLE_MODELS[name]
with self.assertRaisesRegex(exc, err):
script = torch.jit.script(graph)
else:
script = torch.jit.script(graph)
c = out_transform(script(x))
self.assertEqual(a, c)
return run_test
@classmethod
def generate_classification_tests(cls):
for k, v in torchvision_models.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_' + k
x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_segmentation_tests(cls):
for k, v in torchvision_models.segmentation.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_segmentation_' + k
x = torch.rand(1, 3, 32, 32)
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_detection_tests(cls):
for k, v in torchvision_models.detection.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_detection_' + k
x = [torch.rand(3, 300, 300)]
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_video_tests(cls):
for k, v in torchvision_models.video.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_video_' + k
x = torch.rand(1, 3, 4, 112, 112)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_tests(cls):
cls.generate_classification_tests()
cls.generate_detection_tests()
cls.generate_segmentation_tests()
cls.generate_video_tests()
if HAS_TORCHVISION:
TestVisionTracing.generate_tests()
if __name__ == '__main__':
run_tests()
# Owner(s): ["oncall: fx"]
import builtins
import contextlib
import copy
import functools
import inspect
import math
import numbers
import operator
import os
import pickle
import sys
import torch
import traceback
import typing
import types
import warnings
import unittest
from math import sqrt
from torch.multiprocessing import Process
from torch.testing import FileCheck
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests
import torch.utils._pytree as pytree
import torch.fx._pytree as fx_pytree
from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH, CodeGen
from torch.fx.node import Target, Argument, _format_arg
from torch.fx.passes import shape_prop
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx.operator_schemas import get_signature_for_torch_op
from copy import deepcopy
from collections import namedtuple
from torch.fx.proxy import TraceError
from torch.fx._compatibility import _BACK_COMPAT_OBJECTS, _MARKED_WITH_COMATIBLITY
from fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401
from fx.test_dce_pass import TestDCE # noqa: F401
from fx.test_fx_const_fold import TestConstFold # noqa: F401
from fx.test_fx_param_shape_control_flow import TestConstParamShapeInControlFlow # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import AnnotationsTest # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import TypeCheckerTest # noqa: F401
from typing import Any, Callable, Dict, NamedTuple, List, Optional, Set, Tuple, Union
from torch.testing._internal.common_utils import (
IS_FBCODE,
IS_MACOS,
IS_WINDOWS,
TEST_WITH_ROCM,
find_library_location,
run_tests,
)
from torch.testing._internal.jit_utils import JitTestCase
from fx.named_tup import MyNamedTup
try:
from torchvision import models as torchvision_models
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class SimpleTest(torch.nn.Module):
def forward(self, x):
return torch.relu(x + 3.0)
def a_non_torch_leaf(a, b):
return a + b
# Used for test_autowrap_function. Autowrapped functions need to be global
def fx_int(x: float) -> int:
return int(x)
def fx_int_x2(x: float) -> int:
return int(x) * 2
# used in test_pytree. It's all the way out here because pickling a GraphModule
# that uses Point errors out if Point is local to the function
Point = namedtuple('Point', ['x', 'y'])
# Test wrap() passing both a function name and a function object directly
def a_lifted_leaf(a, b):
return a[0] + a[1] + b
wrap('a_lifted_leaf')
# Test wrapping twice doesn't break anything
wrap('a_lifted_leaf')
def a_lifted_leaf2(a, b):
return a[0] + a[1] + b
wrap(a_lifted_leaf2)
wrap('len')
wrap('getattr')
def wrapped_named_tup(p1, *, p2):
return p1.x + p2.y
wrap(wrapped_named_tup)
@wrap
def wrapped_via_decorator(a):
return a + 1
wrap('wrapped_with_submodule')
def wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d):
return batchnorm1d(x)
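# Save references to the wrapped callables so tests can assert that symbolic tracing
# does not permanently rebind these module-level globals.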
real_wrapped_via_decorator = wrapped_via_decorator
real_a_lifed_leaf = a_lifted_leaf
real_a_lifed_leaf2 = a_lifted_leaf2
_sqrt = sqrt
wrap('wrapper_fn')
def wrapper_fn(x):
return torch.foo(x)
class Pair(NamedTuple):
x : torch.Tensor
y : torch.Tensor
def _custom_fx_repr_fn(self) -> str:
return f"Pair(x={_format_arg(self.x)}, y={_format_arg(self.y)})"
# for testing pytrees
class Foo(object): # noqa: B209
def __init__(self, a, b):
self.a = a
self.b = b
class TestFX(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
if not (TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS):
lib_file_path = find_library_location('libtorchbind_test.so')
torch.ops.load_library(str(lib_file_path))
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):
"""Check that an nn.Module's results match the GraphModule version
for a given set of args/kwargs.
"""
kwargs = kwargs if kwargs else {}
ref_outs = m(*args, **kwargs)
gm = symbolic_trace(m)
gm.graph.lint()
test_outs = gm(*args, **kwargs)
self.assertEqual(ref_outs, test_outs)
def test_graph_module(self):
class MySub(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.w + x
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin = torch.nn.Linear(4, 3)
self.sub_mod = MySub()
self.w = torch.nn.Parameter(torch.rand(3))
def forward(self, A, B, c):
t = torch.sigmoid(A) + self.lin(c)
return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))
m = MyModule()
gm = symbolic_trace(m)
ms = torch.jit.script(gm)
class M2(torch.nn.Module):
def forward(self, A):
m, idx = torch.max(A, 0)
return m + 1, idx + 1
m2 = M2()
gm2 = symbolic_trace(m2)
class T(torch.nn.Module):
def forward(self, A, b=4, *args, c=5, **kwargs):
x = A + 1 + args[0] + kwargs['3']
return x
t = T()
symbolic_trace(t)
# test for issue described at https://github.com/pytorch/pytorch/issues/63883
class M3(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
m3 = M3()
gm3 = symbolic_trace(m3)
new_instance = gm3.__new__(type(gm3))
new_instance.__init__(gm3, gm3.graph)
x = torch.randn(5, 3)
torch.testing.assert_allclose(new_instance(x), torch.relu(x))
def test_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(torch.sin(x + y), gm(x, y))
def test_args_kwargs(self):
class T(torch.nn.Module):
def forward(self, *args, **kwargs):
x = args[0] + kwargs['foo']
return x
t = T()
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_args_kwargs_no_self(self):
class T(torch.nn.Module):
def forward(*args, **kwargs): # noqa: B902
self = args[0]
return torch.relu(args[1])
t = T()
with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'):
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_fx_shifts(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x << 3, x >> 3
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_fx_and_or(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x & x, x | x
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_dict(self):
class MyDictMod(torch.nn.Module):
def forward(self, d):
return d['3'].relu(), {'4' : d['3'].neg()}
input_dict = {'3': torch.rand(3, 4)}
m = MyDictMod()
self.checkGraphModule(m, (input_dict,))
def test_matmul_tracing(self):
const = torch.randn(3)
def matmul_f(x):
return x @ const
mod = symbolic_trace(matmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), matmul_f(inp))
def rmatmul_f(x):
return const @ x
mod = symbolic_trace(rmatmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), rmatmul_f(inp))
def test_disallow_override(self):
# Custom delegate to disallow in-place tensor operations
class NoMutableCallTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
name = target if isinstance(target, str) else torch.typename(target)
if name[-1] == '_':
raise RuntimeError('In-place operations are not supported')
return super().create_node(kind, target, args, kwargs, name)
# Test method
class MyInplaceMod(torch.nn.Module):
def forward(self, x):
x.add_(3.0)
return x
m = MyInplaceMod()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m)
# Test free function
class MyInplaceMod2(torch.nn.Module):
def forward(self, x):
torch.log_(x)
return x
m2 = MyInplaceMod2()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m2)
# Test symbolic node as an arg
class MyInplaceMod3(torch.nn.Module):
def forward(self, x):
y = torch.ones(3, 4)
y.add_(x)
return x
m3 = MyInplaceMod3()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m3)
def test_leaf_module(self):
# Custom delegate with no leaf modules, so everything gets traced through
class NoLeafModulesTracer(Tracer):
def is_leaf_module(self, m, qualname):
return False
class MyReluMod(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x)
mrm = MyReluMod()
sym = NoLeafModulesTracer().trace(mrm)
for node in sym.nodes:
self.assertNotEqual(node.op, 'call_module')
sym.lint()
def test_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf2', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_wrapped_via_decorator(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(m).transform()
self.assertIn('wrapped_via_decorator', transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
m = symbolic_trace(M())
self.assertIn("wrapped_with_submodule", m.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), m(input))
def test_wrapped_retrace(self):
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
retraced = symbolic_trace(m)
self.assertIn('wrapped_via_decorator', retraced.code)
self.assertEqual(retraced(0), 1)
def test_graph_edit_with_proxy(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
gm.graph.lint()
self.assertEqual(gm(3, 4), 14)
def test_graph_unique_names(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
seen_names : Set[str] = set()
for node in gm.graph.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_stack_traces(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
tracer = torch.fx.Tracer()
tracer.record_stack_traces = True
graph = tracer.trace(M())
# saving the original list because we will insert new nodes as a part of a test
orig_graph_nodes = list(graph.nodes)
for node in orig_graph_nodes:
if node.op == 'output':
continue
self.assertTrue(node.stack_trace is not None)
assert 'test_fx.py' in node.stack_trace
# verify that copying the node does not lose the stack trace
new_node = graph.node_copy(node)
self.assertTrue(new_node.stack_trace is not None)
assert 'test_fx.py' in new_node.stack_trace
def test_graph_unique_names_manual(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1')
c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
graph2 = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
graph2.graph_copy(graph, val_map)
seen_names : Set[str] = set()
for node in graph2.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_unpack(self):
class M(torch.nn.Module):
def forward(self, a, b):
c, d = a
return c + d + b
a = (torch.rand(1), torch.rand(1))
b = torch.rand(1)
m = M()
self.checkGraphModule(m, (a, b))
def test_native_callable(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
raise unittest.SkipTest("non-portable load_library call used in test")
# This test exercises the case where we use FX to translate from Python
# code to some native callable object
#
# For the purposes of testing, we use ElementwiseInterpreter defined
# in test_custom_class.cpp.
#
# We test that we can
# 1) Construct a native callable from FX IR
# 2) Construct a drop-in replacement module that delegates to the
# native callable rather than the original code
# 3) Run both the original code and native callable wrapper with
# equivalent results
# 4) TorchScript compile the native callable wrapper and confirm
# equivalent results with the reference
# 5) TorchScript serialize and deserialize the native callable
# and confirm equivalent results with the reference
# We use this simple Module as a reference computation
class MySimpleMod(torch.nn.Module):
def forward(self, x):
return 3.0 * x + x
msm = MySimpleMod()
# This is what a lowering pass might look like: a function that takes
# a valid nn.Module, symbolically traces it, lowers the Module to some
# representation, and wraps that representation up into another
# nn.Module instance that handles dispatch to the compiled/lowered code.
def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:
# ===== Stage 1: Symbolic trace the module =====
mod = symbolic_trace(orig_mod)
# ===== Stage 2: Lower GraphModule representation to the C++
# interpreter's instruction format ======
instructions = []
constant_idx = 0
constants = {}
fn_input_names = []
target_to_name = {
operator.add : "add",
operator.mul : "mul"
}
output_node : Optional[Node] = None
# For each instruction, create a triple
# (instruction_name : str, inputs : List[str], output : str)
# to feed into the C++ interpreter
for n in mod.graph.nodes:
target, args, out_name = n.target, n.args, n.name
assert len(n.kwargs) == 0, "kwargs currently not supported"
if n.op == 'placeholder':
# Placeholders specify function argument names. Save these
# for later when we generate the wrapper GraphModule
fn_input_names.append(target)
elif n.op == 'call_function':
assert target in target_to_name, "Unsupported call target " + target
arg_names = []
for arg in args:
if not isinstance(arg, Node):
# Pull out constants. These constants will later be
# fed to the interpreter C++ object via add_constant()
arg_name = f'constant_{constant_idx}'
constants[arg_name] = torch.tensor(
[arg] if isinstance(arg, numbers.Number) else arg)
arg_names.append(arg_name)
constant_idx += 1
else:
arg_names.append(arg.name)
instructions.append((target_to_name[target], arg_names, out_name))
elif n.op == 'output':
if output_node is not None:
raise RuntimeError('Multiple output nodes!')
output_node = n
else:
raise RuntimeError('Unsupported opcode ' + n.op)
interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()
# Load constants
for k, v in constants.items():
interpreter.add_constant(k, v)
# Specify names for positional input arguments
interpreter.set_input_names(fn_input_names)
# Load instructions
interpreter.set_instructions(instructions)
# Specify name for single output
assert isinstance(output_node.args[0], torch.fx.Node)
interpreter.set_output_name(output_node.args[0].name)
# ===== Stage 3: Create a wrapper GraphModule around the interpreter =====
class WrapperModule(torch.nn.Module):
def __init__(self, interpreter):
super().__init__()
self.interpreter = interpreter
wrapper = WrapperModule(interpreter)
# Create a graph that: 1) Takes function arguments 2) Invokes the interpreter
# 3) Returns the specified return value
# FIXME: The following code could be greatly simplified by symbolic_trace'ing
# the wrapper with a Tracer that considers the Wrapper instance a root
# module, however, I can't get `__call__` exposed on TorchBind classes
# without it messing up Python `hasattr` for some reason. More digging
# into CPython's implementation of hasattr is probably in order...
graph = torch.fx.Graph()
# Add placeholders for fn inputs
placeholder_nodes = []
for name in fn_input_names:
placeholder_nodes.append(graph.create_node('placeholder', name))
# Get the interpreter object
interpreter_node = graph.create_node('get_attr', 'interpreter')
# Add a node to call the interpreter instance
output_node = graph.create_node(
op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))
# Register output
graph.output(output_node)
graph.lint()
# Return final GraphModule!!!
return GraphModule(wrapper, graph)
# Lower GraphModule to C++ interpreter
lowered = lower_to_elementwise_interpreter(msm)
# Compare correctness with original module
x = torch.rand(3, 4)
ref_out = msm(x)
test_out = lowered(x)
torch.testing.assert_close(test_out, ref_out)
# Test TorchScript compilation
scripted_lowered = torch.jit.script(lowered)
script_out = scripted_lowered(x)
torch.testing.assert_close(script_out, ref_out)
# Test TorchScript ser/de
import_copy = self.getExportImportCopy(scripted_lowered)
imported_out = import_copy(x)
torch.testing.assert_close(imported_out, ref_out)
def test_reserved_getattr(self):
"""Ensure that we do not name any nodes with a reserved builtin like `getattr`"""
class M(torch.nn.Module):
def forward(self, a):
return a.foo.bar.baz
m = M()
m_g = symbolic_trace(m)
m_g.graph.lint()
for node in m_g.graph.nodes:
self.assertTrue(node.name != "getattr")
def test_trace_buffer_slice(self):
bs, d_hid = 10, 23
class ExampleCode(torch.nn.Module):
def __init__(self):
super().__init__()
self.mm_param = torch.nn.Parameter(torch.randn(d_hid, d_hid))
self.mm_param2 = torch.nn.Parameter(torch.randn(d_hid, d_hid))
self.lin = torch.nn.Linear(d_hid, d_hid)
self.register_buffer('buffer', torch.randn(bs + 100, d_hid))
def forward(self, x):
x = torch.mm(x, self.mm_param)
skip_connection = x
x = torch.relu(x)
x = torch.mm(x, self.mm_param) + self.buffer[:x.shape[0]]
x = self.lin(x)
x = torch.relu(x)
x = x + skip_connection
x = torch.mm(x, self.mm_param2)
x = self.lin(x)
return x
ec = ExampleCode()
traced = torch.fx.symbolic_trace(ec)
x = torch.randn(bs, d_hid)
torch.testing.assert_allclose(ec(x), traced(x))
def test_node_tagging(self):
class TaggingTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
n = super().create_node(kind, target, args, kwargs, name)
n.tag = 'foo'
return n
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = TaggingTracer().trace(m)
g.lint()
for n in g.nodes:
self.assertTrue(hasattr(n, 'tag'))
self.assertEqual(n.tag, 'foo')
def test_tensor_attribute(self):
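# Plain tensor attributes (not Parameters or buffers) should be traceable,
# both when accessed directly and through a nested module's qualified name.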
class TensorAttribute(torch.nn.Module):
def __init__(self):
super().__init__()
self.tensor = torch.rand(3, 4)
def forward(self, x):
return torch.nn.functional.linear(x, self.tensor)
ta = TensorAttribute()
traced = symbolic_trace(ta)
traced(torch.rand(4, 4))
class WrapperForQualname(torch.nn.Module):
def __init__(self):
super().__init__()
self.ta = TensorAttribute()
def forward(self, x):
return torch.nn.functional.linear(x, self.ta.tensor)
wfq = WrapperForQualname()
traced2 = symbolic_trace(wfq)
traced2.graph.lint()
traced2(torch.rand(4, 4))
def test_tensor_attribute_coalesced(self):
def count_attrs(fx_module):
targets = set()
for node in fx_module.graph.nodes:
if node.op == 'get_attr':
targets.add(node.target)
return len(targets)
val = torch.tensor(5)
def f(x):
return x + val + val
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 1)
val2 = torch.tensor(5)
def f(x):
val = torch.tensor(5)
return x + val + val2
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 2)
def test_symbolic_trace_sequential(self):
class Simple(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
seq = torch.nn.Sequential(
Simple(),
Simple(),
Simple()
)
traced = symbolic_trace(seq)
traced.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(traced(x), seq(x))
def test_tensor_constant(self):
class ConstTensor(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.linear(x, torch.zeros(3, 4))
ct = ConstTensor()
traced = symbolic_trace(ct)
traced.graph.lint()
traced(torch.rand(4, 4))
def test_pickle_graphmodule(self):
class Nested(torch.nn.Module):
def __init__(self):
super().__init__()
self.st = torch.nn.Linear(4, 4)
def forward(self, x):
return self.st(x)
n = Nested()
traced = symbolic_trace(n)
traced.graph.lint()
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(loaded(x), traced(x))
def test_pickle_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(loaded(x, y), gm(x, y))
def test_all_input_nodes(self):
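# Node.all_input_nodes should list exactly the Nodes referenced in a node's
# args/kwargs; checked here for call_module, get_attr, and call_function nodes.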
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.placeholder('x')
b : torch.fx.Node = graph.call_module('linear_mod', args=(a,))
c : torch.fx.Node = graph.get_attr('y_attr')
d : torch.fx.Node = graph.call_function(operator.add, args=(b, c))
e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0))
graph.output(e)
graph.lint()
self.assertEqual(b.all_input_nodes, [a])
self.assertEqual(c.all_input_nodes, [])
self.assertEqual(d.all_input_nodes, [b, c])
self.assertEqual(e.all_input_nodes, [d])
def test_deepcopy_graphmodule_with_transform(self):
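# graph_copy returns the value mapped from the copied graph's output; the
# transform appends a 'neg' call on top of it, and deepcopy of the transformed
# GraphModule must preserve behavior (deepcopy also yields a distinct class).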
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
def transform(traced):
new_graph = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_value = new_graph.graph_copy(traced.graph, val_map)
neg_out = new_graph.create_node(
op='call_method', target='neg', args=(output_value,), kwargs={})
new_graph.output(neg_out)
return GraphModule(traced, new_graph)
transformed = transform(traced)
transformed.graph.lint()
copied = copy.deepcopy(transformed)
self.assertNotEqual(id(type(transformed)), id(type(copied)))
x = torch.randn(3, 4)
self.assertEqual(copied(x), transformed(x))
def test_deepcopy_with_submods_params(self):
class Bar(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
def forward(self, x):
return torch.relu(x) + self.param
class Baz(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.bar = Bar()
def forward(self, x):
return self.bar(x) - self.param
baz = Baz()
traced = symbolic_trace(baz)
traced.graph.lint()
copied = copy.deepcopy(traced)
copied.graph.lint()
def test_deepcopy_graph_with_tracer_cls(self):
class TestTracer(Tracer):
def is_leaf_module(self, module, name):
return True
g = Graph(tracer_cls=TestTracer)
x = g.placeholder("x")
g.output(x)
h = copy.deepcopy(g)
self.assertIsNotNone(h._tracer_cls)
self.assertTrue(g._tracer_cls == h._tracer_cls)
def test_unpack_list_better_error(self):
class SomeArgs(torch.nn.Module):
def forward(self, a, b):
return torch.rand(3, 4)
class UnpacksList(torch.nn.Module):
def __init__(self):
super().__init__()
self.sa = SomeArgs()
def forward(self, x : list):
return self.sa(*x)
ul = UnpacksList()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ul)
def test_unpack_dict_better_error(self):
class SomeKwargs(torch.nn.Module):
def forward(self, x=3, y=4):
return torch.rand(3, 4)
class UnpacksDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.sk = SomeKwargs()
def forward(self, x : dict):
return self.sk(**x)
ud = UnpacksDict()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ud)
def test_pretty_print_targets(self):
# Test that Graph pretty-print prints friendly name for targets
# in `operator` and `builtins`
class SomeMod(torch.nn.Module):
def forward(self, x):
return torch.add(x.foo + x.bar, 3.0)
traced = symbolic_trace(SomeMod())
graph_str = str(traced.graph)
self.assertIn('builtins.getattr', graph_str)
self.assertIn('operator.add', graph_str)
self.assertIn('torch.add', graph_str)
def test_pretty_print_node(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.param: torch.nn.Parameter = torch.nn.Parameter(
torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x: torch.Tensor, y: int = 2):
return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0)
traced = symbolic_trace(M())
all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes])
FileCheck().check("x").check("placeholder") \
.check("y").check("placeholder") \
.check("getitem").check("call_function") \
.check("param").check("get_attr") \
.check("add").check("call_function") \
.check("linear").check("call_module") \
.check("clamp").check("call_method") \
.run(all_formatted)
def test_script_tensor_constant(self):
# TorchScript seems to ignore attributes that start with `__`.
# We used to call anonymous Tensor values `__tensor_constant*`, but
# they were getting ignored by script. Now they're called
# `_tensor_constant*`
class IHaveATensorConstant(torch.nn.Module):
def forward(self, x):
return x + torch.rand(3, 4)
traced = torch.fx.symbolic_trace(IHaveATensorConstant())
torch.jit.script(traced)
def test_autowrap_functions(self):
class AutowrapFnTest(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2)
class AutowrapFnTest2(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2) + fx_int_x2(x.shape[0] / 2)
# Check function(s) are wrapped
# `int` would normally throw a TypeError as argument can't be `Proxy`
tracer = Tracer(autowrap_functions=(fx_int,))
graph = tracer.trace(AutowrapFnTest())
traced = GraphModule(tracer.root, graph, 'test')
tracer_2 = Tracer(autowrap_functions=(fx_int, fx_int_x2))
tracer_2.trace(AutowrapFnTest2())
# Test scriptability
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(4)), 2)
def test_torch_fx_len(self):
class FXLenTest(torch.nn.Module):
def forward(self, x):
return len(x)
traced = symbolic_trace(FXLenTest())
self.assertEqual(traced(torch.rand(3, 4)), 3)
# Test scriptability
scripted = torch.jit.script(FXLenTest())
self.assertEqual(scripted(torch.rand(3)), 3)
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(3)), 3)
# Test non-proxy len
class FXLenTest2(torch.nn.Module):
def __init__(self):
super().__init__()
self.l = [3, 4, 5]
def forward(self, x):
return x + len(self.l)
traced2 = symbolic_trace(FXLenTest2())
inp = torch.rand(3, 4)
self.assertEqual(traced2(inp), inp + 3.0)
self.assertIs(len, builtins.len)
def test_torch_fx_getattr(self):
class FXGetattrTest(torch.nn.Module):
def forward(self, x):
return getattr(x, 'nonexistent_attr', torch.Tensor([2, 3]))
traced = symbolic_trace(FXGetattrTest())
self.assertEqual(traced(torch.rand(3, 4)), torch.Tensor([2, 3]))
def test_sqrt(self):
class Sqrt1(torch.nn.Module):
def forward(self, x):
return sqrt(x.size(0))
class Sqrt2(torch.nn.Module):
def forward(self, x):
return math.sqrt(x.size(0))
class Sqrt3(torch.nn.Module):
def forward(self, x):
return x + math.sqrt(2) + sqrt(2)
self.checkGraphModule(Sqrt1(), [torch.zeros(8)])
self.checkGraphModule(Sqrt2(), [torch.zeros(8)])
self.checkGraphModule(Sqrt3(), [torch.zeros(8)])
self.assertIs(sqrt, _sqrt)
self.assertIs(math.sqrt, _sqrt)
def test_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
out = gm(input)
self.assertEqual(out, ref_out)
def test_pickle_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
self.assertEqual(loaded(input), gm(input))
def test_pretty_print(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
printed = str(traced)
assert 'SimpleTest()' in printed
assert 'torch.relu' in printed
def test_pretty_print_graph(self):
class KwargPrintTest(torch.nn.Module):
def forward(self, x):
return torch.squeeze(x + 3.0, dim=2)
st = KwargPrintTest()
traced = symbolic_trace(st)
traced.graph.lint()
stringed = str(traced.graph)
for s in ['args', 'kwargs', '#users']:
assert s in stringed
def test_custom_proxy_type(self):
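# TensorPair is a plain class here; x.add(...)/.mul(...) are recorded as
# call_method nodes on the Proxy inputs and dispatch to the real TensorPair
# instances at run time.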
class TensorPair:
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair(x : TensorPair, y : TensorPair):
s = x.add(y)
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair(x, y)
traced = symbolic_trace(use_tensor_pair)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_type_literal(self):
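# With ProxyableClassMeta, constructing TensorPair(...) inside the traced
# function is itself recorded in the graph, so literal construction works
# under tracing.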
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_literal(x : TensorPair):
s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3)))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair_literal(x)
traced = symbolic_trace(use_tensor_pair_literal)
traced_out = traced(x)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_dynamic_value(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor):
s = x.add(TensorPair(y, y))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = torch.randn(5, 3)
ref_out = use_tensor_pair_ctor(x, y)
traced = symbolic_trace(use_tensor_pair_ctor)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_input_dependent_control_flow(self):
class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, inp):
if inp.sum() == 0:
self.is_zero = True
self.tensor = torch.tensor([])
else:
self.is_zero = False
self.tensor = inp
def add(self, other):
if self.is_zero:
return ZeroTensor(other.tensor)
elif other.is_zero:
return self
def use_zero_tensor(x : torch.Tensor, y : torch.Tensor):
return ZeroTensor(x + y)
x, y = torch.randn(5, 3), torch.randn(5, 3)
ref_out = use_zero_tensor(x, y)
traced = symbolic_trace(use_zero_tensor)
traced_out = traced(x, y)
self.assertEqual(traced_out.is_zero, ref_out.is_zero)
self.assertEqual(traced_out.tensor, ref_out.tensor)
def test_graph_fns(self):
g = Graph()
a = g.placeholder('a')
b = g.call_module('linear', (a,))
c = g.get_attr('bias')
d = g.call_method('add', (b, c))
e = g.call_function(torch.sin, (d,))
g.output(e)
mod = torch.nn.Module()
mod.linear = torch.nn.Linear(3, 4)
mod.bias = torch.rand(4)
gm = GraphModule(mod, g)
gm.graph.lint()
input = torch.rand(3)
r = gm(input)
ref = torch.sin(mod.linear(input) + mod.bias)
self.assertEqual(r, ref)
def test_remove_uses(self):
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu)
g.erase_node(neg)
self.assertTrue(neg not in relu.users)
def test_remove_uses_with_custom_filter(self):
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu, lambda x: x != neg)
self.assertTrue(neg in relu.users)
def test_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(3, 4)
symbolic_trace(eb)
def test_pickle_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(10, 3, mode='sum')
traced = symbolic_trace(eb)
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.LongTensor([0, 4])
self.assertEqual(loaded(input, offsets), traced(input, offsets))
def test_return_tuple(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return (x, x + x)
original = M()
traced = symbolic_trace(original)
self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1)))
def test_construct_root_dict(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
linear_mod : torch.nn.Module = torch.nn.Linear(3, 4)
add_param : torch.Tensor = torch.rand(3, 4)
gm : torch.fx.GraphModule = torch.fx.GraphModule(
{'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph)
gm.graph.lint()
assert 'self.foo.bar.baz' in gm.code
x : torch.Tensor = torch.rand(3, 3)
out : torch.Tensor = gm(x)
ref_out : torch.Tensor = linear_mod(x) + add_param
self.assertEqual(out, ref_out)
def test_symbolic_trace_assert(self):
class AssertsTensorShape(torch.nn.Module):
def forward(self, x):
torch._assert(x.shape[1] > 4, "assert_foobar")
return x
m = AssertsTensorShape()
# verify traceability
traced = symbolic_trace(m)
# verify assertion on traced model works correctly at runtime
traced(torch.rand(4, 5))
with self.assertRaisesRegex(AssertionError, "assert_foobar"):
traced(torch.rand(4, 3))
# verify the symbolically traced module is scriptable
ms = torch.jit.script(m)
with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"):
ms(torch.rand(4, 3))
def test_fx_create_arg(self):
class CustomArgObject:
def __init__(self, x, y):
self.x = x
self.y = y
def __fx_create_arg__(self, tracer: torch.fx.Tracer):
return tracer.create_node(
"call_function",
CustomArgObject,
args=(
tracer.create_arg(self.x),
tracer.create_arg(self.y),
),
kwargs={},
)
class HasCustomArgObjectWhenLeaf(torch.nn.Module):
def forward(self, o: CustomArgObject):
# Not normally traceable; good reason to make
# this module a leaf.
for x in o.x:
o.y += x
return o.y
class Root(torch.nn.Module):
def __init__(self):
super().__init__()
self.inner = HasCustomArgObjectWhenLeaf()
def forward(self, x, y):
o = CustomArgObject(x, y)
return self.inner(o)
class CreateArgTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is HasCustomArgObjectWhenLeaf
m = Root()
graph = CreateArgTracer().trace(m)
gm = torch.fx.GraphModule(m, graph)
assert "CustomArgObject(" in gm.code
def test_trace_fn_constant(self):
some_constant = torch.rand(3, 4)
def add_const(x):
return some_constant + x
traced = symbolic_trace(add_const)
input = torch.rand(3, 4)
self.assertEqual(traced(input), add_const(input))
def test_copy_no_remap(self):
traced = symbolic_trace(SimpleTest())
g = traced.graph
copied = torch.fx.Graph()
for node in g.nodes:
copied.node_copy(node)
with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'):
copied.lint()
def test_wrong_topo(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
nodes = list(graph.nodes)
nodes[3].append(nodes[2])
with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'):
graph.lint()
def test_wrong_target_type(self):
graph : torch.fx.Graph = torch.fx.Graph()
with self.assertRaises(ValueError):
n = torch.fx.Node(graph=graph, name='foo', op='call_function', target='foo',
args=(), kwargs={})
def test_example_shape_prop(self):
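# ShapeProp propagates an example input through the graph and records shape,
# stride, and other tensor metadata in node.meta['tensor_meta'] for every node.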
class TestCase(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr = torch.randn(3, 4)
self.submod = torch.nn.Linear(4, 4)
def forward(self, x):
return torch.neg(self.submod(x.relu() + self.attr))
tc = TestCase()
tc_traced = symbolic_trace(tc)
ref_out = tc_traced(torch.rand(3, 4))
shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4))
# Make sure we're testing all opcodes
opcodes = set()
output_shape : Optional[torch.Size] = None
output_stride : Optional[Tuple[int, ...]] = None
for node in tc_traced.graph.nodes:
opcodes.add(node.op)
if node.op == 'output':
output_shape = node.args[0].meta['tensor_meta'].shape
output_stride = node.args[0].meta['tensor_meta'].stride
self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',
'call_module', 'output']))
# Test shape propagation and make sure results match actual
self.assertEqual(output_shape, ref_out.shape)
self.assertEqual(output_stride, ref_out.stride())
def test_shape_prop_layout(self):
class ConvTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv2d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
# contiguous layout
test_mod = ConvTest()
traced = symbolic_trace(test_mod)
x = torch.randn(5, 5, 224, 224)
shape_prop.ShapeProp(traced).propagate(x)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced.graph.nodes))
x_channels_last = x.contiguous(memory_format=torch.channels_last)
traced.to(memory_format=torch.channels_last)
shape_prop.ShapeProp(traced).propagate(x_channels_last)
for node in traced.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last)
def test_shape_prop_aggregate(self):
class ReturnTwo(torch.nn.Module):
def forward(self, x):
return (3, torch.sum(x))
class UnderTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.rt = ReturnTwo()
def forward(self, x):
return self.rt(x)
ut = UnderTest()
class RTTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is ReturnTwo
graph = RTTracer().trace(ut)
mod = torch.fx.GraphModule(ut, graph)
shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4))
for node in mod.graph.nodes:
if node.op == 'call_module':
assert 'tensor_meta' in node.meta
tensor_meta = node.meta['tensor_meta']
assert tensor_meta[0] == 3
assert tensor_meta[1].shape == torch.Size([])
def test_shape_prop_layout_3d(self):
class ConvTest3d(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv3d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
test_mod_3d = ConvTest3d()
traced_3d = symbolic_trace(test_mod_3d)
x_3d = torch.randn(5, 5, 224, 224, 15)
shape_prop.ShapeProp(traced_3d).propagate(x_3d)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced_3d.graph.nodes))
x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d)
traced_3d.to(memory_format=torch.channels_last_3d)
shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d)
for node in traced_3d.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d)
def test_interpreter(self):
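# Interpreter executes the traced graph node by node; its result must match
# both the GraphModule and the original eager module.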
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
interpreter = Interpreter(gm)
input = torch.randn(3, 4)
self.assertEqual(interpreter.run(input), gm(input))
self.assertEqual(interpreter.run(input), m(input))
def test_interpreter_run_node_override(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
class RunNodeInterpreter(Interpreter):
def __init__(self, module):
super().__init__(module)
def run_node(self, n : Node) -> Any:
result = super().run_node(n)
n.cached_value = result
return result
input = torch.randn(3, 4)
RunNodeInterpreter(gm).run(input)
for node in gm.graph.nodes:
assert hasattr(node, 'cached_value')
def test_interpreter_onthefly_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(target, args, kwargs)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
self.assertEqual(result, torch.neg(input).sigmoid())
def test_interpreter_partial_eval(self):
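# Seeding initial_env with a precomputed value for the 'linear' node lets
# Interpreter.run skip that node and everything it depends on (partial evaluation).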
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
gm = torch.fx.symbolic_trace(MyModule())
interp = Interpreter(gm)
env = {}
for node in gm.graph.nodes:
if node.op == 'call_module' and node.target == 'linear':
env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0
break
assert len(env) == 1
x = torch.randn(3, 4)
result = interp.run(x, initial_env=env)
self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0))
def test_interpreter_star_args(self):
def with_star_args(x, *args):
return x + args[0]
gm = torch.fx.symbolic_trace(with_star_args)
interp = Interpreter(gm)
result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4))
self.assertEqual(result, torch.ones(3, 4) * 2.0)
@skipIfNoTorchVision
def test_interpreter_noop_resnet18(self):
rn18 = torchvision_models.resnet18()
transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform()
inp = torch.randn(5, 3, 224, 224)
self.assertEqual(transformed(inp), rn18(inp))
@skipIfNoTorchVision
def test_interpreter_gc_values(self):
rn18 = torchvision_models.resnet18()
interp = Interpreter(symbolic_trace(rn18))
inp = torch.rand(5, 3, 224, 224)
out = interp.run(inp)
env_key_names = set(n.name for n in interp.env.keys())
self.assertEqual(env_key_names, set(['output']))
def test_interpreter_default_args(self):
class Model(torch.nn.Module):
def forward(self, x, y=3.14159):
return x + y
model = Model()
gm = torch.fx.symbolic_trace(model)
interp = Interpreter(gm)
x = torch.randn(5, 3)
out = interp.run(x)
torch.testing.assert_allclose(out, x + 3.14159)
def test_interpreter_not_enough_args(self):
class Model(torch.nn.Module):
def forward(self, x, y):
return x + y
model = Model()
gm = torch.fx.symbolic_trace(model)
interp = Interpreter(gm)
x = torch.randn(5, 3)
with self.assertRaisesRegex(RuntimeError,
'Expected positional argument for parameter y, but one was not passed in'):
out = interp.run(x)
def test_transformer_noop(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_transformer_op_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapXformer(Transformer):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(target, args, kwargs)
transformed = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(transformed(input), torch.neg(input).sigmoid())
def test_transformer_multi_outputs(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
x = x + self.param
out = self.linear(x)
return x, out
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_fn_type_annotations(self):
class Foo(torch.nn.Module):
def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]:
return {'a': p.x + p.y + z + i}
foo_scripted = torch.jit.script(Foo())
foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
fxed = symbolic_trace(Foo())
fxed_scripted = torch.jit.script(fxed)
fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
def test_fn_type_annotation_empty(self):
def forward(a : List[torch.Tensor]):
return a[0]
torch.jit.script(symbolic_trace(forward))
def test_wrapped_method(self):
def wrap_with_relu(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
return torch.relu(fn(*args, **kwargs))
return wrapper
class Foo(torch.nn.Module):
@wrap_with_relu
def forward(self, x, w):
return torch.matmul(x, w)
f = Foo()
traced = symbolic_trace(f)
x, w = torch.rand(3, 4), torch.rand(4, 4)
self.assertEqual(traced(x, w), torch.relu(torch.matmul(x, w)))
self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes))
def test_empty_graph_codegen(self):
graph = torch.fx.Graph()
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(gm(), None)
def test_sequential(self):
m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1))
gm = torch.fx.symbolic_trace(m)
gm_copy = copy.deepcopy(gm)
def test_ctx_mgr(self):
@contextlib.contextmanager
def do_nothing():
yield
class M(torch.nn.Module):
def __init__(self):
super().__init__()
@do_nothing()
def forward(self, x):
return torch.relu(x)
m = M()
self.checkGraphModule(m, (torch.rand(3, 4),))
def test_typename_print(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,),
type_expr=List[float])
output : torch.fx.Node = graph.output(b)
self.assertTrue('typing.List[float]' in str(graph))
def test_layout(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return torch.empty_like(x, layout=torch.strided, pin_memory=False).fill_(0)
traced = symbolic_trace(M())
x = torch.rand(5, 9, 3, 4)
self.assertEqual(traced(x), torch.zeros_like(x))
def test_ellipsis(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
return x + y[:, 1:10, ...]
traced = symbolic_trace(M())
x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4)
self.assertEqual(traced(x, y), x + y[:, 1:10, ...])
def test_inf_nan(self):
class FooMod(torch.nn.Module):
def forward(self, x):
return x + float('inf'), x + float('-inf'), x + float('nan')
fm = FooMod()
self.checkGraphModule(fm, (torch.rand(3, 4),))
def test_inf_nan_kwds(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf')
c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan')
graph.output((b, c))
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
x = torch.rand(3, 4)
self.assertEqual(gm(x), (x + float('inf'), x + float('nan')))
def test_deepcopy_recursion_depth(self):
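# Build a call chain longer than the recursion limit so a naively recursive
# deepcopy would fail; also verify the copied graph's user links map 1:1 onto
# the originals.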
depth = sys.getrecursionlimit() + 20
g = torch.fx.Graph()
x = g.placeholder('x')
for i in range(depth):
x = g.call_function(torch.relu, (x,))
g.output(x)
copied_graph = copy.deepcopy(g)
val_map = {}
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
val_map[orig_node] = new_node
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
orig_users = set(orig_node.users.keys())
orig_users_equiv = set(val_map[u] for u in orig_users)
new_users = set(new_node.users.keys())
self.assertEqual(orig_users_equiv, new_users)
@skipIfNoTorchVision
def test_replace_uses(self):
rn18 = torchvision_models.resnet18()
class LowerReluTracer(torch.fx.Tracer):
def is_leaf_module(self, m : torch.nn.Module, qualname : str):
if isinstance(m, torch.nn.ReLU):
return False
return super().is_leaf_module(m, qualname)
rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18))
to_erase = []
for node in rn18_traced.graph.nodes:
if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]:
kwargs = node.kwargs.copy()
# Neg doesn't have in-place
kwargs.pop('inplace')
with rn18_traced.graph.inserting_before(node):
new_node = rn18_traced.graph.call_function(
the_function=torch.neg, args=node.args, kwargs=kwargs)
node.replace_all_uses_with(replace_with=new_node)
to_erase.append(node)
for node in to_erase:
rn18_traced.graph.erase_node(node)
def test_replace_input(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.replace_input_with(x, y)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input_x = torch.randn(33, 44)
input_y = torch.randn(11, 22)
self.assertEqual(gm(input_x, input_y), torch.relu(input_y))
def test_insertion_point(self):
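# inserting_before(b) scopes node creation so the new 'neg' node lands ahead
# of 'relu'; rewiring b.args then routes relu through it.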
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
with graph.inserting_before(b):
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_update_args_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_arg(0, y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_update_kwargs_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x})
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_kwarg('input', y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_immutable_list_pytree_ops(self):
rand_tensor = torch.randn(5, 3)
l = immutable_list([3, [rand_tensor, 42]])
flattened, spec = pytree.tree_flatten(l)
assert flattened == [3, rand_tensor, 42]
unflattened = pytree.tree_unflatten(flattened, spec)
assert unflattened == l
assert isinstance(unflattened, immutable_list)
def test_immutable_dict_pytree_ops(self):
rand_tensor = torch.randn(5, 3)
d = immutable_dict({'a': 3, 'b': [rand_tensor, 42]})
flattened, spec = pytree.tree_flatten(d)
assert flattened == [3, rand_tensor, 42]
unflattened = pytree.tree_unflatten(flattened, spec)
assert unflattened == d
assert isinstance(unflattened, immutable_dict)
def test_move_before(self):
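# call_function appends the new 'neg' node at the end of the graph;
# Node.prepend moves it before 'relu' so the rewired graph stays
# topologically ordered.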
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
b.prepend(neg)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_prepend_self(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.prepend(b)
x.append(b)
self.assertEqual(len(graph.nodes), 3)
def test_erase_node_error(self):
st = SimpleTest()
traced = symbolic_trace(st)
for node in traced.graph.nodes:
# Test deleting with uses both in another Node and at the output
if node.target in [operator.add, torch.relu]:
with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'):
traced.graph.erase_node(node)
def test_copy_it(self):
d = immutable_dict([(3, 4), (5, 6)])
l = immutable_list([(3, 4), (5, 6)])
self.assertEqual(d, deepcopy(d))
self.assertEqual(l, deepcopy(l))
def test_get_torch_func_signature(self):
for key in dir(torch):
obj = getattr(torch, key)
if callable(obj):
schemas = get_signature_for_torch_op(obj)
def test_find_uses(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
y = torch.relu(x)
z = x + x
u = torch.neg(x)
graph.output((y + z + u).node)
graph.lint()
users_of_x = x.node.users
self.assertEqual(len(users_of_x), 3)
expected_ops = set(['relu', 'add', 'neg'])
for use in users_of_x:
assert any(use.name.startswith(prefix) for prefix in expected_ops)
def test_inline_graph(self):
class InlineInto(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class ToInline(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
inline_into = symbolic_trace(InlineInto())
to_inline = symbolic_trace(ToInline())
combined_graph = torch.fx.Graph()
output_node = combined_graph.graph_copy(inline_into.graph, {})
input_node = list(to_inline.graph.nodes)[0]
assert input_node and input_node.op == 'placeholder'
val_map = {input_node : output_node}
output = combined_graph.graph_copy(to_inline.graph, val_map)
combined_graph.output(output)
combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph)
input = torch.rand(3, 4)
self.assertEqual(combined_module(input), input.relu().neg())
def test_multi_insert_point(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
relu = torch.relu(x)
with graph.inserting_before(relu.node):
y = torch.neg(x)
z = torch.tanh(y)
graph.output((relu.node, z.node))
graph.lint()
expected_ops = ['x', 'neg', 'tanh', 'relu']
for node, expected in zip(graph.nodes, expected_ops):
assert expected in node.name
def test_reassign_args_kwargs_uses(self):
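# Reassigning node.args must keep the users bookkeeping in sync for both the
# dropped and the newly referenced argument nodes.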
graph = torch.fx.Graph()
x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y'))
z = x + y
zed = z + z + z
graph.output(zed.node)
graph.lint()
# zed = z + z + z -> zed = z + z + x
zed.node.args = (zed.node.args[0], x.node)
self.assertEqual(list(x.node.users.keys()), [z.node, zed.node])
# z = x + y -> z = y + y
z.node.args = (y.node, y.node)
self.assertEqual(list(x.node.users.keys()), [zed.node])
def test_trace_function(self):
def foo(x, y):
return torch.relu(x) + y
x, y = torch.randn(3, 4), torch.randn(3, 4)
self.checkGraphModule(foo, (x, y))
def test_trace_dict_int_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[int, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({42: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
traced_graph = MyTracer().trace(CallsModWithDict())
def test_trace_dict_proxy_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[torch.Tensor, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({x: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'):
traced_graph = MyTracer().trace(CallsModWithDict())
def test_module_deepcopy_edit_nodes(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
traced1 = symbolic_trace(Foo())
copied = copy.deepcopy(traced1)
for node in copied.graph.nodes:
if node.target == torch.relu:
node.target = torch.neg
copied.recompile()
traced1.recompile()
x = torch.randn(15, 15)
torch.testing.assert_allclose(traced1(x), torch.relu(x))
torch.testing.assert_allclose(copied(x), torch.neg(x))
def test_direct_param_use(self):
class TransposeTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.b = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.b
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = TransposeTest()
def forward(self, x):
return self.a.b, self.a.b.t(), self.a.b.view(12)
traced = torch.fx.symbolic_trace(Foo())
assert(all('constant' not in node.target for node in traced.graph.nodes))
def test_single_default_arg(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1):
return y
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
def test_multiple_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1, z=2):
return y + z
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
self.checkGraphModule(m, (3, 4))
def test_regular_and_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y=1):
return x + y
m = M()
self.checkGraphModule(m, (2,))
self.checkGraphModule(m, (2, 3))
def test_string_literal_return(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self):
return "foo"
m = M()
self.checkGraphModule(m, ())
def test_namedtuple_return_qualname(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return MyNamedTup(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), MyNamedTup(input, input))
def test_update_args_kwargs_yells_at_you(self):
symtraced = symbolic_trace(SimpleTest())
node = next(iter(symtraced.graph.nodes))
with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'):
node.__update_args_kwargs((), {})
def test_torchbind_class_attribute_in_fx(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping")
class FooBar1234(torch.nn.Module):
def __init__(self):
super(FooBar1234, self).__init__()
self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"])
def forward(self):
return self.f.top()
m = FooBar1234()
self.checkGraphModule(m, ())
def test_torchbind_class_attribute_in_fx_tensor_arg(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping")
class FooBar2341(torch.nn.Module):
def __init__(self):
super(FooBar2341, self).__init__()
self.f = torch.classes._TorchScriptTesting._ReLUClass()
def forward(self, x):
return self.f.run(x)
m = FooBar2341()
traced = symbolic_trace(m)
input = torch.randn(3, 4)
self.assertEqual(traced(input), m(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_script_method_trace(self):
class Scripted(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class Holder(torch.nn.Module):
def __init__(self):
super().__init__()
self.s = torch.jit.script(Scripted())
def forward(self, x):
return self.s(x)
h = Holder()
traced = symbolic_trace(h)
input = torch.randn(3, 4)
self.assertEqual(traced(input), h(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_namedtuple_return_trace(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return Pair(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), Pair(input, input))
def test_named_tuple_inlined(self):
class NamedTupMod(torch.nn.Module):
def forward(self, inp):
return wrapped_named_tup(Pair(inp, 1.2), p2=Pair(3.4, inp))
m = NamedTupMod()
input = torch.rand(3, 4)
ref = m(input)
traced = symbolic_trace(m)
res = traced(input)
self.assertEqual(ref, res)
# Check Pair NamedTuple works when inlined into the function call.
ph = call_func = None
for node in traced.graph.nodes:
if node.op == "placeholder":
ph = node
elif node.op == "call_function" and node.target == wrapped_named_tup:
node.update_arg(0, Pair(ph, 1.2))
node.update_kwarg("p2", Pair(3.4, ph))
call_func = node
break
self.assertTrue(call_func is not None)
self.assertTrue(isinstance(call_func.args[0], Pair))
self.assertTrue(isinstance(call_func.kwargs["p2"], Pair))
self.assertEqual(_format_arg(call_func.args[0]), "Pair(x=%inp, y=1.2)")
self.assertEqual(_format_arg(call_func.kwargs["p2"]), "Pair(x=3.4, y=%inp)")
traced.graph.eliminate_dead_code()
traced.recompile()
res = traced(input)
self.assertEqual(ref, res)
def test_return_type_exists(self):
class ReturnTypeModule(torch.nn.Module):
def other(self, x: List[str]) -> List[str]:
return x
def forward(self, x: List[str]) -> List[str]:
return self.other(x)
traced = symbolic_trace(ReturnTypeModule())
self.assertIn("-> typing_List[str]", traced._code)
scripted = torch.jit.script(traced)
self.assertIn("-> List[str]", scripted.code)
def getitem_inner(self):
class GetItemBase(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer('pe', torch.randn(8, 8))
class GetItem1(GetItemBase):
def forward(self, x):
return self.pe[:, :x.size(0)]
class GetItem2(GetItemBase):
def forward(self, x):
return self.pe[x.size(0)]
class GetItem3(GetItemBase):
def forward(self, x):
return self.pe[4] # fx creates `self._tensor_constant0` here
self.checkGraphModule(GetItem1(), [torch.zeros(4)])
self.checkGraphModule(GetItem2(), [torch.zeros(4)])
self.checkGraphModule(GetItem3(), [torch.zeros(4)])
@unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1",
"Will be checked in test_getitem_subproc")
def test_getitem(self):
self.getitem_inner()
def test_getitem_subproc(self):
# need to run this test in a subproc to work around:
# https://github.com/pytorch/pytorch/issues/50710
proc = Process(target=run_getitem_target)
proc.start()
proc.join()
self.assertEqual(proc.exitcode, 0)
def test_user_friendly_call_provenance_with_function(self):
def fn(x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(fn)
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'fn.forward'"):
scripted = torch.jit.script(traced)
def test_user_friendly_call_provenance_with_module(self):
class M(torch.nn.Module):
def forward(self, x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(M())
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'M.forward'"):
scripted = torch.jit.script(traced)
def test_snake_case(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.activations = torch.nn.ModuleDict([
["snake_case", torch.nn.ReLU()],
["PascalCase", torch.nn.LeakyReLU()],
["ALL_CAPS", torch.nn.PReLU()]
])
def forward(self, x):
a = self.activations["snake_case"](x)
b = self.activations["PascalCase"](x)
c = self.activations["ALL_CAPS"](x)
return a, b, c
traced = symbolic_trace(M())
check = [
("activations_snake_case", "activations.snake_case"),
("activations_pascal_case", "activations.PascalCase"),
("activations_all_caps", "activations.ALL_CAPS")
]
i = 0
for node in traced.graph.nodes:
if node.op == "placeholder" or node.op == "output":
continue
name = check[i][0]
target = check[i][1]
self.assertEqual(name, node.name)
self.assertEqual(target, node.target)
i += 1
self.assertEqual(i, 3)
def test_no_mutation(self):
from torch.fx.immutable_collections import immutable_list
x = immutable_list([3, 4])
with self.assertRaisesRegex(NotImplementedError, "new_args"):
x[0] = 4
def test_partial_trace(self):
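# concrete_args specializes the trace on the given value and inserts
# torch._assert guards, so calling the specialized module with a different
# value raises.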
class Foo(torch.nn.Module):
def forward(self, x, y):
if y:
return 2 * x
else:
return x
mod = Foo()
mod_true = symbolic_trace(mod, concrete_args={'y': True})
mod_false = symbolic_trace(mod, concrete_args={'y': False})
self.assertEqual(mod_true(3, True), 6)
print(mod_true.code)
assert(any([i.target == torch._assert for i in mod_true.graph.nodes]))
with self.assertRaises(AssertionError):
mod_true(3, False)
self.assertEqual(mod_false(3, False), 3)
with self.assertRaises(AssertionError):
mod_false(3, True)
def f_higher(a, f):
return f(a)
nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2})
self.assertEqual(nf(3, lambda x: x * 2), 6)
def test_custom_traceback_raised_when_exception_source_is_graphmodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.W = torch.nn.Parameter(torch.randn(5))
def forward(self, x):
return torch.dot(self.W, x)
traced = torch.fx.symbolic_trace(M())
out = [n for n in traced.graph.nodes if n.op == "output"][-1]
with traced.graph.inserting_before(out):
relu_out = traced.graph.call_method(method_name='relu',
args=(out.args[0],))
out.args = (relu_out,)
traced.recompile()
with self.capture_stderr() as captured:
with self.assertRaises(TypeError):
traced(5)
self.assertRegex(captured[0],
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(3, 4)
def forward(self, x):
return self.linear(x)
traced = torch.fx.symbolic_trace(M())
# Do not change this to `capture_stderr` or another context
# manager without ensuring that the output is as expected
try:
traced(torch.rand(5, 5))
except RuntimeError:
captured = traceback.format_exc()
self.assertNotRegex(captured,
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_graph_module_replicate_for_dp(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
gm = torch.fx.symbolic_trace(Foo())
x = torch.randn(5, 3)
out = gm(x)
replica = gm._replicate_for_data_parallel()
out_replica = replica(x)
torch.testing.assert_allclose(out_replica, out)
def test_ast_rewriter_rewrites_assert(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_rewrites_assert_with_message(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z, "msg"
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_throw_out_variant(self):
def foo(x):
y = torch.rand_like(x)
torch.sigmoid(x, out=y)
return y
class MyTracer(torch.fx.Tracer):
check_mutable_operations = True
tracer = MyTracer()
with self.assertRaisesRegex(RuntimeError, 'mutable operation aten::sigmoid.out'):
traced_graph = tracer.trace(foo)
def test_ast_rewriter_reassigns_submodules(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.bn = torch.nn.BatchNorm2d(100)
def forward(self, x: torch.Tensor):
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf((4, y), 3)
+ a_lifted_leaf((3, 4), 5)
+ a_lifted_leaf((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_ast_rewriter_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf2((4, y), 3)
+ a_lifted_leaf2((3, 4), 5)
+ a_lifted_leaf2((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf2", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_profiler_ranges_side_effect(self):
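# Profiler range enter/exit calls are side-effectful, so eliminate_dead_code
# must keep them even though their results are unused.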
g = torch.fx.Graph()
handle = g.call_function(torch.ops.profiler._record_function_enter, ('test_range',))
g.call_function(torch.ops.profiler._record_function_exit, (handle,))
g.output(None)
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()),
[torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
)
g.eliminate_dead_code()
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()),
[torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
)
def test_ast_rewriter_wrapped_via_decorator(self):
class F(torch.nn.Module):
def forward(self, x):
return wrapped_via_decorator(x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(F())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(traced).transform()
self.assertIn("wrapped_via_decorator", transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_with_submodule", traced.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), traced(input))
def test_submodule_manipulation_API(self):
class C(torch.nn.Module):
def __init__(self):
super(C, self).__init__()
self.conv = torch.nn.Conv2d(16, 33, 3, stride=2)
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.conv(torch.cat([self.param, x]))
class B(torch.nn.Module):
def __init__(self):
super(B, self).__init__()
self.linear = torch.nn.Linear(100, 200)
self.register_buffer("buf", torch.randn(2, 3))
self.net_c = C()
def forward(self, x):
return self.linear(torch.cat([self.buf, self.net_c(x)]))
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
self.net_b = B()
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.net_b(x) + self.param
a = symbolic_trace(A())
a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2))
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1]
with a.graph.inserting_before(conv):
with warnings.catch_warnings(record=True) as w:
dropout = a.graph.call_module(module_name="net_b.net_c.dropout",
args=conv.args)
self.assertEqual(len(w), 0)
conv.replace_all_uses_with(dropout)
a.graph.erase_node(conv)
a.recompile()
def module_exists(gm: GraphModule, path: str) -> bool:
return any(path == name for name, _ in gm.named_modules())
def parameter_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_parameters())
and any(path == name for name in gm.state_dict().keys()))
def buffer_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_buffers())
and any(path == name for name in gm.state_dict().keys()))
# Test that we added the "dropout" submodule
self.assertTrue(module_exists(a, "net_b.net_c.dropout"))
# Test `get_submodule` with an added submodule
self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout"))
# Test that the "conv" submodule is still there
self.assertTrue(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with an original module
self.assertIsNotNone(a.get_submodule("net_b.net_c.conv"))
# Test that the "conv" node is NOT still there
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"]
self.assertEqual(conv, [])
a.delete_submodule("net_b.net_c.conv")
# Test that the "conv" submodule is now gone
self.assertFalse(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with a deleted submodule
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`conv`"):
self.assertIsNone(a.get_submodule("net_b.net_c.conv"))
# Test `get_attr` warnings
cat = [n for n in a.graph.nodes if n.target == torch.cat][-1]
with a.graph.inserting_before(cat):
with warnings.catch_warnings(record=True) as w:
param = a.graph.get_attr(qualified_name="net_b.net_c.param")
self.assertEqual(len(w), 0)
with self.assertWarnsRegex(UserWarning, "Attempted to "
"insert a get_attr Node with no "
"underlying reference in the "
"owning GraphModule"):
bad_param = a.graph.get_attr(qualified_name="net_b.param")
a.graph.erase_node(bad_param)
cat.args = (*cat.args, param)
a.recompile()
a.graph.lint()
# Test `get_parameter`
a.get_parameter("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "is not an "
"nn.Parameter"):
a.get_parameter("net_b.buf")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`param`"):
a.get_parameter("net_b.param")
# Test `get_buffer`
a.get_buffer("net_b.buf")
with self.assertRaisesRegex(AttributeError, "is not a "
"buffer"):
a.get_buffer("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`buf`"):
a.get_buffer("net_b.net_c.buf")
# Test non-nested attributes
a.get_submodule("")
a.get_parameter("param")
# Insert some unused submodules
a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2))
a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100))
# Garbage collection
a.delete_all_unused_submodules()
# Test that all the unused submodules are gone
self.assertFalse(module_exists(a, "net_b.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.rnn"))
self.assertFalse(module_exists(a, "batch_norm_2d"))
# Test that we didn't delete any unused Parameters or buffers
self.assertTrue(parameter_exists(a, "net_b.net_c.param"))
self.assertTrue(buffer_exists(a, "net_b.buf"))
a.graph.lint()
def test_delete_unused_submodules_leaf(self):
class SubModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(10, 10)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.linear(x)
x = self.relu(x)
return x
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.submod = SubModule()
def forward(self, x):
x = self.submod(x)
return x
model = Model()
class MyCustomTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return module_qualified_name == "submod"
inputs = torch.randn(1, 10)
traced_graph = MyCustomTracer().trace(model)
gm2 = torch.fx.GraphModule(model, traced_graph)
gm2.delete_all_unused_submodules()
torch.testing.assert_allclose(gm2(inputs), model(inputs))
def test_tracing_graphmodules_as_leaf_submodules(self):
class A(torch.nn.Module):
def forward(self, t):
return t + t
class B(torch.nn.Module):
def __init__(self):
super(type(self), self).__init__()
self.calling = False
self.called = False
def forward(self, t):
if self.calling:
return t - t
else:
return t + t
def __call__(self, *args):
self.called = True
self.calling = True
return super(type(self), self).__call__(*args)
self.calling = False
class M(torch.nn.Module):
def __init__(self, a, b):
super().__init__()
self.a = a
self.b = b
def forward(self, t):
x = self.a(t)
y = self.b(t)
return x + y
class LeafTracer(Tracer):
def is_leaf_module(self, module, name):
return True
class LeafTracerNotB(Tracer):
def is_leaf_module(self, module, name):
return False if "b" in name else True
# Recompile calls added "for fun", since they
# chain __call__ wrappers.
#
# Test: B as a regular, non-leaf module
#
a = symbolic_trace(A())
a.recompile()
m = M(a, B())
graph = LeafTracerNotB().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is not treated as leaf.
self.assertFalse(hasattr(gm, "b"))
# Test assert custom __call__ on submodule b was honored.
match = [
n
for n in gm.graph.nodes
if n.op == "call_function" and n.target == operator.sub
]
self.assertTrue(len(match) == 1)
#
# Test: B as a regular, leaf module
# symbolic_trace should only patch torch.nn.Module.__call__,
# which means B.__call__ should still execute
#
a = symbolic_trace(A())
a.recompile()
b = B()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is leaf:
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
# Test b.__call__ was run
self.assertTrue(b.called)
self.assertTrue(gm.get_submodule("b").called)
#
# Test: B as GraphModule leaf
# __call__ not honored since symbolic_trace directly invokes forward()
#
a = symbolic_trace(A())
a.recompile()
b = symbolic_trace(B())
b.recompile()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer("my_buff", torch.rand(3, 4))
self.register_parameter(
"my_param", torch.nn.Parameter(torch.rand(3, 4))
)
def forward(self, x):
return x + self.my_buff + self.my_param
mod = MyModule()
mod_traced = symbolic_trace(mod)
# Create new GraphModule based on original, either w/ dict or root module.
orig_buff = mod_traced.get_buffer("my_buff")
orig_param = mod_traced.get_parameter("my_param")
mod_traced_new = GraphModule(
{"my_buff": orig_buff, "my_param": orig_param} if use_dict_init else mod,
mod_traced.graph,
)
# Check that both my_buff and my_param are found and the same.
try:
new_buff = mod_traced_new.get_buffer("my_buff")
except Exception:
self.fail("Did not find my_buff")
self.assertEqual(orig_buff, new_buff)
try:
new_param = mod_traced_new.get_parameter("my_param")
except Exception:
self.fail("Did not find my_param")
self.assertEqual(orig_param, new_param)
x = torch.rand(3, 4)
orig_out = mod_traced(x)
submodules_out = mod_traced_new(x)
self.assertEqual(orig_out, submodules_out)
def test_graph_module_init_buffer_param_copied_dict_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=True)
def test_graph_module_init_buffer_param_copied_mod_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=False)
def test_annotations_with_no_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor:
return a(x[0])
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor':
return a(x)[0]
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
@unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature "
"`annotations` is not defined in Python <3.7")
def test_annotation_with_future(self):
try:
import fx.test_future # noqa: F401
finally:
del sys.modules["__future__"]
def test_annotations_empty_tuple(self):
class Foo(torch.nn.Module):
def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]):
return "foo"
traced = torch.fx.symbolic_trace(Foo())
x = ()
y = ("bar", ())
traced(x, y)
FileCheck().check("_Tuple[()]") \
.check("typing_Tuple[str,typing_Tuple[()]]") \
.run(traced.code)
scripted = torch.jit.script(traced)
scripted(x, y)
FileCheck().check("Tuple[()]") \
.check("Tuple[str, Tuple[()]]") \
.run(scripted.code)
@unittest.skipIf(IS_WINDOWS, "Python Windows bug? https://bugs.python.org/issue45108")
def test_assert(self):
def f(x):
assert x > 1
return x + 1
try:
torch.fx.proxy.TracerBase.trace_asserts = True
traced = symbolic_trace(f)
finally:
torch.fx.proxy.TracerBase.trace_asserts = False
self.assertEqual(f(2), traced(2))
with self.assertRaises(AssertionError):
traced(0)
def test_pytree(self):
def f_sum(x):
return sum(x)
def f_sum_dict(x):
out = 0
for k, v in x.items():
out += v
return out
def f_dict_list_map(x):
new_dict = {}
for k, v in x.items():
new_dict[k] = [i + 1 for i in v]
return new_dict
def f_dict_add(x):
return x['a'] + sum(x['z'])
def f_namedtuple_add(x):
return x.x + x.y
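# Register `Foo` as a pytree node: an instance flattens to the leaf list [a, b] with no
# context and is rebuilt via Foo(leaves[0], leaves[1]); the fx-specific flatten spec below
# mirrors that so tracing with Foo-typed concrete_args can unpack the custom container.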
pytree._register_pytree_node(
Foo,
lambda x: ([x.a, x.b], None),
lambda x, _: Foo(x[0], x[1]),
)
fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b])
def f_custom(x):
return x.a + x.b
def f_custom_dict(x):
return f_sum_dict(x.a) + x.b
def f_return_custom(x):
return Foo(x.b, x.a)
tests = [
(f_sum, [PH, PH, PH]),
(f_sum, []),
(f_sum_dict, {'a': PH, 'b': PH, 'c': PH}),
(f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}),
(f_dict_list_map, {5: (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': []}),
(f_custom, Foo(PH, PH)),
(f_custom, Foo(PH, 3)),
(f_custom_dict, Foo({'a': PH, 'b': PH}, PH)),
# (f_return_custom, Foo(PH, PH)), # Don't currently support output pytrees
(f_namedtuple_add, Point(PH, PH)),
]
def verify_pytree(f, inp):
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
num_flat_args = len([i == PH for i in pytree.tree_flatten(inp)[0]])
orig_out = f(val)
nf = symbolic_trace(f, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
bare_fx.graph.set_codegen(CodeGen())
bare_fx.recompile()
self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(val))), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
nf = symbolic_trace(nf)
self.assertEqual(nf(val), orig_out)
assert "tree_flatten_spec" not in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1)
nf = symbolic_trace(nf, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
pickled = pickle.dumps(nf)
nf = pickle.loads(pickled)
self.assertEqual(nf(val), orig_out)
for f, inp in tests:
verify_pytree(f, inp)
def test_pytree_concrete(self):
def f(b, a):
if b:
return a['a']
else:
return a['z']
inp = {'a': {'a': PH, 'z': PH}, 'b': True}
nf = symbolic_trace(f, concrete_args=inp)
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
self.assertEqual(nf(**val), f(**val))
nf = symbolic_trace(nf)
self.assertEqual(nf(**val), f(**val))
def test_custom_codegen(self):
class ListCodeGen(CodeGen):
def gen_fn_def(self, free_vars, maybe_return_annotation):
lst_unpack = f"""
def forward(self, args_list: List[torch.Tensor]){maybe_return_annotation}:
{', '.join(free_vars)} = args_list"""
return lst_unpack
def additional_globals(self):
return [('List', typing.List)]
def process_inputs(self, *inputs):
assert(len(inputs) == 1)
return inputs[0]
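# Note: with ListCodeGen installed below, the regenerated `forward` unpacks a single list
# argument (roughly `def forward(self, args_list): a, b = args_list`), which is why the
# recompiled module is later called as nf(vals) with one list rather than nf(*vals).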
def f(a, b):
return a + b
nf = symbolic_trace(f)
vals = [torch.randn(3), torch.randn(3)]
self.assertEqual(nf(*vals), f(*vals))
nf.graph.set_codegen(ListCodeGen())
nf.recompile()
bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
bare_fx.graph.set_codegen(CodeGen())
bare_fx.recompile()
self.assertEqual(nf(vals), f(*vals))
self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(vals))), f(*vals))
ts_f = torch.jit.script(nf)
self.assertEqual(nf(vals), ts_f(vals))
def test_imul_code_print(self):
graph = torch.fx.Graph()
a = graph.placeholder("a")
b = graph.placeholder("b")
graph.call_function(operator.imul, (a, b), {})
graph.output(a)
gm = torch.fx.GraphModule({}, graph)
gm.recompile()
self.assertEqual(gm(2, 3), 6)
self.assertIn("a *= b", gm.code)
def run_getitem_target():
from torch.fx._symbolic_trace import _wrapped_methods_to_patch
_wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
try:
TestFX().getitem_inner()
finally:
_wrapped_methods_to_patch.pop()
class TestOperatorSignatures(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature-flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
@onlyCPU
@ops(op_db, allowed_dtypes=(torch.float,))
def test_get_torch_func_signature_exhaustive(self, device, dtype, op):
if not isinstance(op.op, types.BuiltinFunctionType):
raise unittest.SkipTest("This path doesn't work on Python functions")
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
schemas = get_signature_for_torch_op(op.op)
if not schemas:
raise RuntimeError('No Schemas Returned')
for sample_input in sample_inputs_itr:
# Iterate through overloads until we hit a match. If we exit this
# loop via `else`, we haven't found a match
for schema in schemas:
try:
bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs)
bound_args.apply_defaults()
op(*bound_args.args, **bound_args.kwargs)
break
except TypeError:
pass
else:
raise RuntimeError(f'Did not match any schemas for op {op.name}!')
class TestFXAPIBackwardCompatibility(JitTestCase):
def setUp(self):
self.maxDiff = None
# Checking for mutable operations while tracing is feature-flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def _fn_to_stable_annotation_str(self, obj):
"""
Unfortunately we have to serialize function signatures manually since
serialization for `inspect.Signature` objects is not stable across
python versions
"""
fn_name = torch.typename(obj)
signature = inspect.signature(obj)
sig_str = f'{fn_name}{signature}'
arg_strs = []
for k, v in signature.parameters.items():
maybe_type_annotation = f': {self._annotation_type_to_stable_str(v.annotation, sig_str)}'\
if v.annotation is not inspect.Signature.empty else ''
def default_val_str(val):
if isinstance(val, (tuple, list)):
str_pieces = ['(' if isinstance(val, tuple) else '[']
str_pieces.append(', '.join(default_val_str(v) for v in val))
if isinstance(val, tuple) and len(str_pieces) == 2:
str_pieces.append(',')
str_pieces.append(')' if isinstance(val, tuple) else ']')
return ''.join(str_pieces)
# Need to fix up some default value strings.
# First case: modules. Default module `repr` contains the FS path of the module.
# Don't leak that
if isinstance(val, types.ModuleType):
return f'<module {val.__name__}>'
# Second case: callables. Callables (such as lambdas) encode their address in
# their string repr. Don't do that
if callable(val):
return f'<function {val.__name__}>'
return str(val)
if v.default is not inspect.Signature.empty:
default_val_str = default_val_str(v.default) if not isinstance(v.default, str) else f"'{v.default}'"
maybe_default = f' = {default_val_str}'
else:
maybe_default = ''
maybe_stars = ''
if v.kind == inspect.Parameter.VAR_POSITIONAL:
maybe_stars = '*'
elif v.kind == inspect.Parameter.VAR_KEYWORD:
maybe_stars = '**'
arg_strs.append(f'{maybe_stars}{k}{maybe_type_annotation}{maybe_default}')
return_annot = f' -> {self._annotation_type_to_stable_str(signature.return_annotation, sig_str)}'\
if signature.return_annotation is not inspect.Signature.empty else ''
return f'{fn_name}({", ".join(arg_strs)}){return_annot}'
def _annotation_type_to_stable_str(self, t, sig_str):
if t is inspect.Signature.empty:
return ''
# Forward ref
if isinstance(t, str):
return f"'{t}'"
if hasattr(typing, 'ForwardRef') and isinstance(t, typing.ForwardRef):
return t.__forward_arg__
if hasattr(typing, '_ForwardRef') and isinstance(t, typing._ForwardRef):
return t.__forward_arg__
trivial_mappings = {
str : 'str',
int : 'int',
float: 'float',
bool: 'bool',
torch.dtype: 'torch.dtype',
torch.Tensor: 'torch.Tensor',
torch.device: 'torch.device',
torch.memory_format: 'torch.memory_format',
slice: 'slice',
torch.nn.Module: 'torch.nn.modules.module.Module',
torch.fx.Graph : 'torch.fx.graph.Graph',
torch.fx.Node : 'torch.fx.node.Node',
torch.fx.Proxy : 'torch.fx.proxy.Proxy',
torch.fx.node.Target : 'torch.fx.node.Target',
torch.fx.node.Argument : 'torch.fx.node.Argument',
torch.fx.graph.PythonCode : 'torch.fx.graph.PythonCode',
torch.fx.graph_module.GraphModule: 'torch.fx.graph_module.GraphModule',
torch.fx.subgraph_rewriter.Match: 'torch.fx.subgraph_rewriter.Match',
Ellipsis : '...',
typing.Any: 'Any',
type(None): 'NoneType',
None: 'None',
typing.Iterator: 'Iterator',
}
mapping = trivial_mappings.get(t, None)
if mapping:
return mapping
# Handle types with contained types
contained = getattr(t, '__args__', None) or []
# Callables contain a bare List for arguments
contained = t if isinstance(t, list) else contained
# Python 3.8 puts type vars into __args__ for unbound types such as Dict
if all(isinstance(ct, typing.TypeVar) for ct in contained):
contained = []
contained_type_annots = [self._annotation_type_to_stable_str(ct, sig_str) for ct in contained]
contained_type_str = f'[{", ".join(contained_type_annots)}]' if len(contained_type_annots) > 0 else ''
origin = getattr(t, '__origin__', None)
if origin is None:
# Unbound types don't have `__origin__` in some Python versions, so fix that up here.
origin = t if t in {typing.Tuple, typing.Union, typing.Dict, typing.List, typing.Type, typing.Callable} else origin
if origin in {tuple, typing.Tuple}:
return f'Tuple{contained_type_str}'
if origin in {typing.Union}:
# Annoying hack to detect Optional
if len(contained) == 2 and (contained[0] is type(None)) ^ (contained[1] is type(None)):
not_none_param = contained[0] if contained[0] is not type(None) else contained[1]
return f'Optional[{self._annotation_type_to_stable_str(not_none_param, sig_str)}]'
return f'Union{contained_type_str}'
if origin in {dict, typing.Dict}:
return f'Dict{contained_type_str}'
if origin in {list, typing.List}:
return f'List{contained_type_str}'
if origin in {type, typing.Type}:
return f'Type{contained_type_str}'
if isinstance(t, typing.Callable):
if len(contained) > 0 and contained[0] is not Ellipsis:
return f'Callable[[{", ".join(contained_type_annots[:-1])}], {contained_type_annots[-1]}]'
else:
return f'Callable{contained_type_str}'
raise RuntimeError(f'Unrecognized type {t} used in BC-compatible type signature {sig_str}.'
f'Please add support for this type and confirm with the '
f'FX team that your signature change is valid.')
def test_function_back_compat(self):
"""
Test backward compatibility for function signatures with
@compatibility(is_backward_compatible=True). Currently this checks for
exact signature matches, which may lead to false positives. If this
becomes too annoying, we can refine this check to actually parse out
the saved schema strings and check if the change is truly backward-
incompatible.
"""
signature_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if not isinstance(obj, type):
signature_strs.append(self._fn_to_stable_annotation_str(obj))
signature_strs.sort()
try:
self.assertExpected('\n'.join(signature_strs), 'fx_backcompat_function_signatures')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX function that has been marked " \
f"as backwards-compatible has experienced a signature change. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_class_member_back_compat(self):
"""
Test backward compatibility for members of classes with
@compatibility(is_backward_compatible=True). Currently this checks for
exact matches on the publicly visible members of the class.
"""
class_method_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if isinstance(obj, type):
public_members = [name for name in obj.__dict__ if not name.startswith('_')]
class_method_strs.append(f'{torch.typename(obj)} {sorted(public_members)}')
class_method_strs.sort()
try:
self.assertExpected('\n'.join(class_method_strs), 'fx_backcompat_class_members')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX class that has been marked " \
f"as backwards-compatible has experienced change in its public members. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_public_api_surface(self):
non_back_compat_objects = {}
def check_symbols_have_bc_designation(m, prefix):
if not m.__name__.startswith('torch.fx'):
return
if m.__name__.startswith('torch.fx.experimental'):
return
for k, v in m.__dict__.items():
if v is m:
continue
if k.startswith('_'):
continue
if isinstance(v, types.ModuleType):
check_symbols_have_bc_designation(v, prefix + [k])
elif isinstance(v, type) or isinstance(v, types.FunctionType):
if v not in _MARKED_WITH_COMATIBLITY:
non_back_compat_objects.setdefault(v)
check_symbols_have_bc_designation(torch.fx, ['torch', 'fx'])
check_symbols_have_bc_designation(torch.fx.passes, ['torch', 'fx', 'passes'])
non_back_compat_strs = [torch.typename(obj) for obj in non_back_compat_objects.keys()]
# Only want objects in torch.fx
non_back_compat_strs = [
s for s in non_back_compat_strs if s.startswith('torch.fx') and not s.startswith('torch.fx.experimental')]
# Only want objects in public namespaces
non_back_compat_strs = [
s for s in non_back_compat_strs if all(not atom.startswith('_') for atom in s.split('.'))]
non_back_compat_strs.sort()
if len(non_back_compat_strs) != 0:
raise AssertionError(f"Public FX API(s) {non_back_compat_strs} introduced but not given a "
f"backwards-compatibility classification! Please decorate these "
f"API(s) with `@torch.fx._compatibility.compatibility` to specify "
f"BC guarantees.")
class TestFunctionalTracing(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature-flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
IGNORE_FUNCS = ("has_torch_function", "has_torch_function_unary",
"has_torch_function_variadic", "handle_torch_function",
"boolean_dispatch")
TO_PATCH = {"has_torch_function": None,
"has_torch_function_unary": None,
"has_torch_function_variadic": None}
BUILT_IN_FUNC = (AssertionError, "")
PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable")
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default")
ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$")
CONTROL_FLOW = (TraceError, r"symbolically traced variables cannot be used as inputs to control flow")
INTERPOLATE_ARGS_CONFLICT = (ValueError, r"only one of size or scale_factor should be defined")
MUTABLE = (RuntimeError, r"Tried to trace mutable operation")
UNTRACEABLE_FUNCTIONALS = {
"adaptive_avg_pool1d": BUILT_IN_FUNC,
"avg_pool1d": BUILT_IN_FUNC,
"avg_pool2d": BUILT_IN_FUNC,
"avg_pool3d": BUILT_IN_FUNC,
"bilinear": BUILT_IN_FUNC,
"celu_": BUILT_IN_FUNC,
"channel_shuffle": BUILT_IN_FUNC,
"native_channel_shuffle": BUILT_IN_FUNC,
"conv1d": BUILT_IN_FUNC,
"conv2d": BUILT_IN_FUNC,
"conv3d": BUILT_IN_FUNC,
"conv_tbc": BUILT_IN_FUNC,
"conv_transpose1d": BUILT_IN_FUNC,
"conv_transpose2d": BUILT_IN_FUNC,
"conv_transpose3d": BUILT_IN_FUNC,
"cosine_similarity": BUILT_IN_FUNC,
"elu_": BUILT_IN_FUNC,
"gelu": BUILT_IN_FUNC,
"hardshrink": BUILT_IN_FUNC,
"hardtanh_": BUILT_IN_FUNC,
"leaky_relu_": BUILT_IN_FUNC,
"linear": BUILT_IN_FUNC,
"logsigmoid": BUILT_IN_FUNC,
"one_hot": BUILT_IN_FUNC,
"pairwise_distance": BUILT_IN_FUNC,
"pdist": BUILT_IN_FUNC,
"pixel_shuffle": BUILT_IN_FUNC,
"pixel_unshuffle": BUILT_IN_FUNC,
"prelu": BUILT_IN_FUNC,
"relu_": BUILT_IN_FUNC,
"rrelu_": BUILT_IN_FUNC,
"selu_": BUILT_IN_FUNC,
"softplus": BUILT_IN_FUNC,
"softshrink": BUILT_IN_FUNC,
"threshold_": BUILT_IN_FUNC,
"adaptive_avg_pool2d": LEN_ERROR,
"adaptive_avg_pool3d": LEN_ERROR,
"adaptive_max_pool2d_with_indices": LEN_ERROR,
"adaptive_max_pool3d_with_indices": LEN_ERROR,
"instance_norm": CONTROL_FLOW,
"pad": LEN_ERROR,
"adaptive_max_pool1d": PROXY_ITERABLE,
"adaptive_max_pool2d": PROXY_ITERABLE,
"adaptive_max_pool3d": PROXY_ITERABLE,
"fractional_max_pool2d": PROXY_ITERABLE,
"fractional_max_pool3d": PROXY_ITERABLE,
"max_pool1d": PROXY_ITERABLE,
"max_pool2d": PROXY_ITERABLE,
"max_pool3d": PROXY_ITERABLE,
"group_norm": PROXY_ITERATED,
"lp_pool2d": PROXY_ITERATED,
"max_unpool1d": PROXY_ITERATED,
"max_unpool2d": PROXY_ITERATED,
"max_unpool3d": PROXY_ITERATED,
"adaptive_max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"layer_norm": ARG_TYPE_MISMATCH,
"lp_pool1d": ARG_TYPE_MISMATCH,
"affine_grid": CONTROL_FLOW,
"alpha_dropout": CONTROL_FLOW,
"batch_norm": CONTROL_FLOW,
"binary_cross_entropy": CONTROL_FLOW,
"binary_cross_entropy_with_logits": CONTROL_FLOW,
"celu": CONTROL_FLOW,
"cosine_embedding_loss": CONTROL_FLOW,
"cross_entropy": CONTROL_FLOW,
"ctc_loss": CONTROL_FLOW,
"dropout": CONTROL_FLOW,
"dropout2d": CONTROL_FLOW,
"dropout3d": CONTROL_FLOW,
"elu": CONTROL_FLOW,
"embedding": CONTROL_FLOW,
"embedding_bag": CONTROL_FLOW,
"feature_alpha_dropout": CONTROL_FLOW,
"fold": CONTROL_FLOW,
"gaussian_nll_loss": CONTROL_FLOW,
"glu": CONTROL_FLOW,
"grid_sample": CONTROL_FLOW,
"gumbel_softmax": CONTROL_FLOW,
"hardsigmoid": CONTROL_FLOW,
"hardswish": CONTROL_FLOW,
"hardtanh": CONTROL_FLOW,
"hinge_embedding_loss": CONTROL_FLOW,
"huber_loss": CONTROL_FLOW,
"interpolate": CONTROL_FLOW,
"kl_div": CONTROL_FLOW,
"l1_loss": CONTROL_FLOW,
"leaky_relu": CONTROL_FLOW,
"local_response_norm": CONTROL_FLOW,
"margin_ranking_loss": CONTROL_FLOW,
"max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"mse_loss": CONTROL_FLOW,
"multi_head_attention_forward": CONTROL_FLOW,
"multi_margin_loss": CONTROL_FLOW,
"multilabel_margin_loss": CONTROL_FLOW,
"multilabel_soft_margin_loss": CONTROL_FLOW,
"nll_loss": CONTROL_FLOW,
"poisson_nll_loss": CONTROL_FLOW,
"relu": CONTROL_FLOW,
"relu6": CONTROL_FLOW,
"rrelu": CONTROL_FLOW,
"selu": CONTROL_FLOW,
"silu": CONTROL_FLOW,
"mish": CONTROL_FLOW,
"smooth_l1_loss": CONTROL_FLOW,
"soft_margin_loss": CONTROL_FLOW,
"threshold": CONTROL_FLOW,
"triplet_margin_loss": CONTROL_FLOW,
"triplet_margin_with_distance_loss": CONTROL_FLOW,
"unfold": CONTROL_FLOW,
"upsample": CONTROL_FLOW,
"upsample_bilinear": INTERPOLATE_ARGS_CONFLICT,
"upsample_nearest": INTERPOLATE_ARGS_CONFLICT,
"normalize" : MUTABLE,
}
# List of nn.functionals that take Tensor inputs but lack type annotations
FUNCTIONALS_WITHOUT_ANNOTATION = (
"adaptive_max_pool1d",
"adaptive_max_pool2d",
"adaptive_max_pool3d",
"fractional_max_pool2d",
"fractional_max_pool3d",
"max_pool1d",
"max_pool2d",
"max_pool3d",
"gaussian_nll_loss",
"upsample",
"upsample_bilinear",
"upsample_nearest",
)
# Inconsistent behavior between Python 3.8 and other Python versions:
# - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED`
# - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same
# internal exception above
# Use the following map to override the expected exception for Python 3.8
UNTRACEABLE_FUNCTIONALS_PY38 = {
"adaptive_max_pool1d": PROXY_ITERATED,
"adaptive_max_pool2d": PROXY_ITERATED,
"adaptive_max_pool3d": PROXY_ITERATED,
"fractional_max_pool2d": PROXY_ITERATED,
"fractional_max_pool3d": PROXY_ITERATED,
"max_pool1d": PROXY_ITERATED,
"max_pool2d": PROXY_ITERATED,
"max_pool3d": PROXY_ITERATED,
"group_norm": LEN_ERROR
}
@classmethod
def _get_functional(cls):
functional_list = []
for f in dir(torch.nn.functional):
if not f.islower():
continue
# Ignore internal functions
if f.startswith('_'):
continue
# Ignore supporting functions
if f in cls.IGNORE_FUNCS:
continue
fn = getattr(torch.nn.functional, f)
# Ignore non-callable object like modules
if not isinstance(fn, Callable):
continue
if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION:
try:
sig = inspect.signature(fn)
has_tensor_arg = False
for arg, param in sig.parameters.items():
if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor):
has_tensor_arg = True
if not has_tensor_arg:
continue
# Raised when the function has no signature or its type is not supported by inspect.signature
except ValueError:
pass
functional_list.append((f, fn))
return functional_list
@classmethod
def generate_test_func(cls, func_name, fn):
def functional_test(self):
if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \
sys.version_info >= (3, 8) and sys.version_info < (3, 10):
exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
elif func_name in self.UNTRACEABLE_FUNCTIONALS:
exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
else:
symbolic_trace(fn)
return functional_test
@classmethod
def generate_tests(cls):
functional_list = cls._get_functional()
for func_name, fn in functional_list:
test_name = "test_nn_functional_" + func_name
functional_test = cls.generate_test_func(func_name, fn)
setattr(cls, test_name, functional_test)
@classmethod
def setUpClass(cls):
def no(*args, **kwargs):
return False
for name in cls.TO_PATCH.keys():
cls.TO_PATCH[name] = getattr(torch.nn.functional, name)
setattr(torch.nn.functional, name, no)
@classmethod
def tearDownClass(cls):
for name in cls.TO_PATCH.keys():
setattr(torch.nn.functional, name, cls.TO_PATCH[name])
TestFunctionalTracing.generate_tests()
instantiate_device_type_tests(TestOperatorSignatures, globals())
@skipIfNoTorchVision
class TestVisionTracing(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature-flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
INCONSISTENT_TYPE = (
RuntimeError,
r"Return value was annotated as having type __torch__.torchvision.models[.\w]+ but is actually of type Tensor"
)
UNTRACEABLE_MODELS = {
"fasterrcnn_resnet50_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_320_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_fpn": PROXY_ITERATED,
"maskrcnn_resnet50_fpn": PROXY_ITERATED,
"keypointrcnn_resnet50_fpn": PROXY_ITERATED,
"retinanet_resnet50_fpn": PROXY_ITERATED,
}
UNSCRIPTABLE_MODELS = {
"googlenet": INCONSISTENT_TYPE,
"inception_v3": INCONSISTENT_TYPE,
}
output_transform = {
"fcn_resnet50": lambda x: x["out"],
"fcn_resnet101": lambda x: x["out"],
"deeplabv3_resnet50": lambda x: x["out"],
"deeplabv3_resnet101": lambda x: x["out"],
"deeplabv3_mobilenet_v3_large": lambda x: x["out"],
"lraspp_mobilenet_v3_large": lambda x: x["out"],
"fasterrcnn_resnet50_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1],
"maskrcnn_resnet50_fpn": lambda x: x[1],
"keypointrcnn_resnet50_fpn": lambda x: x[1],
"retinanet_resnet50_fpn": lambda x: x[1],
}
@classmethod
def generate_test_fn(cls, name, model_fn, x, kwargs):
def run_test(self):
model = model_fn(**kwargs)
model = model.eval()
if name in self.UNTRACEABLE_MODELS:
err, exc = self.UNTRACEABLE_MODELS[name]
with self.assertRaisesRegex(err, exc):
graph = symbolic_trace(model)
else:
out_transform = self.output_transform.get(name, lambda x: x)
graph : torch.fx.GraphModule = symbolic_trace(model)
a = out_transform(model(x))
b = out_transform(graph(x))
self.assertEqual(a, b)
if name in self.UNSCRIPTABLE_MODELS:
err, exc = self.UNSCRIPTABLE_MODELS[name]
with self.assertRaisesRegex(err, exc):
script = torch.jit.script(graph)
else:
script = torch.jit.script(graph)
c = out_transform(script(x))
self.assertEqual(a, c)
return run_test
@classmethod
def generate_classification_tests(cls):
for k, v in torchvision_models.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_' + k
x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_segmentation_tests(cls):
for k, v in torchvision_models.segmentation.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_segmentation_' + k
x = torch.rand(1, 3, 32, 32)
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_detection_tests(cls):
for k, v in torchvision_models.detection.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_detection_' + k
x = [torch.rand(3, 300, 300)]
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_video_tests(cls):
for k, v in torchvision_models.video.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_video_' + k
x = torch.rand(1, 3, 4, 112, 112)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_tests(cls):
cls.generate_classification_tests()
cls.generate_detection_tests()
cls.generate_segmentation_tests()
cls.generate_video_tests()
if HAS_TORCHVISION:
TestVisionTracing.generate_tests()
if __name__ == '__main__':
run_tests()
|
# coding: utf-8
# Copyright (c) 2019-2020 Latona. All rights reserved.
import time
from aion.logger import lprint
from aion.microservice import Options, main_decorator
from .check import UpdateUsbStateToDB, UsbConnectionMonitor, DATABASE
SERVICE_NAME = "check-usb-storage-connection"
EXECUTE_INTERVAL = 5
def fillter_new_mountpoint(mountpoints, connected_usbs):
exist_mountpoints = list(map(lambda x: x['mountpoint'], connected_usbs))
new_mountpoints = []
for mount in mountpoints:
if mount not in exist_mountpoints:
new_mountpoints.append(mount)
return new_mountpoints
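# Illustrative example (hypothetical values; record shape matches get_connected_usb_list() below):
#   fillter_new_mountpoint(["/mnt/usb0", "/mnt/usb1"], [{"usb_id": 1, "mountpoint": "/mnt/usb0"}])
#   -> ["/mnt/usb1"]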
@main_decorator(SERVICE_NAME)
def main_without_kanban(opt: Options):
lprint("start main_with_kanban()")
# get cache kanban
conn = opt.get_conn()
num = opt.get_number()
# kanban = conn.get_one_kanban(SERVICE_NAME, num)
kanban = conn.set_kanban(SERVICE_NAME, num)
# main function #
usb = UsbConnectionMonitor()
while True:
is_change = False
mountpoints = usb.get_mount_points()
with UpdateUsbStateToDB() as db:
con_usbs = db.get_connected_usb_list()
# connected usb
new_mountpoints = fillter_new_mountpoint(mountpoints, con_usbs)
for mount in new_mountpoints:
db.update_usb_state(mount, 1)
lprint(f"found usb at:{mount}")
is_change = True
db.commit_query()
# unconnected usb
for conneted in con_usbs:
if conneted['mountpoint'] not in mountpoints:
db.update_unmounted_usb_state(conneted['usb_id'])
lprint(f"unconnected usb at: {conneted["mountpoint"]}")
is_change = True
db.commit_query()
if is_change:
# output after kanban
conn.output_kanban(
result=True,
process_number=num,
metadata={"mountpoints": mountpoints, "mode": "all",
"database": DATABASE, "table": "usbs"},
)
time.sleep(EXECUTE_INTERVAL)
if __name__ == "__main__":
main_without_kanban()
|
# coding: utf-8
# Copyright (c) 2019-2020 Latona. All rights reserved.
import time
from aion.logger import lprint
from aion.microservice import Options, main_decorator
from .check import UpdateUsbStateToDB, UsbConnectionMonitor, DATABASE
SERVICE_NAME = "check-usb-storage-connection"
EXECUTE_INTERVAL = 5
def fillter_new_mountpoint(mountpoints, connected_usbs):
exist_mountpoints = list(map(lambda x: x['mountpoint'], connected_usbs))
new_mountpoints = []
for mount in mountpoints:
if mount not in exist_mountpoints:
new_mountpoints.append(mount)
return new_mountpoints
@main_decorator(SERVICE_NAME)
def main_without_kanban(opt: Options):
lprint("start main_with_kanban()")
# get cache kanban
conn = opt.get_conn()
num = opt.get_number()
# kanban = conn.get_one_kanban(SERVICE_NAME, num)
kanban = conn.set_kanban(SERVICE_NAME, num)
# main function #
usb = UsbConnectionMonitor()
while True:
is_change = False
mountpoints = usb.get_mount_points()
with UpdateUsbStateToDB() as db:
con_usbs = db.get_connected_usb_list()
# connected usb
new_mountpoints = fillter_new_mountpoint(mountpoints, con_usbs)
for mount in new_mountpoints:
db.update_usb_state(mount, 1)
lprint(f"found usb at:{mount}")
is_change = True
db.commit_query()
# unconnected usb
for conneted in con_usbs:
if conneted['mountpoint'] not in mountpoints:
db.update_unmounted_usb_state(conneted['usb_id'])
lprint(f"unconnected usb at: {conneted['mountpoint']}")
is_change = True
db.commit_query()
if is_change:
# output after kanban
conn.output_kanban(
result=True,
process_number=num,
metadata={"mountpoints": mountpoints, "mode": "all",
"database": DATABASE, "table": "usbs"},
)
time.sleep(EXECUTE_INTERVAL)
if __name__ == "__main__":
main_without_kanban()
|
import os
import pandas as pd
import fsspec
import argparse
from src.defaults import args_info
env_vars = open("/content/credentials","r").read().split('\n')
for var in env_vars[:-1]:
key, value = var.split(' = ')
os.environ[key] = value
storage_options={'account_name':os.environ['ACCOUNT_NAME'],\
'account_key':os.environ['BLOB_KEY']}
fs = fsspec.filesystem('az', account_name=storage_options['account_name'], account_key=storage_options['account_key'])
##env data acquired
def return_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--data-src',
type=args_info["data_src"]["type"],
help=args_info["data_src"]["help"])
parser.add_argument('--write-to-csv',
action=args_info["write_to_csv"]["action"],
help=args_info["write_to_csv"]["help"])
return parser
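# Example invocation (hypothetical script name; the actual entry point is not shown here):
#   python prepare_stations.py --data-src usgs --write-to-csv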
if __name__ == "__main__":
args = return_parser().parse_args()
if args.data_src == 'usgs':
#USGS DATA PROCESS
data_src = 'usgs'
container = 'usgs-data'
station_url = f'az://{container}/{args.data_src}_station_metadata_raw.csv'
station_df = pd.read_csv(station_url, storage_options=storage_options)
sites_str = [str(f).zfill(8) for f in station_df.site_no]
station_df['sites_str'] = sites_str
query = []
for f in fs.ls(f'{container}/stations'):
station = os.path.basename(f).split('_')[0]
query.append(station)
q = pd.DataFrame({'sites_str':query})
out = station_df.merge(q, on='sites_str')
out['site_no'] = out['sites_str']
out = out[['site_no','site_name', 'Latitude', 'Longitude','geometry']]
if args.write_to_csv:
out.to_csv(f'az://{container}/usgs_station_metadata.csv',index=False, storage_options=storage_options)
if args.data_src == 'ana':
container = 'ana-data'
station_url = f'az://{container}/ana_station_metadata.csv'
station_df = pd.read_csv(station_url, storage_options=storage_options)
for site_no in station_df.site_no:
station_url = f'az://{container}/{site_no}.csv'
station_url2 = f'az://{container}/{site_no}_2.csv'
site_df1_raw = pd.read_csv(station_url, delimiter=',', skiprows=10, storage_options=storage_options)
translation = pd.read_csv(f'az://{container}/ana_translations.csv', storage_options=storage_options)
trans = {p:e for p,e in zip(translation.Portuguese, translation.English)}
site_df1 = site_df1_raw.rename(columns=trans)
site_df1 = site_df1.dropna(subset=['Date'])
site_df1['TimeL'] = site_df1['TimeL'].fillna('01/01/1900 01:00')
site_df1['Date-Time'] = [d for d in site_df1['Date']]
site_df1['Date-Time'] = pd.to_datetime(site_df1['Date-Time'],\
format='%d/%m/%Y')
site_df2_raw = pd.read_csv(station_url2, delimiter=',', skiprows=14, storage_options=storage_options)
site_df2_raw = site_df2_raw.replace('01/01/1900', '01/01/1900 01:00')
translation2 = {'Data':'Date','Hora':'Hour','Turbidez':'Turbidity'}
site_df2 = site_df2_raw.rename(columns=translation2)
site_df2 = site_df2.dropna(subset=['Date'])
site_df2['Date-Time-HM'] = [f"{d} {t.split(" ")[1]}" for d,t in zip(site_df2['Date'],site_df2['Hour'])]
site_df2['Date-Time'] = [d for d in site_df2['Date']]
site_df2['Date-Time'] = pd.to_datetime(site_df2['Date-Time'],\
format='%d/%m/%Y')
site_df2 = site_df2[['Date', 'Hour', 'Date-Time','Turbidity']]
selection = ['Date-Time', 'Discharge', 'Suspended Sediment Concentration (mg/L)', 'Turbidity']
site_df = site_df1.merge(site_df2, on='Date', how='outer', suffixes=('_',''))
site_df['Date-Time'] = site_df['Date-Time'].fillna(site_df['Date-Time_'])
#site_df['Hour'] = site_df['Hour'].fillna(site_df['Hour_'])
site_df = site_df[selection]
s = str(site_no).zfill(8)
write_filename = f'az://{container}/stations/{str(site_no)}.csv'
print(f'writing to {write_filename}')
if args.write_to_csv:
site_df.to_csv(write_filename, index=False, storage_options=storage_options)
if args.data_src == 'itv':
container = 'itv-data'
station_url = f'az://{container}/itv_station_metadata.csv'
station_df = pd.read_csv(station_url, storage_options=storage_options)
for site_no in station_df.site_no:
station_url = f'az://{container}/{site_no}.csv'
site_df = pd.read_csv(station_url,\
storage_options=storage_options,\
delimiter=',')
site_df['Date-Time'] = pd.to_datetime(site_df['Campaign Date'], \
format='%d/%m/%Y')
if args.write_to_csv:
write_filename = f'az://{container}/stations/{site_no}.csv'
site_df.to_csv(write_filename, storage_options=storage_options,\
index=False)
|
import os
import pandas as pd
import fsspec
import argparse
from src.defaults import args_info
env_vars = open("/content/credentials","r").read().split('\n')
for var in env_vars[:-1]:
key, value = var.split(' = ')
os.environ[key] = value
storage_options={'account_name':os.environ['ACCOUNT_NAME'],\
'account_key':os.environ['BLOB_KEY']}
fs = fsspec.filesystem('az', account_name=storage_options['account_name'], account_key=storage_options['account_key'])
##env data acquired
def return_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--data-src',
type=args_info["data_src"]["type"],
help=args_info["data_src"]["help"])
parser.add_argument('--write-to-csv',
action=args_info["write_to_csv"]["action"],
help=args_info["write_to_csv"]["help"])
return parser
if __name__ == "__main__":
args = return_parser().parse_args()
if args.data_src == 'usgs':
#USGS DATA PROCESS
data_src = 'usgs'
container = 'usgs-data'
station_url = f'az://{container}/{args.data_src}_station_metadata_raw.csv'
station_df = pd.read_csv(station_url, storage_options=storage_options)
sites_str = [str(f).zfill(8) for f in station_df.site_no]
station_df['sites_str'] = sites_str
query = []
for f in fs.ls(f'{container}/stations'):
station = os.path.basename(f).split('_')[0]
query.append(station)
q = pd.DataFrame({'sites_str':query})
out = station_df.merge(q, on='sites_str')
out['site_no'] = out['sites_str']
out = out[['site_no','site_name', 'Latitude', 'Longitude','geometry']]
if args.write_to_csv:
out.to_csv(f'az://{container}/usgs_station_metadata.csv',index=False, storage_options=storage_options)
if args.data_src == 'ana':
container = 'ana-data'
station_url = f'az://{container}/ana_station_metadata.csv'
station_df = pd.read_csv(station_url, storage_options=storage_options)
for site_no in station_df.site_no:
station_url = f'az://{container}/{site_no}.csv'
station_url2 = f'az://{container}/{site_no}_2.csv'
site_df1_raw = pd.read_csv(station_url, delimiter=',', skiprows=10, storage_options=storage_options)
translation = pd.read_csv(f'az://{container}/ana_translations.csv', storage_options=storage_options)
trans = {p:e for p,e in zip(translation.Portuguese, translation.English)}
site_df1 = site_df1_raw.rename(columns=trans)
site_df1 = site_df1.dropna(subset=['Date'])
site_df1['TimeL'] = site_df1['TimeL'].fillna('01/01/1900 01:00')
site_df1['Date-Time'] = [d for d in site_df1['Date']]
site_df1['Date-Time'] = pd.to_datetime(site_df1['Date-Time'],\
format='%d/%m/%Y')
site_df2_raw = pd.read_csv(station_url2, delimiter=',', skiprows=14, storage_options=storage_options)
site_df2_raw = site_df2_raw.replace('01/01/1900', '01/01/1900 01:00')
translation2 = {'Data':'Date','Hora':'Hour','Turbidez':'Turbidity'}
site_df2 = site_df2_raw.rename(columns=translation2)
site_df2 = site_df2.dropna(subset=['Date'])
site_df2['Date-Time-HM'] = [f"{d} {t.split(' ')[1]}" for d,t in zip(site_df2['Date'],site_df2['Hour'])]
site_df2['Date-Time'] = [d for d in site_df2['Date']]
site_df2['Date-Time'] = pd.to_datetime(site_df2['Date-Time'],\
format='%d/%m/%Y')
site_df2 = site_df2[['Date', 'Hour', 'Date-Time','Turbidity']]
selection = ['Date-Time', 'Discharge', 'Suspended Sediment Concentration (mg/L)', 'Turbidity']
site_df = site_df1.merge(site_df2, on='Date', how='outer', suffixes=('_',''))
site_df['Date-Time'] = site_df['Date-Time'].fillna(site_df['Date-Time_'])
#site_df['Hour'] = site_df['Hour'].fillna(site_df['Hour_'])
site_df = site_df[selection]
s = str(site_no).zfill(8)
write_filename = f'az://{container}/stations/{str(site_no)}.csv'
print(f'writing to {write_filename}')
if args.write_to_csv:
site_df.to_csv(write_filename, index=False, storage_options=storage_options)
if args.data_src == 'itv':
container = 'itv-data'
station_url = f'az://{container}/itv_station_metadata.csv'
station_df = pd.read_csv(station_url, storage_options=storage_options)
for site_no in station_df.site_no:
station_url = f'az://{container}/{site_no}.csv'
site_df = pd.read_csv(station_url,\
storage_options=storage_options,\
delimiter=',')
site_df['Date-Time'] = pd.to_datetime(site_df['Campaign Date'], \
format='%d/%m/%Y')
if args.write_to_csv:
write_filename = f'az://{container}/stations/{site_no}.csv'
site_df.to_csv(write_filename, storage_options=storage_options,\
index=False)
|
"""CLI argument parsing."""
import argparse
# from ..io import EXTENSIONS
from ._parseutil import Color
from ._parseutil import CustomFormatter
from ._parseutil import FileFolderType
from ._parseutil import FileType
from ._parseutil import FolderType
from ._parseutil import ProbabilityType
from ._parseutil import ShapeType
from ._parseutil import _add_utils
EXTENSIONS = ("tif", "jpeg", "jpg", "png")
def _parse_args_check(
subparsers: argparse._SubParsersAction, parent_parser: argparse.ArgumentParser,
):
"""Subparser for checking."""
parser = subparsers.add_parser(
"check",
parents=[parent_parser],
formatter_class=CustomFormatter,
add_help=False,
description=(
f"\U0001F537 {Color.title}Checking submodule{Color.end} \U0001F537\n\n"
"Check the arrangement of your image's axis also known as image shape. "
),
help="\U0001F537 Determine your input image's shape.",
)
group1 = parser.add_argument_group(f"{Color.required}Required{Color.end}")
group1.add_argument(
"INPUT",
type=FileType(EXTENSIONS),
help=(
"Input image. "
"Path to the image file to be checked. "
"The path be relative (e.g. ../dir) or absolute (e.g. /Users/myname/). "
"Input can either be given as path to a directory containing files or as a single file. "
"Note that only the specified filetypes will be processed. "
f"[required] [filetypes: {", ".join(EXTENSIONS)}]"
),
)
_add_utils(parser)
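# Example invocation (using the "deepblink" console entry point referenced in the help texts):
#   deepblink check my_image.tif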
def _parse_args_config(
subparsers: argparse._SubParsersAction, parent_parser: argparse.ArgumentParser
):
"""Subparser for configuration."""
parser = subparsers.add_parser(
"config",
parents=[parent_parser],
add_help=False,
formatter_class=CustomFormatter,
description=(
f"\U0001F528 {Color.title}Configuration submodule{Color.end} \U0001F528\n\n"
"Prepare a configuration file used to adjust parameters during training. "
),
help="\U0001F528 Create a configuration file for training.",
)
group2 = parser.add_argument_group(f"{Color.optional}Optional{Color.end}")
group2.add_argument(
"-n",
"--name",
type=str,
default="config",
help=(
"Custom configuration name. "
'The file extension "yaml" will be added automatically to the given name. '
'[default: "config"]'
),
)
group2.add_argument(
"-r",
"--raw",
action="store_true",
help=(
"Save configuration file without description of values. "
"Shorter but not descriptive."
),
)
_add_utils(parser)
def _parse_args_create(
subparsers: argparse._SubParsersAction, parent_parser: argparse.ArgumentParser,
):
"""Subparser for creation."""
parser = subparsers.add_parser(
"create",
parents=[parent_parser],
formatter_class=CustomFormatter,
add_help=False,
description=(
f"\U0001F4BE {Color.title}Creation submodule{Color.end} \U0001F4BE\n\n"
"Create a custom dataset with raw files and corresponding labels. "
"Relies on labeling output from FIJI that was saved with the provided macro "
"or the standard TrackMate coordinate output. "
'Both are described here "https://github.com/BBQuercus/deepBlink/wiki/Datasets".'
),
help="\U0001F4BE Create a new dataset from raw files.",
)
group1 = parser.add_argument_group(f"{Color.required}Required{Color.end}")
group1.add_argument(
"-i",
"--input",
required=True,
type=FolderType(),
help=(
"Path to the directory containing raw images. "
"Note that only the specified filetypes will be processed. "
f"[required] [filetypes: {", ".join(EXTENSIONS)}]"
),
)
group2 = parser.add_argument_group(f"{Color.optional}Optional{Color.end}")
group2.add_argument(
"-l",
"--labels",
type=FolderType(),
help=(
"Path to the directory containing labels in csv format. "
"The default path accounts for using the FIJI macro described on the wiki. "
"[default: --INPUT/labels/]"
),
)
group2.add_argument(
"-n",
"--name",
default="dataset",
type=str,
help=(
"Custom dataset name. "
'The file extension "npz" will be added automatically. '
'[default: "dataset"]'
),
)
group2.add_argument(
"-s",
"--size",
default=512,
type=int,
help=(
"Image crop size. "
"If given, crops all images into the specified size. "
"Will crop non-overlapping and ignore areas that did not get covered."
"deepBlink requires square images powers of 2, such as 256, 512... "
"[default: 512]"
),
)
group2.add_argument(
"-m",
"--minspots",
default=1,
type=int,
help=(
"Minimum number of spots per crop. "
"Ignores fields of view generated with fewer than minspots number of spots. "
"[default: 1]"
),
)
group2.add_argument(
"-vs",
"--validsplit",
default=0.2,
type=float,
help=(
"Validation split. "
"Split percentage (scaled between 0 - 1) of validation vs. train set. "
"Note the validation split is done after splitting test and trainval. "
"[default: 0.2]"
),
)
group2.add_argument(
"-ts",
"--testsplit",
default=0.2,
type=float,
help=(
"Testing split. "
"Split percentage (scaled between 0 - 1) of test vs. trainval set. "
"[default: 0.2]"
),
)
_add_utils(parser)
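# Example invocation (hypothetical paths):
#   deepblink create --input ./raw_images --labels ./raw_images/labels --name dataset --size 512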
def _parse_args_download(
subparsers: argparse._SubParsersAction, parent_parser: argparse.ArgumentParser,
):
"""Subparser for downloading."""
parser = subparsers.add_parser(
"download",
parents=[parent_parser],
formatter_class=CustomFormatter,
add_help=False,
description=(
f"\U0001F4E5 {Color.title}Downloading submodule{Color.end} \U0001F4E5\n\n"
"Download pre-trained models from our online figshare repository to predict. "
),
help="\U0001F4E5 Download pre-trained models for use.",
)
group2 = parser.add_argument_group(f"{Color.optional}Optional{Color.end}")
group2.add_argument(
"-i",
"--input",
type=str,
default=None,
help=(
"Input name. "
"Name of the model to be downloaded. "
'Note that only the models listed in "deepblink download --list" will be processed. '
"[default: None]"
),
)
group2.add_argument(
"-l",
"--list",
action="store_true",
help=("List available models. " "Name of the model to be downloaded. "),
)
group2.add_argument(
"-a",
"--all",
action="store_true",
help=(
"Download all available models. "
"If passed, all models will be downloaded. "
),
)
_add_utils(parser)
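# Example invocations:
#   deepblink download --list                 # print the available model names
#   deepblink download --input MODEL_NAME     # MODEL_NAME taken from the printed list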
def _parse_args_predict(
subparsers: argparse._SubParsersAction, parent_parser: argparse.ArgumentParser,
):
"""Subparser for prediction."""
parser = subparsers.add_parser(
"predict",
parents=[parent_parser],
add_help=False,
formatter_class=CustomFormatter,
description=(
f"\U0001F914 {Color.title}Prediction submodule{Color.end} \U0001F914\n\n"
"Use a pre-trained model to predict blob coordinates on new data. "
"In addition to the required model and input file or folder, "
"several optional features are accessible as described below."
),
help="\U0001F914 Predict on data with a pre-trained model.",
)
group1 = parser.add_argument_group(f"{Color.required}Required{Color.end}")
group1.add_argument(
"-i",
"--input",
required=True,
type=FileFolderType(EXTENSIONS),
help=(
"Image files to predict on. "
"Input can either be given as path to a directory containing files or as a single file. "
"The path be relative (e.g. ../dir) or absolute (e.g. /Users/myname/). "
"Fileglobs are currently not available. "
"Note that only the specified filetypes will be processed. "
f"[required] [filetypes: {", ".join(EXTENSIONS)}]"
),
)
group1.add_argument(
"-m",
"--model",
required=True,
type=FileType(["h5"]),
help=(
"DeepBlink model. "
'Model has to be of file type ".h5". '
'The path can be relative or absolute as described in "--input". '
'Model can either be trained on custom data using "deepblink train" or using a pre-trained '
'model available through the GitHub wiki on "https://github.com/BBQuercus/deepBlink/wiki". '
"[required]"
),
)
group2 = parser.add_argument_group(f"{Color.optional}Optional{Color.end}")
group2.add_argument(
"-o",
"--output",
type=FolderType(),
help=(
"Output folder path. "
"Path to the directory into which all output files are saved. "
"Output files will automatically take the same name as their corresponding image. "
"[default: input location]"
),
)
group2.add_argument(
"-r",
"--radius",
type=int,
default=None,
help=(
"Intensity radius. "
"If given, will calculate the integrated intensity in the specified radius around each coordinate. "
"If the radius is set to zero if only the central pixels intensity should be calculated. "
'The intensity is added as additional column to the output file called "i". '
"[default: None]"
),
)
group2.add_argument(
"-s",
"--shape",
type=ShapeType(),
default=None,
help=(
"Image shape. "
"Used to assess the arrangement of input image axes otherwise known as shape. "
"If not given, uses a basic prediction based on common defaults. "
'Must be in the format "(x,y,z,t,c,3)" using the specified characters. '
'If unsure, use "deepblink check" to determine your image shape '
"and more detailed information. "
"[default: None]"
),
)
group2.add_argument(
"-p",
"--probability",
type=ProbabilityType(),
default=None,
help=(
"Prediction probability. "
"By default, the model's output probability map is rounded. "
"I.e. probabilities above 0.5 are included in the final output. "
"Setting this flag will first change this rounding behaviour to the "
"number provided (0.0 - 1.0) and secondly, add a probability / p "
"column in the output csv file. "
"[default: None]"
),
)
_add_utils(parser)
def _parse_args_train(
subparsers: argparse._SubParsersAction, parent_parser: argparse.ArgumentParser,
):
"""Subparser for training."""
parser = subparsers.add_parser(
"train",
parents=[parent_parser],
formatter_class=CustomFormatter,
add_help=False,
description=(
f"\U0001F686 {Color.title}Training submodule{Color.end} \U0001F686\n\n"
'Train a custom model using a custom dataset created in "deepblink create" '
"or using a published dataset."
),
help="\U0001F686 Train a freshly baked model on a dataset.",
)
group1 = parser.add_argument_group(f"{Color.required}Required{Color.end}")
group1.add_argument(
"-c",
"--config",
type=FileType(["yaml"]),
required=True,
help=(
"Configuration file. "
'Path to the config.yaml created using "deepblink config". '
"The path be relative (e.g. ../dir) or absolute (e.g. /Users/myname/). "
"Please see the training information on the wiki to configure the file to your requirements. "
"[required]"
),
)
group2 = parser.add_argument_group(f"{Color.optional}Optional{Color.end}")
group2.add_argument(
"-g",
"--gpu",
type=int,
default=None,
help=(
"GPU index. "
"Value passed CUDA_VISIBLE_DEVICES if a GPU is used for training. "
"[default: None]"
),
)
_add_utils(parser)
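# NOTE: the custom argparse "type" callables used above (FileType, FolderType, FileFolderType,
# ShapeType, ProbabilityType) come from ._parseutil and their implementations are not shown in
# this file. Purely as an illustration of the pattern (the real classes may differ in name and
# behaviour), such a validator could look roughly like the commented sketch below:
#
#     class ExampleFileType:
#         """Return the path if it is an existing file with an allowed extension."""
#         def __init__(self, extensions):
#             self.extensions = tuple(extensions)
#         def __call__(self, value):
#             import os
#             if not os.path.isfile(value):
#                 raise argparse.ArgumentTypeError(f"{value} is not a file.")
#             if not value.lower().endswith(self.extensions):
#                 raise argparse.ArgumentTypeError(f"{value} must end in one of {self.extensions}.")
#             return value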
|
"""CLI argument parsing."""
import argparse
# from ..io import EXTENSIONS
from ._parseutil import Color
from ._parseutil import CustomFormatter
from ._parseutil import FileFolderType
from ._parseutil import FileType
from ._parseutil import FolderType
from ._parseutil import ProbabilityType
from ._parseutil import ShapeType
from ._parseutil import _add_utils
EXTENSIONS = ("tif", "jpeg", "jpg", "png")
def _parse_args_check(
subparsers: argparse._SubParsersAction, parent_parser: argparse.ArgumentParser,
):
"""Subparser for checking."""
parser = subparsers.add_parser(
"check",
parents=[parent_parser],
formatter_class=CustomFormatter,
add_help=False,
description=(
f"\U0001F537 {Color.title}Checking submodule{Color.end} \U0001F537\n\n"
"Check the arrangement of your image's axis also known as image shape. "
),
help="\U0001F537 Determine your input image's shape.",
)
group1 = parser.add_argument_group(f"{Color.required}Required{Color.end}")
group1.add_argument(
"INPUT",
type=FileType(EXTENSIONS),
help=(
"Input image. "
"Path to the image file to be checked. "
"The path be relative (e.g. ../dir) or absolute (e.g. /Users/myname/). "
"Input can either be given as path to a directory containing files or as a single file. "
"Note that only the specified filetypes will be processed. "
f"[required] [filetypes: {', '.join(EXTENSIONS)}]"
),
)
_add_utils(parser)
def _parse_args_config(
subparsers: argparse._SubParsersAction, parent_parser: argparse.ArgumentParser
):
"""Subparser for configuration."""
parser = subparsers.add_parser(
"config",
parents=[parent_parser],
add_help=False,
formatter_class=CustomFormatter,
description=(
f"\U0001F528 {Color.title}Configuration submodule{Color.end} \U0001F528\n\n"
"Prepare a configuration file used to adjust parameters during training. "
),
help="\U0001F528 Create a configuration file for training.",
)
group2 = parser.add_argument_group(f"{Color.optional}Optional{Color.end}")
group2.add_argument(
"-n",
"--name",
type=str,
default="config",
help=(
"Custom configuration name. "
'The file extension "yaml" will be added automatically to the given name. '
'[default: "config"]'
),
)
group2.add_argument(
"-r",
"--raw",
action="store_true",
help=(
"Save configuration file without description of values. "
"Shorter but not descriptive."
),
)
_add_utils(parser)
def _parse_args_create(
subparsers: argparse._SubParsersAction, parent_parser: argparse.ArgumentParser,
):
"""Subparser for creation."""
parser = subparsers.add_parser(
"create",
parents=[parent_parser],
formatter_class=CustomFormatter,
add_help=False,
description=(
f"\U0001F4BE {Color.title}Creation submodule{Color.end} \U0001F4BE\n\n"
"Create a custom dataset with raw files and corresponding labels. "
"Relies on labeling output from FIJI that was saved with the provided macro "
"or the standard TrackMate coordinate output. "
'Both are described here "https://github.com/BBQuercus/deepBlink/wiki/Datasets".'
),
help="\U0001F4BE Create a new dataset from raw files.",
)
group1 = parser.add_argument_group(f"{Color.required}Required{Color.end}")
group1.add_argument(
"-i",
"--input",
required=True,
type=FolderType(),
help=(
"Path to the directory containing raw images. "
"Note that only the specified filetypes will be processed. "
f"[required] [filetypes: {', '.join(EXTENSIONS)}]"
),
)
group2 = parser.add_argument_group(f"{Color.optional}Optional{Color.end}")
group2.add_argument(
"-l",
"--labels",
type=FolderType(),
help=(
"Path to the directory containing labels in csv format. "
"The default path accounts for using the FIJI macro described on the wiki. "
"[default: --INPUT/labels/]"
),
)
group2.add_argument(
"-n",
"--name",
default="dataset",
type=str,
help=(
"Custom dataset name. "
'The file extension "npz" will be added automatically. '
'[default: "dataset"]'
),
)
group2.add_argument(
"-s",
"--size",
default=512,
type=int,
help=(
"Image crop size. "
"If given, crops all images into the specified size. "
"Will crop non-overlapping and ignore areas that did not get covered."
"deepBlink requires square images powers of 2, such as 256, 512... "
"[default: 512]"
),
)
group2.add_argument(
"-m",
"--minspots",
default=1,
type=int,
help=(
"Minimum number of spots per crop. "
"Ignores fields of view generated with fewer than minspots number of spots. "
"[default: 1]"
),
)
group2.add_argument(
"-vs",
"--validsplit",
default=0.2,
type=float,
help=(
"Validation split. "
"Split percentage (scaled between 0 - 1) of validation vs. train set. "
"Note the validation split is done after splitting test and trainval. "
"[default: 0.2]"
),
)
group2.add_argument(
"-ts",
"--testsplit",
default=0.2,
type=float,
help=(
"Testing split. "
"Split percentage (scaled between 0 - 1) of test vs. trainval set. "
"[default: 0.2]"
),
)
_add_utils(parser)
def _parse_args_download(
subparsers: argparse._SubParsersAction, parent_parser: argparse.ArgumentParser,
):
"""Subparser for downloading."""
parser = subparsers.add_parser(
"download",
parents=[parent_parser],
formatter_class=CustomFormatter,
add_help=False,
description=(
f"\U0001F4E5 {Color.title}Downloading submodule{Color.end} \U0001F4E5\n\n"
"Download pre-trained models from our online figshare repository to predict. "
),
help="\U0001F4E5 Download pre-trained models for use.",
)
group2 = parser.add_argument_group(f"{Color.optional}Optional{Color.end}")
group2.add_argument(
"-i",
"--input",
type=str,
default=None,
help=(
"Input name. "
"Name of the model to be downloaded. "
'Note that only the models listed in "deepblink download --list" will be processed. '
"[default: None]"
),
)
group2.add_argument(
"-l",
"--list",
action="store_true",
help=("List available models. " "Name of the model to be downloaded. "),
)
group2.add_argument(
"-a",
"--all",
action="store_true",
help=(
"Download all available models. "
"If passed, all models will be downloaded. "
),
)
_add_utils(parser)
def _parse_args_predict(
subparsers: argparse._SubParsersAction, parent_parser: argparse.ArgumentParser,
):
"""Subparser for prediction."""
parser = subparsers.add_parser(
"predict",
parents=[parent_parser],
add_help=False,
formatter_class=CustomFormatter,
description=(
f"\U0001F914 {Color.title}Prediction submodule{Color.end} \U0001F914\n\n"
"Use a pre-trained model to predict blob coordinates on new data. "
"In addition to the required model and input file or folder, "
"several optional features are accessible as described below."
),
help="\U0001F914 Predict on data with a pre-trained model.",
)
group1 = parser.add_argument_group(f"{Color.required}Required{Color.end}")
group1.add_argument(
"-i",
"--input",
required=True,
type=FileFolderType(EXTENSIONS),
help=(
"Image files to predict on. "
"Input can either be given as path to a directory containing files or as a single file. "
"The path be relative (e.g. ../dir) or absolute (e.g. /Users/myname/). "
"Fileglobs are currently not available. "
"Note that only the specified filetypes will be processed. "
f"[required] [filetypes: {', '.join(EXTENSIONS)}]"
),
)
group1.add_argument(
"-m",
"--model",
required=True,
type=FileType(["h5"]),
help=(
"DeepBlink model. "
'Model has to be of file type ".h5". '
'The path can be relative or absolute as described in "--input". '
'Model can either be trained on custom data using "deepblink train" or using a pre-trained '
'model available through the GitHub wiki on "https://github.com/BBQuercus/deepBlink/wiki". '
"[required]"
),
)
group2 = parser.add_argument_group(f"{Color.optional}Optional{Color.end}")
group2.add_argument(
"-o",
"--output",
type=FolderType(),
help=(
"Output folder path. "
"Path to the directory into which all output files are saved. "
"Output files will automatically take the same name as their corresponding image. "
"[default: input location]"
),
)
group2.add_argument(
"-r",
"--radius",
type=int,
default=None,
help=(
"Intensity radius. "
"If given, will calculate the integrated intensity in the specified radius around each coordinate. "
"If the radius is set to zero if only the central pixels intensity should be calculated. "
'The intensity is added as additional column to the output file called "i". '
"[default: None]"
),
)
group2.add_argument(
"-s",
"--shape",
type=ShapeType(),
default=None,
help=(
"Image shape. "
"Used to assess the arrangement of input image axes otherwise known as shape. "
"If not given, uses a basic prediction based on common defaults. "
'Must be in the format "(x,y,z,t,c,3)" using the specified characters. '
            'If unsure, use "deepblink check" to determine the shape of your image '
            "and get more detailed information. "
"[default: None]"
),
)
group2.add_argument(
"-p",
"--probability",
type=ProbabilityType(),
default=None,
help=(
"Prediction probability. "
"By default, the model's output probability map is rounded. "
"I.e. probabilities above 0.5 are included in the final output. "
"Setting this flag will first change this rounding behaviour to the "
"number provided (0.0 - 1.0) and secondly, add a probability / p "
"column in the output csv file. "
"[default: None]"
),
)
_add_utils(parser)
def _parse_args_train(
subparsers: argparse._SubParsersAction, parent_parser: argparse.ArgumentParser,
):
"""Subparser for training."""
parser = subparsers.add_parser(
"train",
parents=[parent_parser],
formatter_class=CustomFormatter,
add_help=False,
description=(
f"\U0001F686 {Color.title}Training submodule{Color.end} \U0001F686\n\n"
'Train a custom model using a custom dataset created in "deepblink create" '
"or using a published dataset."
),
help="\U0001F686 Train a freshly baked model on a dataset.",
)
group1 = parser.add_argument_group(f"{Color.required}Required{Color.end}")
group1.add_argument(
"-c",
"--config",
type=FileType(["yaml"]),
required=True,
help=(
"Configuration file. "
'Path to the config.yaml created using "deepblink config". '
"The path be relative (e.g. ../dir) or absolute (e.g. /Users/myname/). "
"Please see the training information on the wiki to configure the file to your requirements. "
"[required]"
),
)
group2 = parser.add_argument_group(f"{Color.optional}Optional{Color.end}")
group2.add_argument(
"-g",
"--gpu",
type=int,
default=None,
help=(
"GPU index. "
"Value passed CUDA_VISIBLE_DEVICES if a GPU is used for training. "
"[default: None]"
),
)
_add_utils(parser)
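# NOTE: the top-level entry point that assembles these subparsers lives outside this file.
# Assuming the standard argparse pattern, a minimal driver could look roughly like the sketch
# below; the program name and the "command" dest are illustrative assumptions and the real
# deepBlink CLI may organise this differently.
def _example_build_parser() -> argparse.ArgumentParser:
    """Illustrative only: wire the submodule helpers defined above into one parser."""
    parent_parser = argparse.ArgumentParser(add_help=False)
    parser = argparse.ArgumentParser(prog="deepblink", formatter_class=CustomFormatter)
    subparsers = parser.add_subparsers(dest="command")
    _parse_args_check(subparsers, parent_parser)
    _parse_args_config(subparsers, parent_parser)
    _parse_args_create(subparsers, parent_parser)
    _parse_args_download(subparsers, parent_parser)
    _parse_args_predict(subparsers, parent_parser)
    _parse_args_train(subparsers, parent_parser)
    return parser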
|
from .TLiDB_dataset import TLiDB_Dataset
from tlidb.TLiDB.metrics.all_metrics import Accuracy
class clinc150_dataset(TLiDB_Dataset):
"""
CLINC150 dataset
This is the full dataset from https://github.com/clinc/oos-eval
Input (x):
- text (str): Text utterance
Target (y):
- label (list): List of [Domain, Intent] labels
Metadata:
- domain (str): Domain of the utterance
"""
_dataset_name = "clinc150"
_tasks = ["intent_detection"]
_url = "https://drive.google.com/uc?export=download&id=1dG6KXQ6L7xpbnWmhW9Xo3vPSfYstk43E"
def __init__(self, task, dataset_folder, model_type, split=None):
assert task in self._tasks, f"{task} is not a valid task for {self._dataset_name}"
super().__init__(self.dataset_name, task, model_type, dataset_folder=dataset_folder)
# initialize task data and metadata
categories = [
"auto and commute","banking","credit cards","home",
"kitchen and dining","meta","small talk","travel",
"utility","work"
]
self._input_array = []
self._y_array = []
self._metadata_fields = ["domains"]
self._metadata_array = [[] for _ in self._metadata_fields]
# convert labels to human readable
labels = [label.replace("_"," ") for label in self.task_labels]
formatted_labels = []
for label in labels:
for c in categories:
if c == label[:len(c)]:
formatted_label = c+":"+label[len(c):]
formatted_labels.append(formatted_label)
self.task_labels = formatted_labels
for datum in self.dataset['data']:
if split and datum['dialogue_metadata']['original_data_partition'] != split:
continue
utterance = datum['dialogue'][0]
domain = utterance['intent_detection']['domain']
intent = utterance['intent_detection']['intent']
self._input_array.append(utterance['utterance'])
self._y_array.append([domain, intent])
self.get_metadata_field("domains").append(domain)
self._num_classes = len(self.task_labels)
self._y_size = len(self._y_array)
def get_input(self, idx):
return self._input_array[idx]
def get_metadata(self, idx):
return {
"domains": self.get_metadata_field("domains")[idx],
}
def _collate_encoder(self, batch):
X,y, metadata = [], [], {}
for item in batch:
X.append(item[0])
y.append(f"{item[1][0].replace("_"," ")}: {item[1][1].replace("_"," ")}")
for k, v in item[2].items():
if k not in metadata:
metadata[k] = []
metadata[k].append(v)
return X,y, metadata
def _collate_decoder(self, batch):
X,y, metadata = [], [], {}
for item in batch:
X.append(item[0])
y.append(f"{item[1][0].replace("_"," ")}: {item[1][1].replace("_"," ")}")
for k, v in item[2].items():
if k not in metadata:
metadata[k] = []
metadata[k].append(v)
labels = self.task_labels
if labels:
metadata['labels'] = labels
return X,y, metadata
def _collate_encoderdecoder(self, batch):
X,y, metadata = [], [], {}
for item in batch:
X.append(item[0])
y.append(f"{item[1][0].replace("_"," ")}: {item[1][1].replace("_"," ")}")
for k, v in item[2].items():
if k not in metadata:
metadata[k] = []
metadata[k].append(v)
labels = self.task_labels
if labels:
metadata['labels'] = labels
return X,y, metadata
|
from .TLiDB_dataset import TLiDB_Dataset
from tlidb.TLiDB.metrics.all_metrics import Accuracy
class clinc150_dataset(TLiDB_Dataset):
"""
CLINC150 dataset
This is the full dataset from https://github.com/clinc/oos-eval
Input (x):
- text (str): Text utterance
Target (y):
- label (list): List of [Domain, Intent] labels
Metadata:
- domain (str): Domain of the utterance
"""
_dataset_name = "clinc150"
_tasks = ["intent_detection"]
_url = "https://drive.google.com/uc?export=download&id=1dG6KXQ6L7xpbnWmhW9Xo3vPSfYstk43E"
def __init__(self, task, dataset_folder, model_type, split=None):
assert task in self._tasks, f"{task} is not a valid task for {self._dataset_name}"
super().__init__(self.dataset_name, task, model_type, dataset_folder=dataset_folder)
# initialize task data and metadata
categories = [
"auto and commute","banking","credit cards","home",
"kitchen and dining","meta","small talk","travel",
"utility","work"
]
self._input_array = []
self._y_array = []
self._metadata_fields = ["domains"]
self._metadata_array = [[] for _ in self._metadata_fields]
# convert labels to human readable
labels = [label.replace("_"," ") for label in self.task_labels]
formatted_labels = []
for label in labels:
for c in categories:
if c == label[:len(c)]:
formatted_label = c+":"+label[len(c):]
formatted_labels.append(formatted_label)
self.task_labels = formatted_labels
for datum in self.dataset['data']:
if split and datum['dialogue_metadata']['original_data_partition'] != split:
continue
utterance = datum['dialogue'][0]
domain = utterance['intent_detection']['domain']
intent = utterance['intent_detection']['intent']
self._input_array.append(utterance['utterance'])
self._y_array.append([domain, intent])
self.get_metadata_field("domains").append(domain)
self._num_classes = len(self.task_labels)
self._y_size = len(self._y_array)
def get_input(self, idx):
return self._input_array[idx]
def get_metadata(self, idx):
return {
"domains": self.get_metadata_field("domains")[idx],
}
def _collate_encoder(self, batch):
X,y, metadata = [], [], {}
for item in batch:
X.append(item[0])
y.append(f"{item[1][0].replace('_',' ')}: {item[1][1].replace('_',' ')}")
for k, v in item[2].items():
if k not in metadata:
metadata[k] = []
metadata[k].append(v)
return X,y, metadata
def _collate_decoder(self, batch):
X,y, metadata = [], [], {}
for item in batch:
X.append(item[0])
y.append(f"{item[1][0].replace('_',' ')}: {item[1][1].replace('_',' ')}")
for k, v in item[2].items():
if k not in metadata:
metadata[k] = []
metadata[k].append(v)
labels = self.task_labels
if labels:
metadata['labels'] = labels
return X,y, metadata
def _collate_encoderdecoder(self, batch):
X,y, metadata = [], [], {}
for item in batch:
X.append(item[0])
y.append(f"{item[1][0].replace('_',' ')}: {item[1][1].replace('_',' ')}")
for k, v in item[2].items():
if k not in metadata:
metadata[k] = []
metadata[k].append(v)
labels = self.task_labels
if labels:
metadata['labels'] = labels
return X,y, metadata
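# NOTE: the three _collate_* methods above share one pattern: each batch item is
# (utterance, [domain, intent], metadata_dict). The standalone helper below is illustrative
# only and is not used by the class; it merely demonstrates the expected input/output shapes.
def _example_collate(batch):
    """Collate (text, [domain, intent], metadata) items into lists plus merged metadata."""
    X, y, metadata = [], [], {}
    for text, (domain, intent), meta in batch:
        X.append(text)
        y.append(f"{domain.replace('_', ' ')}: {intent.replace('_', ' ')}")
        for k, v in meta.items():
            metadata.setdefault(k, []).append(v)
    return X, y, metadata
# Hypothetical usage:
# _example_collate([("book me a flight", ["travel", "book_flight"], {"domains": "travel"})])
# -> (["book me a flight"], ["travel: book flight"], {"domains": ["travel"]})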
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from allennlp.predictors.predictor import Predictor
# ## Instantiate AllenNLP `Predictor`
# 1. Load the same model that is used in the [demo](https://demo.allennlp.org/coreference-resolution) (*don't get alarmed by the warning - we don't need to fine-tune the model to use it*).
# 2. Get the prediction :)
# In[2]:
model_url = 'https://storage.googleapis.com/allennlp-public-models/coref-spanbert-large-2020.02.27.tar.gz'
predictor = Predictor.from_path(model_url)
# In[3]:
text = "Eva and Martha didn't want their friend Jenny to feel lonely so they invited her to the party in Las Vegas."
prediction = predictor.predict(document=text)
# ## Coreference resolution with `Allen Institute`
# What we get as a result (`prediction`) is a dictionary, as Allen outputs multiple pieces of information at once.
# The keys that we found ourselves using the most are:
#
# | Key | Type | Description
# |:--------------------------|:-------------------|:----------------------------------------------------
# | `top_spans` | `List[List[int]]` | List of `spaCy` token indices pairs representing spans
# | `document` | `List[str]` | Document's tokens (from `spaCy`; but represented as string not Token)
# | `clusters` | `List[List[List[int]]]` | Clusters of spans (represented by token indices pairs)
# In[4]:
# it's our original text (with extra whitespace, as we trivially just joined tokens with ' ')
' '.join(prediction['document'])
# In[5]:
# and the found clusters - however, they are not easily understood...
prediction['clusters']
# In[6]:
# but that's how it looks after coreference resolution (notice the possessive!)
predictor.coref_resolved(text)
# As Allen's coreference resolution `Predictor` offers quite limited functionality, we need to write some functions manually in order to turn its output into a more readable form:
# In[7]:
def get_span_words(span, document):
return ' '.join(document[span[0]:span[1]+1])
def print_clusters(prediction):
document, clusters = prediction['document'], prediction['clusters']
for cluster in clusters:
print(get_span_words(cluster[0], document) + ': ', end='')
print(f"[{"; ".join([get_span_words(span, document) for span in cluster])}]")
# In[8]:
print_clusters(prediction)
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from allennlp.predictors.predictor import Predictor
# ## Instantiate AllenNLP `Predictor`
# 1. Load the same model that is used in the [demo](https://demo.allennlp.org/coreference-resolution) (*don't get alarmed by the warning - we don't need to fine-tune the model to use it*).
# 2. Get the prediction :)
# In[2]:
model_url = 'https://storage.googleapis.com/allennlp-public-models/coref-spanbert-large-2020.02.27.tar.gz'
predictor = Predictor.from_path(model_url)
# In[3]:
text = "Eva and Martha didn't want their friend Jenny to feel lonely so they invited her to the party in Las Vegas."
prediction = predictor.predict(document=text)
# ## Coreference resolution with `Allen Institute`
# What we get as a result (`prediction`) is a dictionary, as Allen outputs multiple pieces of information at once.
# The keys that we found ourselves using the most are:
#
# | Key | Type | Description
# |:--------------------------|:-------------------|:----------------------------------------------------
# | `top_spans` | `List[List[int]]` | List of `spaCy` token indices pairs representing spans
# | `document` | `List[str]` | Document's tokens (from `spaCy`; but represented as string not Token)
# | `clusters` | `List[List[List[int]]]` | Clusters of spans (represented by token indices pairs)
# In[4]:
# it's our original text (with extra whitespace, as we trivially just joined tokens with ' ')
' '.join(prediction['document'])
# In[5]:
# and the found clusters - however, they are not easily understood...
prediction['clusters']
# In[6]:
# but that's how it looks after coreference resolution (notice the possessive!)
predictor.coref_resolved(text)
# As Allen's coreference resolution `Predictor` offers quite limited functionality, we need to write some functions manually in order to turn its output into a more readable form:
# In[7]:
def get_span_words(span, document):
return ' '.join(document[span[0]:span[1]+1])
def print_clusters(prediction):
document, clusters = prediction['document'], prediction['clusters']
for cluster in clusters:
print(get_span_words(cluster[0], document) + ': ', end='')
print(f"[{'; '.join([get_span_words(span, document) for span in cluster])}]")
# In[8]:
print_clusters(prediction)
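# NOTE: as an optional convenience (not part of the original notebook), the clusters can also be
# collected into a plain dict keyed by each cluster's first mention, using only the `document`
# and `clusters` keys described above. Distinct clusters that share the same first-mention text
# would collide on the key.
def clusters_as_dict(prediction):
    document, clusters = prediction['document'], prediction['clusters']
    return {
        get_span_words(cluster[0], document): [
            get_span_words(span, document) for span in cluster
        ]
        for cluster in clusters
    }
# clusters_as_dict(prediction)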
|
import io
import json
import zipfile
from functools import cached_property
from typing import Callable, Dict, KeysView, List, NamedTuple, Set, Union
import requests
from django.contrib import messages
from django.contrib.auth.mixins import UserPassesTestMixin
from django.core.exceptions import ObjectDoesNotExist, SuspiciousOperation
from django.core.files import File
from django.core.paginator import Paginator
from django.db.models import Prefetch
from django.http import (
FileResponse,
HttpResponse,
HttpResponseRedirect,
JsonResponse,
StreamingHttpResponse,
)
from django.shortcuts import redirect, reverse
from django.views import generic
from django.views.generic.base import View
from django.views.generic.detail import SingleObjectMixin
from django.views.generic.list import MultipleObjectMixin
from accounts.utils import (
hash_child_id,
hash_demographic_id,
hash_id,
hash_participant_id,
)
from exp.utils import (
RESPONSE_PAGE_SIZE,
csv_dict_output_and_writer,
csv_namedtuple_writer,
flatten_dict,
round_age,
round_ages_from_birthdays,
study_name_for_files,
)
from exp.views.mixins import (
CanViewStudyResponsesMixin,
ResearcherLoginRequiredMixin,
SingleObjectFetchProtocol,
StudyLookupMixin,
)
from studies.models import Feedback, Response, Study, Video
from studies.permissions import StudyPermission
from studies.queries import (
get_consent_statistics,
get_responses_with_current_rulings_and_videos,
)
from studies.tasks import build_framedata_dict, build_zipfile_of_videos
class ResponseDataColumn(NamedTuple):
# id: Unique key to identify data. Used as CSV column header and any portion before __ is used to create a
# sub-dictionary for JSON data.
id: str
description: str # Description for data dictionary
extractor: Callable[
[Union[Response, Dict]], Union[str, List]
] # Function to extract value from response instance or dict
optional: bool = False # is a column the user checks a box to include?
name: str = "" # used in template form for optional columns
include_by_default: bool = False # whether to initially check checkbox for field
identifiable: bool = False # used to determine filename signaling
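# NOTE (illustrative): each column pairs an output key with an extractor callable; downstream
# views build a row roughly as flatten_dict({col.id: col.extractor(resp) for col in RESPONSE_COLUMNS})
# and then select/order the headers for that row via get_response_headers defined further below.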
# Columns for response downloads. Extractor functions expect Response instance
RESPONSE_COLUMNS = [
ResponseDataColumn(
id="response__id",
description="Short ID for this response",
extractor=lambda resp: str(resp.id),
name="Response ID",
),
ResponseDataColumn(
id="response__uuid",
description="Unique identifier for response. Can be used to match data to video filenames.",
extractor=lambda resp: str(resp.uuid),
name="Response UUID",
),
ResponseDataColumn(
id="response__date_created",
description="Timestamp for when participant began session, in format e.g. 2019-11-07 17:13:38.702958+00:00",
extractor=lambda resp: str(resp.date_created),
name="Date created",
),
ResponseDataColumn(
id="response__completed",
description=(
"Whether the participant submitted the exit survey; depending on study criteria, this may not align "
"with whether the session is considered complete. E.g., participant may have left early but submitted "
"exit survey, or may have completed all test trials but not exit survey."
),
extractor=lambda resp: resp.completed,
name="Completed",
),
ResponseDataColumn(
id="response__withdrawn",
description=(
"Whether the participant withdrew permission for viewing/use of study video beyond consent video. If "
"true, video will not be available and must not be used."
),
extractor=lambda resp: resp.withdrawn,
name="Withdrawn",
),
ResponseDataColumn(
id="response__parent_feedback",
description=(
"Freeform parent feedback entered into the exit survey, if any. This field may incidentally contain "
"identifying or sensitive information depending on what parents say, so it should be scrubbed or "
"omitted from published data."
),
extractor=lambda resp: resp.parent_feedback,
name="Parent feedback",
),
ResponseDataColumn(
id="response__birthdate_difference",
description=(
"Difference between birthdate entered in exit survey, if any, and birthdate of registered child "
"participating. Positive values mean that the birthdate from the exit survey is LATER. Blank if "
"no birthdate available from the exit survey."
),
extractor=lambda resp: resp.birthdate_difference,
name="Birthdate difference",
),
ResponseDataColumn(
id="response__video_privacy",
description=(
"Privacy level for videos selected during the exit survey, if the parent completed the exit survey. "
"Possible levels are 'private' (only people listed on your IRB protocol can view), 'scientific' "
"(can share for scientific/educational purposes), and 'public' (can also share for publicity). "
"In no cases may videos be shared for commercial purposes. If this is missing (e.g., family stopped "
"just after the consent form and did not complete the exit survey), you must treat the video as "
"private."
),
extractor=lambda resp: resp.privacy,
name="Video privacy level",
),
ResponseDataColumn(
id="response__databrary",
description=(
"Whether the parent agreed to share video data on Databrary - 'yes' or 'no'. If missing, you must "
"treat the video as if 'no' were selected. If 'yes', the video privacy selections also apply to "
"authorized Databrary users."
),
extractor=lambda resp: resp.databrary,
name="Databrary sharing",
),
ResponseDataColumn(
id="response__is_preview",
description=(
"Whether this response was generated by a researcher previewing the experiment. Preview data should "
"not be used in any actual analyses."
),
extractor=lambda resp: resp.is_preview,
name="Preview",
),
ResponseDataColumn(
id="consent__ruling",
description=(
"Most recent consent video ruling: one of 'accepted' (consent has been reviewed and judged to indidate "
"informed consent), 'rejected' (consent has been reviewed and judged not to indicate informed "
"consent -- e.g., video missing or parent did not read statement), or 'pending' (no current judgement, "
"e.g. has not been reviewed yet or waiting on parent email response')"
),
extractor=lambda resp: resp.most_recent_ruling,
),
ResponseDataColumn(
id="consent__arbiter",
description="Name associated with researcher account that made the most recent consent ruling",
extractor=lambda resp: resp.most_recent_ruling_arbiter,
),
ResponseDataColumn(
id="consent__time",
description="Timestamp of most recent consent ruling, format e.g. 2019-12-09 20:40",
extractor=lambda resp: resp.most_recent_ruling_date,
),
ResponseDataColumn(
id="consent__comment",
description=(
"Comment associated with most recent consent ruling (may be used to track e.g. any cases where consent "
"was confirmed by email)"
),
extractor=lambda resp: resp.most_recent_ruling_comment,
),
ResponseDataColumn(
id="study__uuid",
description="Unique identifier of study associated with this response. Same for all responses to a given Lookit study.",
extractor=lambda resp: str(resp.study.uuid),
),
ResponseDataColumn(
id="participant__global_id",
description=(
"Unique identifier for family account associated with this response. Will be the same for multiple "
"responses from a child and for siblings, and across different studies. MUST BE REDACTED FOR "
"PUBLICATION because this allows identification of families across different published studies, which "
"may have unintended privacy consequences. Researchers can use this ID to match participants across "
"studies (subject to their own IRB review), but would need to generate their own random participant "
"IDs for publication in that case. Use participant_hashed_id as a publication-safe alternative if "
"only analyzing data from one Lookit study."
),
extractor=lambda resp: str(resp.child.user.uuid),
optional=True,
name="Parent global ID",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="participant__hashed_id",
description=(
"Identifier for family account associated with this response. Will be the same for multiple responses "
"from a child and for siblings, but is unique to this study. This may be published directly."
),
extractor=lambda resp: hash_id(
resp.child.user.uuid,
resp.study.uuid,
resp.study.salt,
resp.study.hash_digits,
),
name="Parent ID",
),
ResponseDataColumn(
id="participant__nickname",
description=(
"Nickname associated with the family account for this response - generally the mom or dad's name. "
"Must be redacted for publication."
),
extractor=lambda resp: resp.child.user.nickname,
optional=True,
name="Parent name",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__global_id",
description=(
"Primary unique identifier for the child associated with this response. Will be the same for multiple "
"responses from one child, even across different Lookit studies. MUST BE REDACTED FOR PUBLICATION "
"because this allows identification of children across different published studies, which may have "
"unintended privacy consequences. Researchers can use this ID to match participants across studies "
"(subject to their own IRB review), but would need to generate their own random participant IDs for "
"publication in that case. Use child_hashed_id as a publication-safe alternative if only analyzing "
"data from one Lookit study."
),
extractor=lambda resp: str(resp.child.uuid),
optional=True,
name="Child global ID",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__hashed_id",
description=(
"Identifier for child associated with this response. Will be the same for multiple responses from a "
"child, but is unique to this study. This may be published directly."
),
extractor=lambda resp: hash_id(
resp.child.uuid, resp.study.uuid, resp.study.salt, resp.study.hash_digits
),
name="Child ID",
),
ResponseDataColumn(
id="child__name",
description=(
"Nickname for the child associated with this response. Not necessarily a real name (we encourage "
"initials, nicknames, etc. if parents aren't comfortable providing a name) but must be redacted for "
"publication of data."
),
extractor=lambda resp: resp.child.given_name,
optional=True,
name="Child name",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__birthday",
description=(
"Birthdate of child associated with this response. Must be redacted for publication of data (switch to "
"age at time of participation; either use rounded age, jitter the age, or redact timestamps of "
"participation)."
),
extractor=lambda resp: resp.child.birthday,
optional=True,
name="Birthdate",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__age_in_days",
description=(
"Age in days at time of response of child associated with this response, exact. This can be used in "
"conjunction with timestamps to calculate the child's birthdate, so must be jittered or redacted prior "
"to publication unless no timestamp information is shared."
),
extractor=lambda resp: (resp.date_created.date() - resp.child.birthday).days,
optional=True,
name="Age in days",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__age_rounded",
description=(
"Age in days at time of response of child associated with this response, rounded to the nearest 10 "
"days if under 1 year old and to the nearest 30 days if over 1 year old. May be published; however, if "
"you have more than a few sessions per participant it would be possible to infer the exact age in days "
"(and therefore birthdate) with some effort. In this case you might consider directly jittering "
"birthdates."
),
extractor=lambda resp: str(
round_age(int((resp.date_created.date() - resp.child.birthday).days))
)
if (resp.date_created and resp.child.birthday)
else "",
optional=True,
name="Rounded age",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__gender",
description=(
"Parent-identified gender of child, one of 'm' (male), 'f' (female), 'o' (other), or 'na' (prefer not "
"to answer)"
),
extractor=lambda resp: resp.child.gender,
optional=True,
name="Child gender",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__age_at_birth",
description=(
"Gestational age at birth in weeks. One of '40 or more weeks', '39 weeks' through '24 weeks', "
"'Under 24 weeks', or 'Not sure or prefer not to answer'"
),
extractor=lambda resp: resp.child.age_at_birth,
optional=True,
name="Child gestational age",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__language_list",
description="List of languages spoken (using language codes in Lookit docs), separated by spaces",
extractor=lambda resp: resp.child.language_list,
optional=True,
name="Child languages",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__condition_list",
description="List of child characteristics (using condition/characteristic codes in Lookit docs), separated by spaces",
extractor=lambda resp: resp.child.condition_list,
optional=True,
name="Child conditions",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__additional_information",
description=(
"Free response 'anything else you'd like us to know' field on child registration form for child "
"associated with this response. Should be redacted or reviewed prior to publication as it may include "
"names or other identifying information."
),
extractor=lambda resp: resp.child.additional_information,
optional=True,
name="Child additional information",
include_by_default=True,
identifiable=True,
),
ResponseDataColumn(
id="response__sequence",
description=(
"Each response_sequence.N field (response_sequence.0, response_sequence.1, etc.) gives the ID of the "
"Nth frame displayed during the session associated with this response. Responses may have different "
"sequences due to randomization or if a participant leaves early."
),
extractor=lambda resp: resp.sequence,
name="Response sequence",
),
ResponseDataColumn(
id="response__conditions",
description=(
"RESEARCHERS: EXPAND THIS SECTION BASED ON YOUR INDIVIDUAL STUDY. Each set of "
"response_conditions.N.(...) fields give information about condition assignment during a particular "
"frame of this study. response_conditions.0.frameName is the frame ID (corresponding to a value in "
"response_sequence) where the randomization occurred. Additional fields such as "
"response_conditions.0.conditionNum depend on the specific randomizer frames used in this study."
),
extractor=lambda resp: [
{**{"frameName": cond_frame}, **conds}
for (cond_frame, conds) in resp.conditions.items()
],
),
]
# Columns for demographic data downloads. Extractor functions expect Response values dict,
# rather than instance.
DEMOGRAPHIC_COLUMNS = [
ResponseDataColumn(
id="response__uuid",
description=(
"Primary unique identifier for response. Can be used to match demographic data to response data "
"and video filenames; must be redacted prior to publication if videos are also published."
),
extractor=lambda resp: str(resp["uuid"]),
name="Response UUID",
),
ResponseDataColumn(
id="participant__global_id",
description=(
"Unique identifier for family account associated with this response. Will be the same for multiple "
"responses from a child and for siblings, and across different studies. MUST BE REDACTED FOR "
"PUBLICATION because this allows identification of families across different published studies, "
"which may have unintended privacy consequences. Researchers can use this ID to match participants "
"across studies (subject to their own IRB review), but would need to generate their own random "
"participant IDs for publication in that case. Use participant__hashed_id as a publication-safe "
"alternative if only analyzing data from one Lookit study."
),
extractor=lambda resp: str(resp["child__user__uuid"]),
optional=True,
name="Parent global ID",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="participant__hashed_id",
description=(
"Identifier for family account associated with this response. Will be the same for multiple "
"responses from a child and for siblings, but is unique to this study. This may be published "
"directly."
),
extractor=lambda resp: hash_participant_id(resp),
name="Participant ID",
),
ResponseDataColumn(
id="demographic__hashed_id",
description=(
"Identifier for this demographic snapshot. Changes upon updates to the demographic form, "
"so may vary within the same participant across responses."
),
extractor=lambda resp: hash_demographic_id(resp),
name="Demographic ID",
),
ResponseDataColumn(
id="demographic__date_created",
description=(
"Timestamp of creation of the demographic snapshot associated with this response, in format e.g. "
"2019-10-02 21:39:03.713283+00:00"
),
extractor=lambda resp: str(resp["demographic_snapshot__created_at"]),
name="Date created",
),
ResponseDataColumn(
id="demographic__number_of_children",
description="Response to 'How many children do you have?'; options 0-10 or >10 (More than 10)",
extractor=lambda resp: resp["demographic_snapshot__number_of_children"],
name="Number of children",
),
ResponseDataColumn(
id="demographic__child_rounded_ages",
description=(
"List of rounded ages based on child birthdays entered in demographic form (not based on children "
"registered). Ages are at time of response for this row, in days, rounded to nearest 10 for ages "
"under 1 year and nearest 30 otherwise. In format e.g. [60, 390]"
),
extractor=lambda resp: round_ages_from_birthdays(
resp["demographic_snapshot__child_birthdays"], resp["date_created"]
),
name="Child ages rounded",
),
ResponseDataColumn(
id="demographic__languages_spoken_at_home",
description="Freeform response to 'What language(s) does your family speak at home?'",
extractor=lambda resp: resp["demographic_snapshot__languages_spoken_at_home"],
name="Languages spoken at home",
),
ResponseDataColumn(
id="demographic__number_of_guardians",
description="Response to 'How many parents/guardians do your children live with?' - 1, 2, 3> [3 or more], varies",
extractor=lambda resp: resp["demographic_snapshot__number_of_guardians"],
name="Number of guardians",
),
ResponseDataColumn(
id="demographic__number_of_guardians_explanation",
description=(
"Freeform response to 'If the answer varies due to shared custody arrangements or travel, please "
"enter the number of parents/guardians your children are usually living with or explain.'"
),
extractor=lambda resp: resp[
"demographic_snapshot__number_of_guardians_explanation"
],
name="Number of guardians explanation",
),
ResponseDataColumn(
id="demographic__race_identification",
description=(
"Comma-separated list of all values checked for question 'What category(ies) does your family "
"identify as?', from list: White; Hispanic, Latino, or Spanish origin; Black or African American; "
"Asian; American Indian or Alaska Native; Middle Eastern or North African; Native Hawaiian or "
"Other Pacific Islander; Another race, ethnicity, or origin"
),
extractor=lambda resp: resp["demographic_snapshot__race_identification"],
name="Race",
),
ResponseDataColumn(
id="demographic__parent_age",
description=(
"Parent's response to question 'What is your age?'; options are <18, 18-21, 22-24, 25-29, 30-34, "
"35-39, 40-44, 45-49, 50s, 60s, >70"
),
extractor=lambda resp: resp["demographic_snapshot__age"],
name="Parent age",
),
ResponseDataColumn(
id="demographic__parent_gender",
description=(
"Parent's response to question 'What is your gender?'; options are m [male], f [female], o "
"[other], na [prefer not to answer]"
),
extractor=lambda resp: resp["demographic_snapshot__gender"],
name="Parent age",
),
ResponseDataColumn(
id="demographic__education_level",
description=(
"Parent's response to question 'What is the highest level of education you've completed?'; options "
"are some [some or attending high school], hs [high school diploma or GED], col [some or attending "
"college], assoc [2-year college degree], bach [4-year college degree], grad [some or attending "
"graduate or professional school], prof [graduate or professional degree]"
),
extractor=lambda resp: resp["demographic_snapshot__education_level"],
name="Parent education level",
),
ResponseDataColumn(
id="demographic__spouse_education_level",
description=(
"Parent's response to question 'What is the highest level of education your spouse has "
"completed?'; options are some [some or attending high school], hs [high school diploma or GED], "
"col [some or attending college], assoc [2-year college degree], bach [4-year college degree], "
"grad [some or attending graduate or professional school], prof [graduate or professional degree], "
"na [not applicable - no spouse or partner]"
),
extractor=lambda resp: resp["demographic_snapshot__spouse_education_level"],
name="Parent education level",
),
ResponseDataColumn(
id="demographic__annual_income",
description=(
"Parent's response to question 'What is your approximate family yearly income (in US dollars)?'; "
"options are 0, 5000, 10000, 15000, 20000-19000 in increments of 10000, >200000, or na [prefer not "
"to answer]"
),
extractor=lambda resp: resp["demographic_snapshot__annual_income"],
name="Annual income",
),
ResponseDataColumn(
id="demographic__number_of_books",
description="Parent's response to question 'About how many children's books are there in your home?'; integer",
extractor=lambda resp: resp["demographic_snapshot__number_of_books"],
name="Number of books",
),
ResponseDataColumn(
id="demographic__additional_comments",
description="Parent's freeform response to question 'Anything else you'd like us to know?'",
extractor=lambda resp: resp["demographic_snapshot__additional_comments"],
name="Additional comments",
),
ResponseDataColumn(
id="demographic__country",
description="Parent's response to question 'What country do you live in?'; 2-letter country code",
extractor=lambda resp: resp["demographic_snapshot__country"],
name="Country code",
),
ResponseDataColumn(
id="demographic__state",
description=(
"Parent's response to question 'What state do you live in?' if country is US; 2-letter state "
"abbreviation"
),
extractor=lambda resp: resp["demographic_snapshot__state"],
name="US State",
),
ResponseDataColumn(
id="demographic__density",
description=(
"Parent's response to question 'How would you describe the area where you live?'; options are "
"urban, suburban, rural"
),
extractor=lambda resp: resp["demographic_snapshot__density"],
name="Density",
),
ResponseDataColumn(
id="demographic__lookit_referrer",
description="Parent's freeform response to question 'How did you hear about Lookit?'",
extractor=lambda resp: resp["demographic_snapshot__lookit_referrer"],
name="How you heard about Lookit",
),
]
# Which headers from the response data summary should go in the child data downloads
CHILD_CSV_HEADERS = [
col.id
for col in RESPONSE_COLUMNS
if col.id.startswith("child__") or col.id.startswith("participant__")
]
IDENTIFIABLE_DATA_HEADERS = {col.id for col in RESPONSE_COLUMNS if col.identifiable}
def get_response_headers(
selected_header_ids: Union[Set, List],
all_available_header_ids: Union[Set, KeysView],
) -> List:
"""Get ordered list of response headers for download.
Select and order the appropriate headers to include in a file download, based on
which optional headers are selected and which headers are available.
Args:
selected_header_ids: which optional headers to include (corresponding to id values in
RESPONSE_COLUMNS). Headers that are specified as optional in RESPONSE_COLUMNS will
only be included if listed in selected_header_ids.
all_available_header_ids: all header ids we have data for. Any header ids that are in
this set but not in RESPONSE_COLUMNS will be added to the end of the output list.
Returns:
List of headers to include, consisting of the following in order:
1) Headers in RESPONSE_COLUMNS, in order, omitting any that are optional and were not selected
2) Extra headers from all_available_header_ids not included in (1), in alpha order
"""
unselected_optional_ids = {
col.id
for col in RESPONSE_COLUMNS
if col.optional and col.id not in selected_header_ids
}
selected_standard_header_ids = [
col.id
for col in RESPONSE_COLUMNS[0:-2]
if col.id not in unselected_optional_ids
]
return selected_standard_header_ids + sorted(
list(
all_available_header_ids
- set(selected_standard_header_ids)
- unselected_optional_ids
)
)
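# NOTE (illustrative): the list returned above always begins with the ids from RESPONSE_COLUMNS
# (minus unselected optional ones and the trailing sequence/conditions columns), in
# RESPONSE_COLUMNS order, followed alphabetically by any remaining ids present in the data that
# were not deselected, e.g. flattened per-index keys from the sequence and conditions columns.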
def get_demographic_headers(selected_header_ids=None) -> List[str]:
"""Get ordered list of demographic headers for download.
Args:
selected_header_ids(set or list): which optional headers to include (corresponding
to id values in DEMOGRAPHIC_COLUMNS).
Returns:
Ordered list of headers to include in download
Headers are id values from DEMOGRAPHIC_COLUMNS in order, omitting any that are optional
and were not included in selected_header_ids.
"""
if selected_header_ids is None:
selected_header_ids = {}
return [
col.id
for col in DEMOGRAPHIC_COLUMNS
if col.id in selected_header_ids or not col.optional
]
def construct_response_dictionary(
resp, columns, optional_headers, include_exp_data=True
):
if optional_headers is None:
optional_headers = {}
resp_dict = {}
for col in columns:
if col.id in optional_headers or not col.optional:
try:
object_name, field_name = col.id.split("__")
if object_name in resp_dict:
resp_dict[object_name][field_name] = col.extractor(resp)
else:
resp_dict[object_name] = {field_name: col.extractor(resp)}
except ValueError:
resp_dict[col.id] = col.extractor(resp)
# Include exp_data field in dictionary?
if include_exp_data:
resp_dict["exp_data"] = resp.exp_data
return resp_dict
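# NOTE (illustrative): because column ids follow the "object__field" convention,
# construct_response_dictionary nests them, e.g. a column with id "response__uuid" lands in
# resp_dict["response"]["uuid"], while any id that does not split into exactly two parts on
# "__" falls back to a flat resp_dict[col.id] entry via the ValueError branch above.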
class FrameDataRow(NamedTuple):
response_uuid: str
child_hashed_id: str
frame_id: str
event_number: str
key: str
value: str
FRAME_DATA_HEADER_DESCRIPTIONS = {
"response_uuid": "Unique identifier for this response; can be matched to summary data and video filenames",
"child_hashed_id": (
"Hashed identifier for the child associated with this response; can be matched to summary data "
"child_hashed_id. This random ID may be published directly; it is specific to this study. If you "
"need to match children across multiple studies, use the child_global_id."
),
"frame_id": (
"Identifier for the particular frame responsible for this data; matches up to an element in the "
"response_sequence in the summary data file"
),
"event_number": (
"Index of the event responsible for this data, if this is an event. Indexes start from 0 within each "
"frame (and within global data) within each response. Blank for non-event data."
),
"key": "Label for a piece of data collected during this frame - for example, 'formData.child_favorite_animal'",
"value": "Value of the data associated with this key (of the indexed event if applicable) - for example, 'giraffe'",
}
def get_frame_data(resp: Union[Response, Dict]) -> List[FrameDataRow]:
"""Get list of data stored in response's exp_data and global_event_timings fields.
Args:
resp(Response or dict): response data to process. If dict, must contain fields
child__uuid, study__uuid, study__salt, study__hash_digits, uuid, exp_data, and
global_event_timings.
Returns:
List of FrameDataRows each representing a single piece of data from global_event_timings or
exp_data. Descriptions of each field of the FrameDataRow are given in FRAME_DATA_HEADER_DESCRIPTIONS.
"""
if type(resp) is not dict:
resp = {
"child__uuid": resp.child.uuid,
"study__uuid": resp.study.uuid,
"study__salt": resp.study.salt,
"study__hash_digits": resp.study.hash_digits,
"uuid": resp.uuid,
"exp_data": resp.exp_data,
"global_event_timings": resp.global_event_timings,
}
frame_data_tuples = []
child_hashed_id = hash_id(
resp["child__uuid"],
resp["study__uuid"],
resp["study__salt"],
resp["study__hash_digits"],
)
# First add all of the global event timings as events with frame_id "global"
for (iEvent, event) in enumerate(resp["global_event_timings"]):
for (key, value) in event.items():
frame_data_tuples.append(
FrameDataRow(
child_hashed_id=child_hashed_id,
response_uuid=str(resp["uuid"]),
frame_id="global",
key=key,
event_number=str(iEvent),
value=value,
)
)
# Next add all data in exp_data
event_prefix = "eventTimings."
for frame_id, frame_data in resp["exp_data"].items():
for (key, value) in flatten_dict(frame_data).items():
# Process event data separately and include event_number within frame
if key.startswith(event_prefix):
key_pieces = key.split(".")
frame_data_tuples.append(
FrameDataRow(
child_hashed_id=child_hashed_id,
response_uuid=str(resp["uuid"]),
frame_id=frame_id,
key=".".join(key_pieces[2:]),
event_number=str(key_pieces[1]),
value=value,
)
)
# omit frameType values from CSV
elif key == "frameType":
continue
# Omit the DOB from any exit survey
elif key == "birthDate" and frame_data.get("frameType", None) == "EXIT":
continue
# Omit empty generatedProperties values from CSV
elif key == "generatedProperties" and not value:
continue
# For all other data, create a regular entry with frame_id and no event #
else:
frame_data_tuples.append(
FrameDataRow(
child_hashed_id=child_hashed_id,
response_uuid=str(resp["uuid"]),
frame_id=frame_id,
key=key,
event_number="",
value=value,
)
)
return frame_data_tuples
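# NOTE (hypothetical example, assuming flatten_dict expands nested lists with dotted indices,
# as the eventTimings handling above implies): for a response whose exp_data is
# {"1-video-config": {"frameType": "DEFAULT", "eventTimings": [{"eventType": "recorderReady"}]}},
# get_frame_data yields one FrameDataRow with frame_id="1-video-config", event_number="0",
# key="eventType", value="recorderReady" (the frameType key itself is omitted), plus one row per
# key of each entry in global_event_timings under frame_id="global".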
def build_framedata_dict_csv(writer, responses):
response_paginator = Paginator(responses, RESPONSE_PAGE_SIZE)
unique_frame_ids = set()
event_keys = set()
unique_frame_keys_dict = {}
for page_num in response_paginator.page_range:
page_of_responses = response_paginator.page(page_num)
for resp in page_of_responses:
this_resp_data = get_frame_data(resp)
these_ids = {
d.frame_id.partition("-")[2]
for d in this_resp_data
if not d.frame_id == "global"
}
event_keys = event_keys | {
d.key for d in this_resp_data if d.event_number != ""
}
unique_frame_ids = unique_frame_ids | these_ids
for frame_id in these_ids:
these_keys = {
d.key
for d in this_resp_data
if d.frame_id.partition("-")[2] == frame_id and d.event_number == ""
}
if frame_id in unique_frame_keys_dict:
unique_frame_keys_dict[frame_id] = (
unique_frame_keys_dict[frame_id] | these_keys
)
else:
unique_frame_keys_dict[frame_id] = these_keys
# Start with general descriptions of high-level headers (child_id, response_id, etc.)
writer.writerows(
[
{"column": header, "description": description}
for (header, description) in FRAME_DATA_HEADER_DESCRIPTIONS.items()
]
)
writer.writerow(
{
"possible_frame_id": "global",
"frame_description": "Data not associated with a particular frame",
}
)
# Add placeholders to describe each frame type
unique_frame_ids = sorted(list(unique_frame_ids))
for frame_id in unique_frame_ids:
writer.writerow(
{
"possible_frame_id": "*-" + frame_id,
"frame_description": "RESEARCHER: INSERT FRAME DESCRIPTION",
}
)
unique_frame_keys = sorted(list(unique_frame_keys_dict[frame_id]))
for k in unique_frame_keys:
writer.writerow(
{
"possible_frame_id": "*-" + frame_id,
"possible_key": k,
"key_description": "RESEARCHER: INSERT DESCRIPTION OF WHAT THIS KEY MEANS IN THIS FRAME",
}
)
event_keys = sorted(list(event_keys))
event_key_stock_descriptions = {
"eventType": (
"Descriptor for this event; determines what other data is available. Global event 'exitEarly' records "
"cases where the participant attempted to exit the study early by closing the tab/window or pressing F1 "
"or ctrl-X. RESEARCHER: INSERT DESCRIPTIONS OF PARTICULAR EVENTTYPES USED IN YOUR STUDY. (Note: you can "
"find a list of events recorded by each frame in the frame documentation at "
"https://lookit.github.io/ember-lookit-frameplayer, under the Events header.)"
),
"exitType": (
"Used in the global event exitEarly. Only value stored at this point is 'browserNavigationAttempt'"
),
"lastPageSeen": (
"Used in the global event exitEarly. Index of the frame the participant was on before exit attempt."
),
"pipeId": (
"Recorded by any event in a video-capture-equipped frame. Internal video ID used by Pipe service; only "
"useful for troubleshooting in rare cases."
),
"streamTime": (
"Recorded by any event in a video-capture-equipped frame. Indicates time within webcam "
"video (videoId) to nearest 0.1 second. If recording has not started yet, may be 0 or null."
),
"timestamp": "Recorded by all events. Timestamp of event in format e.g. 2019-11-07T17:14:43.626Z",
"videoId": (
"Recorded by any event in a video-capture-equipped frame. Filename (without .mp4 extension) of video "
"currently being recorded."
),
}
for k in event_keys:
writer.writerow(
{
"possible_frame_id": "any (event data)",
"possible_key": k,
"key_description": event_key_stock_descriptions.get(
k, "RESEARCHER: INSERT DESCRIPTION OF WHAT THIS EVENT KEY MEANS"
),
}
)
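# NOTE: the `writer` passed into build_framedata_dict_csv above is expected to be a
# csv.DictWriter-style object whose fieldnames cover the keys written there ("column",
# "description", "possible_frame_id", "frame_description", "possible_key", "key_description"),
# presumably constructed via csv_dict_output_and_writer imported at the top of this module.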
def build_single_response_framedata_csv(response):
"""
    Builds CSV file contents for frame-level data from a single response. Used both for
    building the zip archive of all response data and for offering individual-file downloads
    on the individual responses view.
"""
this_resp_data = get_frame_data(response)
output, writer = csv_namedtuple_writer(FrameDataRow)
writer.writerows(this_resp_data)
return output.getvalue()
class ResponseDownloadMixin(CanViewStudyResponsesMixin, MultipleObjectMixin):
model = Response
paginate_by = 10
ordering = "id"
def get_queryset(self):
study = self.study
return study.responses_for_researcher(self.request.user).order_by(
self.get_ordering()
)
class DemographicDownloadMixin(CanViewStudyResponsesMixin, MultipleObjectMixin):
model = Response
paginate_by = 10
ordering = "id"
def get_queryset(self):
study = self.study
return (
study.responses_for_researcher(self.request.user)
.order_by(self.get_ordering())
.select_related("child", "child__user", "study", "demographic_snapshot")
.values(
"uuid",
"date_created",
"child__user__uuid",
"study__uuid",
"study__salt",
"study__hash_digits",
"demographic_snapshot__uuid",
"demographic_snapshot__created_at",
"demographic_snapshot__number_of_children",
"demographic_snapshot__child_birthdays",
"demographic_snapshot__languages_spoken_at_home",
"demographic_snapshot__number_of_guardians",
"demographic_snapshot__number_of_guardians_explanation",
"demographic_snapshot__race_identification",
"demographic_snapshot__age",
"demographic_snapshot__gender",
"demographic_snapshot__education_level",
"demographic_snapshot__spouse_education_level",
"demographic_snapshot__annual_income",
"demographic_snapshot__number_of_books",
"demographic_snapshot__additional_comments",
"demographic_snapshot__country",
"demographic_snapshot__state",
"demographic_snapshot__density",
"demographic_snapshot__lookit_referrer",
"demographic_snapshot__extra",
)
)
class StudyResponsesList(ResponseDownloadMixin, generic.ListView):
"""
View to display a list of study responses.
"""
template_name = "studies/study_responses.html"
def get_ordering(self):
"""
Determine sort field and order. Sorting on id actually sorts on child id, not response id.
        Sorting on status actually sorts on the 'completed' field, where we are alphabetizing
        "in progress" and "completed".
"""
orderby = self.request.GET.get("sort", "id")
return orderby.replace("id", "child__id").replace("status", "completed")
def get_queryset(self):
return (
super()
.get_queryset()
.prefetch_related(
"consent_rulings__arbiter",
Prefetch(
"feedback",
queryset=Feedback.objects.select_related("researcher").order_by(
"-id"
),
),
)
)
def get_context_data(self, **kwargs):
"""
In addition to the study, adds several items to the context dictionary. Study results
are paginated.
"""
context = super().get_context_data(**kwargs)
context["study"] = study = self.study
paginated_responses = context["object_list"]
columns_included_in_summary = study.columns_included_in_summary()
columns_included_in_table = [
"child__hashed_id",
"response__uuid",
"response__id",
"response__status",
"response__completed",
"response__is_preview",
]
response_data = []
for resp in paginated_responses:
# Info needed for table display of individual responses
this_resp_data = {
col.id: col.extractor(resp)
for col in RESPONSE_COLUMNS
if col.id in columns_included_in_table
}
# Exception - store actual date object for date created
this_resp_data["response__date_created"] = resp.date_created
# info needed for summary table shown at right
this_resp_data["summary"] = [
{
"name": col.name,
"value": col.extractor(resp),
"description": col.description,
}
for col in RESPONSE_COLUMNS
if col.id in columns_included_in_summary
]
this_resp_data["videos"] = resp.videos.values("pk", "full_name")
for v in this_resp_data["videos"]:
v["display_name"] = (
v["full_name"]
.replace("videoStream_{}_".format(study.uuid), "...")
.replace("_{}_".format(resp.uuid), "...")
)
response_data.append(this_resp_data)
context["response_data"] = response_data
context["data_options"] = [col for col in RESPONSE_COLUMNS if col.optional]
context["can_view_regular_responses"] = self.request.user.has_study_perms(
StudyPermission.READ_STUDY_RESPONSE_DATA, context["study"]
)
context["can_view_preview_responses"] = self.request.user.has_study_perms(
StudyPermission.READ_STUDY_PREVIEW_DATA, context["study"]
)
context["can_edit_feedback"] = self.request.user.has_study_perms(
StudyPermission.EDIT_STUDY_FEEDBACK, context["study"]
)
return context
def build_video_display_name(self, study_uuid, response_uuid, vid_name):
"""
Strips study_uuid and response_uuid out of video responses titles for better display.
"""
return ". . ." + ". . .".join(
vid_name.split(study_uuid + "_")[1].split("_" + response_uuid + "_")
)
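    # Worked example of the stripping above (hypothetical UUIDs and frame name):
    #   vid_name = "videoStream_<study_uuid>_1-my-frame_<response_uuid>_1234_5678.mp4"
    #   self.build_video_display_name(study_uuid, response_uuid, vid_name)
    #   -> ". . .1-my-frame. . .1234_5678.mp4"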
class StudySingleResponseDownload(ResponseDownloadMixin, View):
"""
Download a single study response in the selected format with selected headers.
"""
def get(self, *args, **kwargs):
data_type = self.request.GET.get("data-type-selector", None)
if data_type not in ["json", "csv", "framedata"]:
raise SuspiciousOperation
response_id = self.request.GET.get("response_id", None)
try:
resp = self.get_queryset().get(pk=response_id)
except ObjectDoesNotExist:
raise SuspiciousOperation
study = self.study
header_options = set(self.request.GET.getlist("data_options"))
extension = "json" if data_type == "json" else "csv"
filename = "{}_{}{}.{}".format(
study_name_for_files(study.name),
str(resp.uuid),
"_frames"
if data_type == "json"
else "_identifiable"
if IDENTIFIABLE_DATA_HEADERS & header_options
else "",
extension,
)
if data_type == "json":
cleaned_data = json.dumps(
construct_response_dictionary(resp, RESPONSE_COLUMNS, header_options),
indent="\t",
default=str,
)
elif data_type == "csv":
row_data = flatten_dict(
{col.id: col.extractor(resp) for col in RESPONSE_COLUMNS}
)
header_list = get_response_headers(header_options, row_data.keys())
output, writer = csv_dict_output_and_writer(header_list)
writer.writerow(row_data)
cleaned_data = output.getvalue()
elif data_type == "framedata":
cleaned_data = build_single_response_framedata_csv(resp)
else:
raise SuspiciousOperation
response = HttpResponse(cleaned_data, content_type="text/{}".format(extension))
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
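    # Illustrative request handled by get() above (hypothetical values):
    #   GET ...?data-type-selector=csv&response_id=42&data_options=child__birthday
    # returns "<study>_<response uuid>_identifiable.csv", since child__birthday is in
    # IDENTIFIABLE_DATA_HEADERS; data-type-selector=framedata instead returns the
    # frame-level CSV named "<study>_<response uuid>_frames.csv".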
class StudyResponseVideoAttachment(
ResearcherLoginRequiredMixin, UserPassesTestMixin, StudyLookupMixin, View
):
"""
View that redirects to a requested video for a study response.
"""
raise_exception = True
@cached_property
def video(self):
# Only select the video from consented videos for this study
return self.study.videos_for_consented_responses.get(
pk=self.kwargs.get("video")
)
def can_view_this_video(self):
user = self.request.user
study = self.study
video = self.video
return user.is_researcher and (
(
user.has_study_perms(StudyPermission.READ_STUDY_RESPONSE_DATA, study)
and not video.response.is_preview
)
or (
user.has_study_perms(StudyPermission.READ_STUDY_PREVIEW_DATA, study)
and video.response.is_preview
)
)
test_func = can_view_this_video
def get(self, request, *args, **kwargs):
video = self.video
download_url = video.download_url
if self.request.GET.get("mode") == "download":
r = requests.get(download_url)
response = FileResponse(
File.open(io.BytesIO(r.content)),
filename=video.filename,
as_attachment=True,
)
return response
return redirect(download_url)
class StudyResponseSubmitFeedback(StudyLookupMixin, UserPassesTestMixin, View):
"""
View to create or edit response feedback.
"""
def user_can_edit_feedback(self):
user = self.request.user
study = self.study
# First check user has permission to be editing feedback from this study at all
        if not (
            user.is_researcher
            and user.has_study_perms(StudyPermission.EDIT_STUDY_FEEDBACK, study)
        ):
return False
# Check that the feedback_id (if given) is from this study
feedback_id = self.request.POST.get("feedback_id", None)
if feedback_id:
try:
feedback = Feedback.objects.get(id=feedback_id)
except ObjectDoesNotExist:
return False
if feedback.response.study_id != study.pk:
return False
# Check that the response_id (if given) is from this study
response_id = self.request.POST.get("response_id", None)
if response_id:
try:
response = Response.objects.get(id=int(response_id))
except ObjectDoesNotExist:
return False
if response.study_id != study.pk:
return False
return True
test_func = user_can_edit_feedback
def post(self, request, *args, **kwargs):
"""
Create or edit feedback. Pass feedback_id to edit existing feedback, or response_id to create new
feedback for that response.
"""
form_data = self.request.POST
user = self.request.user
study = self.study
feedback_id = form_data.get("feedback_id", None)
comment = form_data.get("comment", "")
if feedback_id:
Feedback.objects.filter(id=feedback_id).update(comment=comment)
else:
response_id = int(form_data.get("response_id"))
Feedback.objects.create(
response_id=response_id, researcher=user, comment=comment
)
return HttpResponseRedirect(
reverse("exp:study-responses-list", kwargs=dict(pk=study.pk))
)
class StudyResponsesConsentManager(
ResearcherLoginRequiredMixin,
UserPassesTestMixin,
SingleObjectFetchProtocol[Study],
generic.DetailView,
):
"""Manage consent videos from here: approve or reject as evidence of informed consent."""
template_name = "studies/study_responses_consent_ruling.html"
queryset = Study.objects.all()
raise_exception = True
def user_can_code_consent(self):
user = self.request.user
study = self.get_object()
return user.is_researcher and (
user.has_study_perms(StudyPermission.CODE_STUDY_CONSENT, study)
or user.has_study_perms(StudyPermission.CODE_STUDY_PREVIEW_CONSENT, study)
)
test_func = user_can_code_consent
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# Need to prefetch our responses with consent-footage videos.
study = context["study"]
# TODO: technically should not grant access to consent videos for preview data unless has that perm
# (or should clearly indicate that code_study_consent means preview + actual data)
preview_only = not self.request.user.has_study_perms(
StudyPermission.CODE_STUDY_CONSENT, study
)
responses = get_responses_with_current_rulings_and_videos(
study.id, preview_only
)
context["loaded_responses"] = responses
context["summary_statistics"] = get_consent_statistics(study.id, preview_only)
# Using a map for arbitrarily structured data - lists and objects that we can't just trivially shove onto
# data-* properties in HTML
response_key_value_store = {}
paginator = Paginator(responses, RESPONSE_PAGE_SIZE)
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
# two jobs - generate statistics and populate k/v store.
for response in page_of_responses:
response_json = response_key_value_store[str(response["uuid"])] = {}
response["uuid"] = str(response.pop("uuid"))
response_json["videos"] = response.pop("videos")
response_json["details"] = {
"general": {
"uuid": response["uuid"],
"global_event_timings": json.dumps(
response.pop("global_event_timings")
),
"sequence": json.dumps(response.pop("sequence")),
"completed": json.dumps(response.pop("completed")),
"date_created": str(response["date_created"]),
},
"participant": {
"hashed_id": hash_participant_id(response),
"uuid": str(response.pop("child__user__uuid")),
"nickname": response.pop("child__user__nickname"),
},
"child": {
"hashed_id": hash_child_id(response),
"uuid": str(response.pop("child__uuid")),
"name": response.pop("child__given_name"),
"birthday": str(response.pop("child__birthday")),
"gender": response.pop("child__gender"),
"additional_information": response.pop(
"child__additional_information"
),
},
}
# TODO: Use json_script template tag to create JSON that can be used in Javascript
# (see https://docs.djangoproject.com/en/3.0/ref/templates/builtins/#json-script)
context["response_key_value_store"] = json.dumps(response_key_value_store)
return context
def post(self, request, *args, **kwargs):
"""This is where consent rulings are submitted."""
form_data = self.request.POST
user = self.request.user
study = self.get_object()
preview_only = not self.request.user.has_study_perms(
StudyPermission.CODE_STUDY_CONSENT, study
)
# Only allow any action on preview responses unless full perms
responses = study.responses
if preview_only:
responses = responses.filter(is_preview=True)
comments = json.loads(form_data.get("comments"))
# We now accept pending rulings to reverse old reject/approve decisions.
for ruling in ("accepted", "rejected", "pending"):
judged_responses = responses.filter(uuid__in=form_data.getlist(ruling))
for response in judged_responses:
response.consent_rulings.create(
action=ruling,
arbiter=user,
comments=comments.pop(str(response.uuid), None),
)
response.save()
# if there are any comments left over, these will count as new rulings that are the same as the last.
if comments:
for resp_uuid, comment in comments.items():
response = responses.get(uuid=resp_uuid)
response.consent_rulings.create(
action=response.most_recent_ruling, arbiter=user, comments=comment
)
return HttpResponseRedirect(
reverse(
"exp:study-responses-consent-manager",
kwargs=dict(pk=self.get_object().pk),
)
)
def get(self, request, *args, **kwargs):
if self.get_object().study_type.is_external:
messages.error(request, "There is no consent manager for external studies.")
return HttpResponseRedirect(reverse("exp:study-detail", kwargs=kwargs))
else:
return super().get(request, *args, **kwargs)
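# Illustrative shape of the consent-ruling POST body handled by
# StudyResponsesConsentManager.post above (UUIDs and comment text are hypothetical).
# Each ruling key holds a list of response UUIDs; "comments" is a JSON-encoded object
# keyed by response UUID, matching what post() reads via getlist() and json.loads().
_EXAMPLE_CONSENT_RULING_POST_DATA = {
    "accepted": ["<response-uuid-1>"],
    "rejected": [],
    "pending": ["<response-uuid-2>"],
    "comments": '{"<response-uuid-1>": "Parent read the consent statement clearly."}',
}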
class StudyResponsesAll(
CanViewStudyResponsesMixin, SingleObjectFetchProtocol[Study], generic.DetailView
):
"""
StudyResponsesAll shows a variety of download options for response and child data
from a given study. (It does not actually show any data.)
"""
template_name = "studies/study_responses_all.html"
queryset = Study.objects.all()
http_method_names = ["get"]
def get_context_data(self, **kwargs):
"""
In addition to the study, adds several items to the context dictionary.
"""
context = super().get_context_data(**kwargs)
context["n_responses"] = (
context["study"].responses_for_researcher(self.request.user).count()
)
context["data_options"] = [col for col in RESPONSE_COLUMNS if col.optional]
context["can_delete_preview_data"] = self.request.user.has_study_perms(
StudyPermission.DELETE_ALL_PREVIEW_DATA, context["study"]
)
context["can_view_regular_responses"] = self.request.user.has_study_perms(
StudyPermission.READ_STUDY_RESPONSE_DATA, context["study"]
)
context["can_view_preview_responses"] = self.request.user.has_study_perms(
StudyPermission.READ_STUDY_PREVIEW_DATA, context["study"]
)
return context
class StudyDeletePreviewResponses(
ResearcherLoginRequiredMixin,
UserPassesTestMixin,
SingleObjectFetchProtocol[Study],
SingleObjectMixin,
View,
):
queryset = Study.objects.all()
def user_can_delete_preview_data(self):
user = self.request.user
study = self.get_object()
return user.is_researcher and user.has_study_perms(
StudyPermission.DELETE_ALL_PREVIEW_DATA, study
)
test_func = user_can_delete_preview_data
def post(self, request, *args, **kwargs):
"""
Post method on all responses view handles the 'delete all preview data' button.
"""
study = self.get_object()
# Note: delete all, not just consented!
preview_responses = study.responses.filter(is_preview=True).prefetch_related(
"videos", "responselog_set", "consent_rulings", "feedback"
)
paginator = Paginator(preview_responses, RESPONSE_PAGE_SIZE)
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
for resp in page_of_responses:
# response logs, consent rulings, feedback, videos will all be deleted
# via cascades - videos will be removed from S3 also on pre_delete hook
resp.delete()
return HttpResponseRedirect(
reverse("exp:study-responses-all", kwargs={"pk": study.id})
)
class StudyResponsesJSON(ResponseDownloadMixin, generic.list.ListView):
"""
Hitting this URL downloads all study responses in JSON format.
"""
# Smaller pagination because individual responses may be large and we don't want the json representing 100
# responses in memory
paginate_by = 1
def make_chunk(self, paginator, page_num, header_options):
chunk = ""
if page_num == 1:
chunk = "[\n"
chunk += ",\n".join(
json.dumps(
construct_response_dictionary(resp, RESPONSE_COLUMNS, header_options),
indent="\t", # Use tab rather than spaces to make file smaller (ex. 60MB -> 25MB)
default=str,
)
for resp in paginator.page(page_num)
)
if page_num == paginator.page_range[-1]:
chunk += "\n]"
else:
chunk += ",\n"
return chunk
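    # Worked example of the streamed output (assuming paginate_by=1 and three responses,
    # so three pages):
    #   make_chunk(paginator, 1, opts) -> '[\n{...response 1...},\n'
    #   make_chunk(paginator, 2, opts) -> '{...response 2...},\n'
    #   make_chunk(paginator, 3, opts) -> '{...response 3...}\n]'
    # Concatenated by the StreamingHttpResponse below, this forms one valid JSON array
    # without holding all responses in memory at once.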
def render_to_response(self, context, **response_kwargs):
paginator = context["paginator"]
study = self.study
header_options = set(self.request.GET.getlist("data_options"))
filename = "{}_{}.json".format(
study_name_for_files(study.name),
"all-responses"
+ ("-identifiable" if IDENTIFIABLE_DATA_HEADERS & header_options else ""),
)
response = StreamingHttpResponse(
(
self.make_chunk(paginator, page_num, header_options)
for page_num in paginator.page_range
),
content_type="text/json",
)
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyResponsesCSV(ResponseDownloadMixin, generic.list.ListView):
"""
Hitting this URL downloads a summary of all study responses in CSV format.
"""
def render_to_response(self, context, **response_kwargs):
paginator = context["paginator"]
study = self.study
headers = set()
session_list = []
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
for resp in page_of_responses:
row_data = flatten_dict(
{col.id: col.extractor(resp) for col in RESPONSE_COLUMNS}
)
# Add any new headers from this session
headers = headers | row_data.keys()
session_list.append(row_data)
header_options = set(self.request.GET.getlist("data_options"))
header_list = get_response_headers(header_options, headers)
output, writer = csv_dict_output_and_writer(header_list)
writer.writerows(session_list)
cleaned_data = output.getvalue()
filename = "{}_{}.csv".format(
study_name_for_files(study.name),
"all-responses"
+ ("-identifiable" if IDENTIFIABLE_DATA_HEADERS & header_options else ""),
)
response = HttpResponse(cleaned_data, content_type="text/csv")
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyResponsesDictCSV(CanViewStudyResponsesMixin, View):
"""
Hitting this URL downloads a data dictionary for the study response summary in CSV format. Does not depend on actual response data.
"""
def build_summary_dict_csv(self, optional_headers_selected_ids):
"""
Builds CSV file contents for data dictionary corresponding to the overview CSV
"""
descriptions = {col.id: col.description for col in RESPONSE_COLUMNS}
header_list = get_response_headers(
optional_headers_selected_ids, descriptions.keys()
)
all_descriptions = [
{"column": header, "description": descriptions[header]}
for header in header_list
]
output, writer = csv_dict_output_and_writer(["column", "description"])
writer.writerows(all_descriptions)
return output.getvalue()
def get(self, request, *args, **kwargs):
study = self.study
header_options = self.request.GET.getlist("data_options")
cleaned_data = self.build_summary_dict_csv(header_options)
filename = "{}_{}.csv".format(
study_name_for_files(study.name), "all-responses-dict"
)
response = HttpResponse(cleaned_data, content_type="text/csv")
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyChildrenCSV(ResponseDownloadMixin, generic.list.ListView):
"""
Hitting this URL downloads a summary of all children who participated in CSV format.
"""
def render_to_response(self, context, **response_kwargs):
paginator = context["paginator"]
study = self.study
child_list = []
session_list = []
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
for resp in page_of_responses:
row_data = flatten_dict(
{
col.id: col.extractor(resp)
for col in RESPONSE_COLUMNS
if col.id in CHILD_CSV_HEADERS
}
)
if row_data["child__global_id"] not in child_list:
child_list.append(row_data["child__global_id"])
session_list.append(row_data)
output, writer = csv_dict_output_and_writer(CHILD_CSV_HEADERS)
writer.writerows(session_list)
cleaned_data = output.getvalue()
filename = "{}_{}.csv".format(
study_name_for_files(study.name), "all-children-identifiable"
)
response = HttpResponse(cleaned_data, content_type="text/csv")
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyChildrenDictCSV(CanViewStudyResponsesMixin, View):
"""
Hitting this URL downloads a data dictionary in CSV format for the summary of children who participated.
Does not depend on actual response data.
TODO: separate from response data mixin
"""
def build_child_dict_csv(self):
"""
Builds CSV file contents for data dictionary for overview of all child participants
"""
all_descriptions = [
{"column": col.id, "description": col.description}
for col in RESPONSE_COLUMNS
if col.id in CHILD_CSV_HEADERS
]
output, writer = csv_dict_output_and_writer(["column", "description"])
writer.writerows(all_descriptions)
return output.getvalue()
def get(self, request, *args, **kwargs):
study = self.study
cleaned_data = self.build_child_dict_csv()
filename = "{}_{}.csv".format(
study_name_for_files(study.name), "all-children-dict"
)
response = HttpResponse(cleaned_data, content_type="text/csv")
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyResponsesFrameDataCSV(ResponseDownloadMixin, generic.list.ListView):
"""Hitting this URL downloads a ZIP file with frame data from one response per file in CSV format"""
# TODO: with large files / many responses generation can take a while. Should generate asynchronously along
# with the data dict.
def render_to_response(self, context, **response_kwargs):
paginator = context["paginator"]
study = self.study
if study.study_type.is_external:
messages.error(
self.request, "Frame data is not available for External Studies."
)
return redirect(reverse("exp:study-responses-all", kwargs={"pk": study.pk}))
zipped_file = io.BytesIO() # import io
with zipfile.ZipFile(zipped_file, "w", zipfile.ZIP_DEFLATED) as zipped:
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
for resp in page_of_responses:
data = build_single_response_framedata_csv(resp)
filename = "{}_{}_{}.csv".format(
study_name_for_files(study.name), resp.uuid, "frames"
)
zipped.writestr(filename, data)
zipped_file.seek(0)
response = FileResponse(
zipped_file,
as_attachment=True,
filename="{}_framedata_per_session.zip".format(
study_name_for_files(study.name)
),
)
return response
class StudyResponsesFrameDataDictCSV(ResponseDownloadMixin, View):
"""
Hitting this URL queues creation of a template data dictionary for frame-level data in CSV format.
The file is put on GCP and a link is emailed to the user.
"""
def get(self, request, *args, **kwargs):
study = self.study
if study.study_type.is_external:
messages.error(
request, "Frame data dictionary is not available for external studies"
)
else:
filename = "{}_{}_{}".format(
study_name_for_files(study.name), study.uuid, "all-frames-dict"
)
build_framedata_dict.delay(filename, study.uuid, self.request.user.uuid)
messages.success(
request,
f"A frame data dictionary for {study.name} is being generated. You will be emailed a link when it's completed.",
)
return HttpResponseRedirect(
reverse("exp:study-responses-all", kwargs=self.kwargs)
)
class StudyDemographics(
CanViewStudyResponsesMixin, SingleObjectFetchProtocol[Study], generic.DetailView
):
"""
StudyDemographics view shows participant demographic snapshots associated
with each response to the study
"""
template_name = "studies/study_demographics.html"
queryset = Study.objects.all()
def get_context_data(self, **kwargs):
"""
Adds information for displaying how many and which types of responses are available.
"""
context = super().get_context_data(**kwargs)
context["n_responses"] = (
context["study"].responses_for_researcher(self.request.user).count()
)
context["can_view_regular_responses"] = self.request.user.has_study_perms(
StudyPermission.READ_STUDY_RESPONSE_DATA, context["study"]
)
context["can_view_preview_responses"] = self.request.user.has_study_perms(
StudyPermission.READ_STUDY_PREVIEW_DATA, context["study"]
)
return context
class StudyDemographicsJSON(DemographicDownloadMixin, generic.list.ListView):
"""
Hitting this URL downloads all participant demographics in JSON format.
"""
def render_to_response(self, context, **response_kwargs):
study = self.study
header_options = self.request.GET.getlist("demo_options")
json_responses = []
paginator = context["paginator"]
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
for resp in page_of_responses:
json_responses.append(
json.dumps(
construct_response_dictionary(
resp,
DEMOGRAPHIC_COLUMNS,
header_options,
include_exp_data=False,
),
indent="\t",
default=str,
)
)
cleaned_data = f"[ {", ".join(json_responses)} ]"
filename = "{}_{}.json".format(
study_name_for_files(study.name), "all-demographic-snapshots"
)
response = HttpResponse(cleaned_data, content_type="text/json")
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyDemographicsCSV(DemographicDownloadMixin, generic.list.ListView):
"""
Hitting this URL downloads all participant demographics in CSV format.
"""
def render_to_response(self, context, **response_kwargs):
study = self.study
paginator = context["paginator"]
header_options = set(self.request.GET.getlist("demo_options"))
participant_list = []
headers_for_download = get_demographic_headers(header_options)
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
for resp in page_of_responses:
row_data = {col.id: col.extractor(resp) for col in DEMOGRAPHIC_COLUMNS}
participant_list.append(row_data)
output, writer = csv_dict_output_and_writer(headers_for_download)
writer.writerows(participant_list)
cleaned_data = output.getvalue()
filename = "{}_{}.csv".format(
study_name_for_files(study.name), "all-demographic-snapshots"
)
response = HttpResponse(cleaned_data, content_type="text/csv")
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyDemographicsDictCSV(DemographicDownloadMixin, generic.list.ListView):
"""
    Hitting this URL downloads a data dictionary for participant demographics in CSV format.
Does not depend on any actual data.
"""
def render_to_response(self, context, **response_kwargs):
header_options = set(self.request.GET.getlist("demo_options"))
headers_for_download = get_demographic_headers(header_options)
all_descriptions = [
{"column": col.id, "description": col.description}
for col in DEMOGRAPHIC_COLUMNS
if col.id in headers_for_download
]
output, writer = csv_dict_output_and_writer(["column", "description"])
writer.writerows(all_descriptions)
cleaned_data = output.getvalue()
filename = "{}_{}.csv".format(
study_name_for_files(self.study.name), "all-demographic-snapshots-dict"
)
response = HttpResponse(cleaned_data, content_type="text/csv")
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyCollisionCheck(ResponseDownloadMixin, View):
"""
Hitting this URL checks for collisions among all child and account hashed IDs, and returns a string describing
any collisions (empty string if none).
"""
def get(self, request, *args, **kwargs):
study = self.study
responses = (
study.consented_responses.order_by("id")
.select_related("child", "child__user", "study")
.values(
"uuid",
"child__uuid",
"child__user__uuid",
"study__uuid",
"study__salt",
"study__hash_digits",
)
)
child_dict = {}
account_dict = {}
collision_text = ""
# Note: could also just check number of unique global vs. hashed IDs in full dataset;
# only checking one-by-one for more informative output.
paginator = Paginator(responses, RESPONSE_PAGE_SIZE)
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
for resp in page_of_responses:
participant_hashed_id = hash_participant_id(resp)
participant_global_id = resp["child__user__uuid"]
child_hashed_id = hash_child_id(resp)
child_global_id = resp["child__uuid"]
if participant_hashed_id in account_dict:
if participant_global_id != account_dict[participant_hashed_id]:
collision_text += "Participant hashed ID {} ({}, {})\n".format(
participant_hashed_id,
account_dict[participant_hashed_id],
participant_global_id,
)
else:
account_dict[participant_hashed_id] = participant_global_id
if child_hashed_id in child_dict:
if child_global_id != child_dict[child_hashed_id]:
collision_text += "Child hashed ID {} ({}, {})<br>".format(
child_hashed_id,
child_dict[child_hashed_id],
child_global_id,
)
else:
child_dict[child_hashed_id] = child_global_id
return JsonResponse({"collisions": collision_text})
class StudyAttachments(CanViewStudyResponsesMixin, generic.ListView):
"""
StudyAttachments View shows video attachments for the study
"""
template_name = "studies/study_attachments.html"
model = Video
paginate_by = 100
def get_ordering(self):
return self.request.GET.get("sort", "-created_at") or "-created_at"
def get_queryset(self):
"""Fetches all consented videos this user has access to.
Returns:
QuerySet: all videos from this study where response has been marked as
consented and response is of a type (preview/actual data) that user can view
Todo:
* use a helper (e.g. in queries) select_videos_for_user to fetch the
appropriate videos here and in build_zipfile_of_videos - deferring for the moment
to work out dependencies.
"""
study = self.study
videos = study.videos_for_consented_responses
if not self.request.user.has_study_perms(
StudyPermission.READ_STUDY_RESPONSE_DATA, study
):
videos = videos.filter(response__is_preview=True)
if not self.request.user.has_study_perms(
StudyPermission.READ_STUDY_PREVIEW_DATA, study
):
videos = videos.filter(response__is_preview=False)
match = self.request.GET.get("match", "")
if match:
videos = videos.filter(full_name__icontains=match)
return videos.order_by(self.get_ordering())
def get_context_data(self, **kwargs):
"""
In addition to the study, adds several items to the context dictionary. Study results
are paginated.
"""
context = super().get_context_data(**kwargs)
context["match"] = self.request.GET.get("match", "")
context["study"] = self.study
return context
def post(self, request, *args, **kwargs):
"""
Downloads study video
"""
match = self.request.GET.get("match", "")
study = self.study
if self.request.POST.get("all-attachments"):
build_zipfile_of_videos.delay(
f"{study.uuid}_videos",
study.uuid,
match,
self.request.user.uuid,
consent_only=False,
)
messages.success(
request,
f"An archive of videos for {study.name} is being generated. You will be emailed a link when it's completed.",
)
if self.request.POST.get("all-consent-videos"):
build_zipfile_of_videos.delay(
f"{study.uuid}_consent_videos",
study.uuid,
match,
self.request.user.uuid,
consent_only=True,
)
messages.success(
request,
f"An archive of consent videos for {study.name} is being generated. You will be emailed a link when it's completed.",
)
return HttpResponseRedirect(
reverse("exp:study-attachments", kwargs=self.kwargs)
)
|
import io
import json
import zipfile
from functools import cached_property
from typing import Callable, Dict, KeysView, List, NamedTuple, Set, Union
import requests
from django.contrib import messages
from django.contrib.auth.mixins import UserPassesTestMixin
from django.core.exceptions import ObjectDoesNotExist, SuspiciousOperation
from django.core.files import File
from django.core.paginator import Paginator
from django.db.models import Prefetch
from django.http import (
FileResponse,
HttpResponse,
HttpResponseRedirect,
JsonResponse,
StreamingHttpResponse,
)
from django.shortcuts import redirect, reverse
from django.views import generic
from django.views.generic.base import View
from django.views.generic.detail import SingleObjectMixin
from django.views.generic.list import MultipleObjectMixin
from accounts.utils import (
hash_child_id,
hash_demographic_id,
hash_id,
hash_participant_id,
)
from exp.utils import (
RESPONSE_PAGE_SIZE,
csv_dict_output_and_writer,
csv_namedtuple_writer,
flatten_dict,
round_age,
round_ages_from_birthdays,
study_name_for_files,
)
from exp.views.mixins import (
CanViewStudyResponsesMixin,
ResearcherLoginRequiredMixin,
SingleObjectFetchProtocol,
StudyLookupMixin,
)
from studies.models import Feedback, Response, Study, Video
from studies.permissions import StudyPermission
from studies.queries import (
get_consent_statistics,
get_responses_with_current_rulings_and_videos,
)
from studies.tasks import build_framedata_dict, build_zipfile_of_videos
class ResponseDataColumn(NamedTuple):
    # id: Unique key to identify data. Used as the CSV column header; any portion before __ is used to create a
    # sub-dictionary for JSON data.
id: str
description: str # Description for data dictionary
extractor: Callable[
[Union[Response, Dict]], Union[str, List]
] # Function to extract value from response instance or dict
    optional: bool = False  # whether this is a column the user checks a box to include
name: str = "" # used in template form for optional columns
include_by_default: bool = False # whether to initially check checkbox for field
identifiable: bool = False # used to determine filename signaling
# Columns for response downloads. Extractor functions expect Response instance
RESPONSE_COLUMNS = [
ResponseDataColumn(
id="response__id",
description="Short ID for this response",
extractor=lambda resp: str(resp.id),
name="Response ID",
),
ResponseDataColumn(
id="response__uuid",
description="Unique identifier for response. Can be used to match data to video filenames.",
extractor=lambda resp: str(resp.uuid),
name="Response UUID",
),
ResponseDataColumn(
id="response__date_created",
description="Timestamp for when participant began session, in format e.g. 2019-11-07 17:13:38.702958+00:00",
extractor=lambda resp: str(resp.date_created),
name="Date created",
),
ResponseDataColumn(
id="response__completed",
description=(
"Whether the participant submitted the exit survey; depending on study criteria, this may not align "
"with whether the session is considered complete. E.g., participant may have left early but submitted "
"exit survey, or may have completed all test trials but not exit survey."
),
extractor=lambda resp: resp.completed,
name="Completed",
),
ResponseDataColumn(
id="response__withdrawn",
description=(
"Whether the participant withdrew permission for viewing/use of study video beyond consent video. If "
"true, video will not be available and must not be used."
),
extractor=lambda resp: resp.withdrawn,
name="Withdrawn",
),
ResponseDataColumn(
id="response__parent_feedback",
description=(
"Freeform parent feedback entered into the exit survey, if any. This field may incidentally contain "
"identifying or sensitive information depending on what parents say, so it should be scrubbed or "
"omitted from published data."
),
extractor=lambda resp: resp.parent_feedback,
name="Parent feedback",
),
ResponseDataColumn(
id="response__birthdate_difference",
description=(
"Difference between birthdate entered in exit survey, if any, and birthdate of registered child "
"participating. Positive values mean that the birthdate from the exit survey is LATER. Blank if "
"no birthdate available from the exit survey."
),
extractor=lambda resp: resp.birthdate_difference,
name="Birthdate difference",
),
ResponseDataColumn(
id="response__video_privacy",
description=(
"Privacy level for videos selected during the exit survey, if the parent completed the exit survey. "
"Possible levels are 'private' (only people listed on your IRB protocol can view), 'scientific' "
"(can share for scientific/educational purposes), and 'public' (can also share for publicity). "
"In no cases may videos be shared for commercial purposes. If this is missing (e.g., family stopped "
"just after the consent form and did not complete the exit survey), you must treat the video as "
"private."
),
extractor=lambda resp: resp.privacy,
name="Video privacy level",
),
ResponseDataColumn(
id="response__databrary",
description=(
"Whether the parent agreed to share video data on Databrary - 'yes' or 'no'. If missing, you must "
"treat the video as if 'no' were selected. If 'yes', the video privacy selections also apply to "
"authorized Databrary users."
),
extractor=lambda resp: resp.databrary,
name="Databrary sharing",
),
ResponseDataColumn(
id="response__is_preview",
description=(
"Whether this response was generated by a researcher previewing the experiment. Preview data should "
"not be used in any actual analyses."
),
extractor=lambda resp: resp.is_preview,
name="Preview",
),
ResponseDataColumn(
id="consent__ruling",
description=(
"Most recent consent video ruling: one of 'accepted' (consent has been reviewed and judged to indidate "
"informed consent), 'rejected' (consent has been reviewed and judged not to indicate informed "
"consent -- e.g., video missing or parent did not read statement), or 'pending' (no current judgement, "
"e.g. has not been reviewed yet or waiting on parent email response')"
),
extractor=lambda resp: resp.most_recent_ruling,
),
ResponseDataColumn(
id="consent__arbiter",
description="Name associated with researcher account that made the most recent consent ruling",
extractor=lambda resp: resp.most_recent_ruling_arbiter,
),
ResponseDataColumn(
id="consent__time",
description="Timestamp of most recent consent ruling, format e.g. 2019-12-09 20:40",
extractor=lambda resp: resp.most_recent_ruling_date,
),
ResponseDataColumn(
id="consent__comment",
description=(
"Comment associated with most recent consent ruling (may be used to track e.g. any cases where consent "
"was confirmed by email)"
),
extractor=lambda resp: resp.most_recent_ruling_comment,
),
ResponseDataColumn(
id="study__uuid",
description="Unique identifier of study associated with this response. Same for all responses to a given Lookit study.",
extractor=lambda resp: str(resp.study.uuid),
),
ResponseDataColumn(
id="participant__global_id",
description=(
"Unique identifier for family account associated with this response. Will be the same for multiple "
"responses from a child and for siblings, and across different studies. MUST BE REDACTED FOR "
"PUBLICATION because this allows identification of families across different published studies, which "
"may have unintended privacy consequences. Researchers can use this ID to match participants across "
"studies (subject to their own IRB review), but would need to generate their own random participant "
"IDs for publication in that case. Use participant_hashed_id as a publication-safe alternative if "
"only analyzing data from one Lookit study."
),
extractor=lambda resp: str(resp.child.user.uuid),
optional=True,
name="Parent global ID",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="participant__hashed_id",
description=(
"Identifier for family account associated with this response. Will be the same for multiple responses "
"from a child and for siblings, but is unique to this study. This may be published directly."
),
extractor=lambda resp: hash_id(
resp.child.user.uuid,
resp.study.uuid,
resp.study.salt,
resp.study.hash_digits,
),
name="Parent ID",
),
ResponseDataColumn(
id="participant__nickname",
description=(
"Nickname associated with the family account for this response - generally the mom or dad's name. "
"Must be redacted for publication."
),
extractor=lambda resp: resp.child.user.nickname,
optional=True,
name="Parent name",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__global_id",
description=(
"Primary unique identifier for the child associated with this response. Will be the same for multiple "
"responses from one child, even across different Lookit studies. MUST BE REDACTED FOR PUBLICATION "
"because this allows identification of children across different published studies, which may have "
"unintended privacy consequences. Researchers can use this ID to match participants across studies "
"(subject to their own IRB review), but would need to generate their own random participant IDs for "
"publication in that case. Use child_hashed_id as a publication-safe alternative if only analyzing "
"data from one Lookit study."
),
extractor=lambda resp: str(resp.child.uuid),
optional=True,
name="Child global ID",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__hashed_id",
description=(
"Identifier for child associated with this response. Will be the same for multiple responses from a "
"child, but is unique to this study. This may be published directly."
),
extractor=lambda resp: hash_id(
resp.child.uuid, resp.study.uuid, resp.study.salt, resp.study.hash_digits
),
name="Child ID",
),
ResponseDataColumn(
id="child__name",
description=(
"Nickname for the child associated with this response. Not necessarily a real name (we encourage "
"initials, nicknames, etc. if parents aren't comfortable providing a name) but must be redacted for "
"publication of data."
),
extractor=lambda resp: resp.child.given_name,
optional=True,
name="Child name",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__birthday",
description=(
"Birthdate of child associated with this response. Must be redacted for publication of data (switch to "
"age at time of participation; either use rounded age, jitter the age, or redact timestamps of "
"participation)."
),
extractor=lambda resp: resp.child.birthday,
optional=True,
name="Birthdate",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__age_in_days",
description=(
"Age in days at time of response of child associated with this response, exact. This can be used in "
"conjunction with timestamps to calculate the child's birthdate, so must be jittered or redacted prior "
"to publication unless no timestamp information is shared."
),
extractor=lambda resp: (resp.date_created.date() - resp.child.birthday).days,
optional=True,
name="Age in days",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__age_rounded",
description=(
"Age in days at time of response of child associated with this response, rounded to the nearest 10 "
"days if under 1 year old and to the nearest 30 days if over 1 year old. May be published; however, if "
"you have more than a few sessions per participant it would be possible to infer the exact age in days "
"(and therefore birthdate) with some effort. In this case you might consider directly jittering "
"birthdates."
),
extractor=lambda resp: str(
round_age(int((resp.date_created.date() - resp.child.birthday).days))
)
if (resp.date_created and resp.child.birthday)
else "",
optional=True,
name="Rounded age",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__gender",
description=(
"Parent-identified gender of child, one of 'm' (male), 'f' (female), 'o' (other), or 'na' (prefer not "
"to answer)"
),
extractor=lambda resp: resp.child.gender,
optional=True,
name="Child gender",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__age_at_birth",
description=(
"Gestational age at birth in weeks. One of '40 or more weeks', '39 weeks' through '24 weeks', "
"'Under 24 weeks', or 'Not sure or prefer not to answer'"
),
extractor=lambda resp: resp.child.age_at_birth,
optional=True,
name="Child gestational age",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__language_list",
description="List of languages spoken (using language codes in Lookit docs), separated by spaces",
extractor=lambda resp: resp.child.language_list,
optional=True,
name="Child languages",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__condition_list",
description="List of child characteristics (using condition/characteristic codes in Lookit docs), separated by spaces",
extractor=lambda resp: resp.child.condition_list,
optional=True,
name="Child conditions",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__additional_information",
description=(
"Free response 'anything else you'd like us to know' field on child registration form for child "
"associated with this response. Should be redacted or reviewed prior to publication as it may include "
"names or other identifying information."
),
extractor=lambda resp: resp.child.additional_information,
optional=True,
name="Child additional information",
include_by_default=True,
identifiable=True,
),
ResponseDataColumn(
id="response__sequence",
description=(
"Each response_sequence.N field (response_sequence.0, response_sequence.1, etc.) gives the ID of the "
"Nth frame displayed during the session associated with this response. Responses may have different "
"sequences due to randomization or if a participant leaves early."
),
extractor=lambda resp: resp.sequence,
name="Response sequence",
),
ResponseDataColumn(
id="response__conditions",
description=(
"RESEARCHERS: EXPAND THIS SECTION BASED ON YOUR INDIVIDUAL STUDY. Each set of "
"response_conditions.N.(...) fields give information about condition assignment during a particular "
"frame of this study. response_conditions.0.frameName is the frame ID (corresponding to a value in "
"response_sequence) where the randomization occurred. Additional fields such as "
"response_conditions.0.conditionNum depend on the specific randomizer frames used in this study."
),
extractor=lambda resp: [
{**{"frameName": cond_frame}, **conds}
for (cond_frame, conds) in resp.conditions.items()
],
),
]
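# Illustrative sketch (added for documentation; not called by any view): how the response
# download views turn a Response instance into one flat summary row using the column
# registry above, mirroring the pattern used in views such as StudyResponsesCSV.
def _example_summary_row(resp: Response) -> Dict:
    """Apply every extractor, then flatten nested values such as response__conditions."""
    return flatten_dict({col.id: col.extractor(resp) for col in RESPONSE_COLUMNS})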
# Columns for demographic data downloads. Extractor functions expect Response values dict,
# rather than instance.
DEMOGRAPHIC_COLUMNS = [
ResponseDataColumn(
id="response__uuid",
description=(
"Primary unique identifier for response. Can be used to match demographic data to response data "
"and video filenames; must be redacted prior to publication if videos are also published."
),
extractor=lambda resp: str(resp["uuid"]),
name="Response UUID",
),
ResponseDataColumn(
id="participant__global_id",
description=(
"Unique identifier for family account associated with this response. Will be the same for multiple "
"responses from a child and for siblings, and across different studies. MUST BE REDACTED FOR "
"PUBLICATION because this allows identification of families across different published studies, "
"which may have unintended privacy consequences. Researchers can use this ID to match participants "
"across studies (subject to their own IRB review), but would need to generate their own random "
"participant IDs for publication in that case. Use participant__hashed_id as a publication-safe "
"alternative if only analyzing data from one Lookit study."
),
extractor=lambda resp: str(resp["child__user__uuid"]),
optional=True,
name="Parent global ID",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="participant__hashed_id",
description=(
"Identifier for family account associated with this response. Will be the same for multiple "
"responses from a child and for siblings, but is unique to this study. This may be published "
"directly."
),
extractor=lambda resp: hash_participant_id(resp),
name="Participant ID",
),
ResponseDataColumn(
id="demographic__hashed_id",
description=(
"Identifier for this demographic snapshot. Changes upon updates to the demographic form, "
"so may vary within the same participant across responses."
),
extractor=lambda resp: hash_demographic_id(resp),
name="Demographic ID",
),
ResponseDataColumn(
id="demographic__date_created",
description=(
"Timestamp of creation of the demographic snapshot associated with this response, in format e.g. "
"2019-10-02 21:39:03.713283+00:00"
),
extractor=lambda resp: str(resp["demographic_snapshot__created_at"]),
name="Date created",
),
ResponseDataColumn(
id="demographic__number_of_children",
description="Response to 'How many children do you have?'; options 0-10 or >10 (More than 10)",
extractor=lambda resp: resp["demographic_snapshot__number_of_children"],
name="Number of children",
),
ResponseDataColumn(
id="demographic__child_rounded_ages",
description=(
"List of rounded ages based on child birthdays entered in demographic form (not based on children "
"registered). Ages are at time of response for this row, in days, rounded to nearest 10 for ages "
"under 1 year and nearest 30 otherwise. In format e.g. [60, 390]"
),
extractor=lambda resp: round_ages_from_birthdays(
resp["demographic_snapshot__child_birthdays"], resp["date_created"]
),
name="Child ages rounded",
),
ResponseDataColumn(
id="demographic__languages_spoken_at_home",
description="Freeform response to 'What language(s) does your family speak at home?'",
extractor=lambda resp: resp["demographic_snapshot__languages_spoken_at_home"],
name="Languages spoken at home",
),
ResponseDataColumn(
id="demographic__number_of_guardians",
description="Response to 'How many parents/guardians do your children live with?' - 1, 2, 3> [3 or more], varies",
extractor=lambda resp: resp["demographic_snapshot__number_of_guardians"],
name="Number of guardians",
),
ResponseDataColumn(
id="demographic__number_of_guardians_explanation",
description=(
"Freeform response to 'If the answer varies due to shared custody arrangements or travel, please "
"enter the number of parents/guardians your children are usually living with or explain.'"
),
extractor=lambda resp: resp[
"demographic_snapshot__number_of_guardians_explanation"
],
name="Number of guardians explanation",
),
ResponseDataColumn(
id="demographic__race_identification",
description=(
"Comma-separated list of all values checked for question 'What category(ies) does your family "
"identify as?', from list: White; Hispanic, Latino, or Spanish origin; Black or African American; "
"Asian; American Indian or Alaska Native; Middle Eastern or North African; Native Hawaiian or "
"Other Pacific Islander; Another race, ethnicity, or origin"
),
extractor=lambda resp: resp["demographic_snapshot__race_identification"],
name="Race",
),
ResponseDataColumn(
id="demographic__parent_age",
description=(
"Parent's response to question 'What is your age?'; options are <18, 18-21, 22-24, 25-29, 30-34, "
"35-39, 40-44, 45-49, 50s, 60s, >70"
),
extractor=lambda resp: resp["demographic_snapshot__age"],
name="Parent age",
),
ResponseDataColumn(
id="demographic__parent_gender",
description=(
"Parent's response to question 'What is your gender?'; options are m [male], f [female], o "
"[other], na [prefer not to answer]"
),
extractor=lambda resp: resp["demographic_snapshot__gender"],
name="Parent age",
),
ResponseDataColumn(
id="demographic__education_level",
description=(
"Parent's response to question 'What is the highest level of education you've completed?'; options "
"are some [some or attending high school], hs [high school diploma or GED], col [some or attending "
"college], assoc [2-year college degree], bach [4-year college degree], grad [some or attending "
"graduate or professional school], prof [graduate or professional degree]"
),
extractor=lambda resp: resp["demographic_snapshot__education_level"],
name="Parent education level",
),
ResponseDataColumn(
id="demographic__spouse_education_level",
description=(
"Parent's response to question 'What is the highest level of education your spouse has "
"completed?'; options are some [some or attending high school], hs [high school diploma or GED], "
"col [some or attending college], assoc [2-year college degree], bach [4-year college degree], "
"grad [some or attending graduate or professional school], prof [graduate or professional degree], "
"na [not applicable - no spouse or partner]"
),
extractor=lambda resp: resp["demographic_snapshot__spouse_education_level"],
name="Parent education level",
),
ResponseDataColumn(
id="demographic__annual_income",
description=(
"Parent's response to question 'What is your approximate family yearly income (in US dollars)?'; "
"options are 0, 5000, 10000, 15000, 20000-19000 in increments of 10000, >200000, or na [prefer not "
"to answer]"
),
extractor=lambda resp: resp["demographic_snapshot__annual_income"],
name="Annual income",
),
ResponseDataColumn(
id="demographic__number_of_books",
description="Parent's response to question 'About how many children's books are there in your home?'; integer",
extractor=lambda resp: resp["demographic_snapshot__number_of_books"],
name="Number of books",
),
ResponseDataColumn(
id="demographic__additional_comments",
description="Parent's freeform response to question 'Anything else you'd like us to know?'",
extractor=lambda resp: resp["demographic_snapshot__additional_comments"],
name="Additional comments",
),
ResponseDataColumn(
id="demographic__country",
description="Parent's response to question 'What country do you live in?'; 2-letter country code",
extractor=lambda resp: resp["demographic_snapshot__country"],
name="Country code",
),
ResponseDataColumn(
id="demographic__state",
description=(
"Parent's response to question 'What state do you live in?' if country is US; 2-letter state "
"abbreviation"
),
extractor=lambda resp: resp["demographic_snapshot__state"],
name="US State",
),
ResponseDataColumn(
id="demographic__density",
description=(
"Parent's response to question 'How would you describe the area where you live?'; options are "
"urban, suburban, rural"
),
extractor=lambda resp: resp["demographic_snapshot__density"],
name="Density",
),
ResponseDataColumn(
id="demographic__lookit_referrer",
description="Parent's freeform response to question 'How did you hear about Lookit?'",
extractor=lambda resp: resp["demographic_snapshot__lookit_referrer"],
name="How you heard about Lookit",
),
]
# Which headers from the response data summary should go in the child data downloads
CHILD_CSV_HEADERS = [
col.id
for col in RESPONSE_COLUMNS
if col.id.startswith("child__") or col.id.startswith("participant__")
]
IDENTIFIABLE_DATA_HEADERS = {col.id for col in RESPONSE_COLUMNS if col.identifiable}
def get_response_headers(
selected_header_ids: Union[Set, List],
all_available_header_ids: Union[Set, KeysView],
) -> List:
"""Get ordered list of response headers for download.
Select and order the appropriate headers to include in a file download, based on
which optional headers are selected and which headers are available.
Args:
selected_header_ids: which optional headers to include (corresponding to id values in
RESPONSE_COLUMNS). Headers that are specified as optional in RESPONSE_COLUMNS will
only be included if listed in selected_header_ids.
all_available_header_ids: all header ids we have data for. Any header ids that are in
this set but not in RESPONSE_COLUMNS will be added to the end of the output list.
Returns:
List of headers to include, consisting of the following in order:
1) Headers in RESPONSE_COLUMNS, in order, omitting any that are optional and were not selected
2) Extra headers from all_available_header_ids not included in (1), in alpha order
"""
unselected_optional_ids = {
col.id
for col in RESPONSE_COLUMNS
if col.optional and col.id not in selected_header_ids
}
selected_standard_header_ids = [
col.id
for col in RESPONSE_COLUMNS[0:-2]
if col.id not in unselected_optional_ids
]
return selected_standard_header_ids + sorted(
list(
all_available_header_ids
- set(selected_standard_header_ids)
- unselected_optional_ids
)
)
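# Worked example (hypothetical header ids): with
#   selected_header_ids = {"child__gender"}
#   all_available_header_ids = {"response__uuid", "child__gender", "child__birthday",
#                               "response_sequence.0"}
# the result is the non-optional RESPONSE_COLUMNS ids in registry order, plus
# "child__gender" (a selected optional header), with "child__birthday" omitted
# (optional, not selected) and "response_sequence.0" appended at the end, sorted
# alphabetically among any extra available headers.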
def get_demographic_headers(selected_header_ids=None) -> List[str]:
"""Get ordered list of demographic headers for download.
Args:
selected_header_ids(set or list): which optional headers to include (corresponding
to id values in DEMOGRAPHIC_COLUMNS).
Returns:
Ordered list of headers to include in download
Headers are id values from DEMOGRAPHIC_COLUMNS in order, omitting any that are optional
and were not included in selected_header_ids.
"""
if selected_header_ids is None:
selected_header_ids = {}
return [
col.id
for col in DEMOGRAPHIC_COLUMNS
if col.id in selected_header_ids or not col.optional
]
def construct_response_dictionary(
resp, columns, optional_headers, include_exp_data=True
):
if optional_headers is None:
optional_headers = {}
resp_dict = {}
for col in columns:
if col.id in optional_headers or not col.optional:
try:
object_name, field_name = col.id.split("__")
if object_name in resp_dict:
resp_dict[object_name][field_name] = col.extractor(resp)
else:
resp_dict[object_name] = {field_name: col.extractor(resp)}
except ValueError:
resp_dict[col.id] = col.extractor(resp)
# Include exp_data field in dictionary?
if include_exp_data:
resp_dict["exp_data"] = resp.exp_data
return resp_dict
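# Worked example (hypothetical values): construct_response_dictionary splits each column
# id on "__" to nest the output, so "child__gender" becomes {"child": {"gender": ...}};
# ids without exactly one "__" raise ValueError on unpacking and are kept flat. A call
# like construct_response_dictionary(resp, RESPONSE_COLUMNS, {"child__gender"}) yields
# roughly:
#   {"response": {"uuid": "...", "completed": True, ...},
#    "child": {"hashed_id": "...", "gender": "f", ...},
#    "consent": {"ruling": "accepted", ...},
#    "exp_data": {...}}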
class FrameDataRow(NamedTuple):
response_uuid: str
child_hashed_id: str
frame_id: str
event_number: str
key: str
value: str
FRAME_DATA_HEADER_DESCRIPTIONS = {
"response_uuid": "Unique identifier for this response; can be matched to summary data and video filenames",
"child_hashed_id": (
"Hashed identifier for the child associated with this response; can be matched to summary data "
"child_hashed_id. This random ID may be published directly; it is specific to this study. If you "
"need to match children across multiple studies, use the child_global_id."
),
"frame_id": (
"Identifier for the particular frame responsible for this data; matches up to an element in the "
"response_sequence in the summary data file"
),
"event_number": (
"Index of the event responsible for this data, if this is an event. Indexes start from 0 within each "
"frame (and within global data) within each response. Blank for non-event data."
),
"key": "Label for a piece of data collected during this frame - for example, 'formData.child_favorite_animal'",
"value": "Value of the data associated with this key (of the indexed event if applicable) - for example, 'giraffe'",
}
def get_frame_data(resp: Union[Response, Dict]) -> List[FrameDataRow]:
"""Get list of data stored in response's exp_data and global_event_timings fields.
Args:
resp(Response or dict): response data to process. If dict, must contain fields
child__uuid, study__uuid, study__salt, study__hash_digits, uuid, exp_data, and
global_event_timings.
Returns:
List of FrameDataRows each representing a single piece of data from global_event_timings or
exp_data. Descriptions of each field of the FrameDataRow are given in FRAME_DATA_HEADER_DESCRIPTIONS.
"""
if type(resp) is not dict:
resp = {
"child__uuid": resp.child.uuid,
"study__uuid": resp.study.uuid,
"study__salt": resp.study.salt,
"study__hash_digits": resp.study.hash_digits,
"uuid": resp.uuid,
"exp_data": resp.exp_data,
"global_event_timings": resp.global_event_timings,
}
frame_data_tuples = []
child_hashed_id = hash_id(
resp["child__uuid"],
resp["study__uuid"],
resp["study__salt"],
resp["study__hash_digits"],
)
# First add all of the global event timings as events with frame_id "global"
for (iEvent, event) in enumerate(resp["global_event_timings"]):
for (key, value) in event.items():
frame_data_tuples.append(
FrameDataRow(
child_hashed_id=child_hashed_id,
response_uuid=str(resp["uuid"]),
frame_id="global",
key=key,
event_number=str(iEvent),
value=value,
)
)
# Next add all data in exp_data
event_prefix = "eventTimings."
for frame_id, frame_data in resp["exp_data"].items():
for (key, value) in flatten_dict(frame_data).items():
# Process event data separately and include event_number within frame
if key.startswith(event_prefix):
key_pieces = key.split(".")
frame_data_tuples.append(
FrameDataRow(
child_hashed_id=child_hashed_id,
response_uuid=str(resp["uuid"]),
frame_id=frame_id,
key=".".join(key_pieces[2:]),
event_number=str(key_pieces[1]),
value=value,
)
)
# omit frameType values from CSV
elif key == "frameType":
continue
# Omit the DOB from any exit survey
elif key == "birthDate" and frame_data.get("frameType", None) == "EXIT":
continue
# Omit empty generatedProperties values from CSV
elif key == "generatedProperties" and not value:
continue
# For all other data, create a regular entry with frame_id and no event #
else:
frame_data_tuples.append(
FrameDataRow(
child_hashed_id=child_hashed_id,
response_uuid=str(resp["uuid"]),
frame_id=frame_id,
key=key,
event_number="",
value=value,
)
)
return frame_data_tuples
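# Example of the rows produced (sketch; the frame id is hypothetical): a global event timing
# {"eventType": "exitEarly"} at index 0 becomes
#   FrameDataRow(..., frame_id="global", event_number="0", key="eventType", value="exitEarly");
# a flattened exp_data key "eventTimings.3.eventType" inside frame "1-my-frame" becomes
#   FrameDataRow(..., frame_id="1-my-frame", event_number="3", key="eventType", value=...);
# non-event keys such as "formData.child_favorite_animal" keep an empty event_number.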
def build_framedata_dict_csv(writer, responses):
response_paginator = Paginator(responses, RESPONSE_PAGE_SIZE)
unique_frame_ids = set()
event_keys = set()
unique_frame_keys_dict = {}
for page_num in response_paginator.page_range:
page_of_responses = response_paginator.page(page_num)
for resp in page_of_responses:
this_resp_data = get_frame_data(resp)
these_ids = {
d.frame_id.partition("-")[2]
for d in this_resp_data
if not d.frame_id == "global"
}
event_keys = event_keys | {
d.key for d in this_resp_data if d.event_number != ""
}
unique_frame_ids = unique_frame_ids | these_ids
for frame_id in these_ids:
these_keys = {
d.key
for d in this_resp_data
if d.frame_id.partition("-")[2] == frame_id and d.event_number == ""
}
if frame_id in unique_frame_keys_dict:
unique_frame_keys_dict[frame_id] = (
unique_frame_keys_dict[frame_id] | these_keys
)
else:
unique_frame_keys_dict[frame_id] = these_keys
# Start with general descriptions of high-level headers (child_id, response_id, etc.)
writer.writerows(
[
{"column": header, "description": description}
for (header, description) in FRAME_DATA_HEADER_DESCRIPTIONS.items()
]
)
writer.writerow(
{
"possible_frame_id": "global",
"frame_description": "Data not associated with a particular frame",
}
)
# Add placeholders to describe each frame type
unique_frame_ids = sorted(list(unique_frame_ids))
for frame_id in unique_frame_ids:
writer.writerow(
{
"possible_frame_id": "*-" + frame_id,
"frame_description": "RESEARCHER: INSERT FRAME DESCRIPTION",
}
)
unique_frame_keys = sorted(list(unique_frame_keys_dict[frame_id]))
for k in unique_frame_keys:
writer.writerow(
{
"possible_frame_id": "*-" + frame_id,
"possible_key": k,
"key_description": "RESEARCHER: INSERT DESCRIPTION OF WHAT THIS KEY MEANS IN THIS FRAME",
}
)
event_keys = sorted(list(event_keys))
event_key_stock_descriptions = {
"eventType": (
"Descriptor for this event; determines what other data is available. Global event 'exitEarly' records "
"cases where the participant attempted to exit the study early by closing the tab/window or pressing F1 "
"or ctrl-X. RESEARCHER: INSERT DESCRIPTIONS OF PARTICULAR EVENTTYPES USED IN YOUR STUDY. (Note: you can "
"find a list of events recorded by each frame in the frame documentation at "
"https://lookit.github.io/ember-lookit-frameplayer, under the Events header.)"
),
"exitType": (
"Used in the global event exitEarly. Only value stored at this point is 'browserNavigationAttempt'"
),
"lastPageSeen": (
"Used in the global event exitEarly. Index of the frame the participant was on before exit attempt."
),
"pipeId": (
"Recorded by any event in a video-capture-equipped frame. Internal video ID used by Pipe service; only "
"useful for troubleshooting in rare cases."
),
"streamTime": (
"Recorded by any event in a video-capture-equipped frame. Indicates time within webcam "
"video (videoId) to nearest 0.1 second. If recording has not started yet, may be 0 or null."
),
"timestamp": "Recorded by all events. Timestamp of event in format e.g. 2019-11-07T17:14:43.626Z",
"videoId": (
"Recorded by any event in a video-capture-equipped frame. Filename (without .mp4 extension) of video "
"currently being recorded."
),
}
for k in event_keys:
writer.writerow(
{
"possible_frame_id": "any (event data)",
"possible_key": k,
"key_description": event_key_stock_descriptions.get(
k, "RESEARCHER: INSERT DESCRIPTION OF WHAT THIS EVENT KEY MEANS"
),
}
)
def build_single_response_framedata_csv(response):
"""
Builds CSV file contents for frame-level data from a single response. Used for both
building zip archive of all response data & offering individual-file downloads on individual responses view.
"""
this_resp_data = get_frame_data(response)
output, writer = csv_namedtuple_writer(FrameDataRow)
writer.writerows(this_resp_data)
return output.getvalue()
class ResponseDownloadMixin(CanViewStudyResponsesMixin, MultipleObjectMixin):
model = Response
paginate_by = 10
ordering = "id"
def get_queryset(self):
study = self.study
return study.responses_for_researcher(self.request.user).order_by(
self.get_ordering()
)
class DemographicDownloadMixin(CanViewStudyResponsesMixin, MultipleObjectMixin):
model = Response
paginate_by = 10
ordering = "id"
def get_queryset(self):
study = self.study
return (
study.responses_for_researcher(self.request.user)
.order_by(self.get_ordering())
.select_related("child", "child__user", "study", "demographic_snapshot")
.values(
"uuid",
"date_created",
"child__user__uuid",
"study__uuid",
"study__salt",
"study__hash_digits",
"demographic_snapshot__uuid",
"demographic_snapshot__created_at",
"demographic_snapshot__number_of_children",
"demographic_snapshot__child_birthdays",
"demographic_snapshot__languages_spoken_at_home",
"demographic_snapshot__number_of_guardians",
"demographic_snapshot__number_of_guardians_explanation",
"demographic_snapshot__race_identification",
"demographic_snapshot__age",
"demographic_snapshot__gender",
"demographic_snapshot__education_level",
"demographic_snapshot__spouse_education_level",
"demographic_snapshot__annual_income",
"demographic_snapshot__number_of_books",
"demographic_snapshot__additional_comments",
"demographic_snapshot__country",
"demographic_snapshot__state",
"demographic_snapshot__density",
"demographic_snapshot__lookit_referrer",
"demographic_snapshot__extra",
)
)
class StudyResponsesList(ResponseDownloadMixin, generic.ListView):
"""
View to display a list of study responses.
"""
template_name = "studies/study_responses.html"
def get_ordering(self):
"""
Determine sort field and order. Sorting on id actually sorts on child id, not response id.
        Sorting on status actually sorts on the 'completed' field, where we are alphabetizing
"in progress" and "completed"
"""
orderby = self.request.GET.get("sort", "id")
return orderby.replace("id", "child__id").replace("status", "completed")
def get_queryset(self):
return (
super()
.get_queryset()
.prefetch_related(
"consent_rulings__arbiter",
Prefetch(
"feedback",
queryset=Feedback.objects.select_related("researcher").order_by(
"-id"
),
),
)
)
def get_context_data(self, **kwargs):
"""
In addition to the study, adds several items to the context dictionary. Study results
are paginated.
"""
context = super().get_context_data(**kwargs)
context["study"] = study = self.study
paginated_responses = context["object_list"]
columns_included_in_summary = study.columns_included_in_summary()
columns_included_in_table = [
"child__hashed_id",
"response__uuid",
"response__id",
"response__status",
"response__completed",
"response__is_preview",
]
response_data = []
for resp in paginated_responses:
# Info needed for table display of individual responses
this_resp_data = {
col.id: col.extractor(resp)
for col in RESPONSE_COLUMNS
if col.id in columns_included_in_table
}
# Exception - store actual date object for date created
this_resp_data["response__date_created"] = resp.date_created
# info needed for summary table shown at right
this_resp_data["summary"] = [
{
"name": col.name,
"value": col.extractor(resp),
"description": col.description,
}
for col in RESPONSE_COLUMNS
if col.id in columns_included_in_summary
]
this_resp_data["videos"] = resp.videos.values("pk", "full_name")
for v in this_resp_data["videos"]:
v["display_name"] = (
v["full_name"]
.replace("videoStream_{}_".format(study.uuid), "...")
.replace("_{}_".format(resp.uuid), "...")
)
response_data.append(this_resp_data)
context["response_data"] = response_data
context["data_options"] = [col for col in RESPONSE_COLUMNS if col.optional]
context["can_view_regular_responses"] = self.request.user.has_study_perms(
StudyPermission.READ_STUDY_RESPONSE_DATA, context["study"]
)
context["can_view_preview_responses"] = self.request.user.has_study_perms(
StudyPermission.READ_STUDY_PREVIEW_DATA, context["study"]
)
context["can_edit_feedback"] = self.request.user.has_study_perms(
StudyPermission.EDIT_STUDY_FEEDBACK, context["study"]
)
return context
def build_video_display_name(self, study_uuid, response_uuid, vid_name):
"""
Strips study_uuid and response_uuid out of video responses titles for better display.
"""
return ". . ." + ". . .".join(
vid_name.split(study_uuid + "_")[1].split("_" + response_uuid + "_")
)
class StudySingleResponseDownload(ResponseDownloadMixin, View):
"""
Download a single study response in the selected format with selected headers.
"""
def get(self, *args, **kwargs):
data_type = self.request.GET.get("data-type-selector", None)
if data_type not in ["json", "csv", "framedata"]:
raise SuspiciousOperation
response_id = self.request.GET.get("response_id", None)
try:
resp = self.get_queryset().get(pk=response_id)
except ObjectDoesNotExist:
raise SuspiciousOperation
study = self.study
header_options = set(self.request.GET.getlist("data_options"))
extension = "json" if data_type == "json" else "csv"
filename = "{}_{}{}.{}".format(
study_name_for_files(study.name),
str(resp.uuid),
"_frames"
if data_type == "json"
else "_identifiable"
if IDENTIFIABLE_DATA_HEADERS & header_options
else "",
extension,
)
if data_type == "json":
cleaned_data = json.dumps(
construct_response_dictionary(resp, RESPONSE_COLUMNS, header_options),
indent="\t",
default=str,
)
elif data_type == "csv":
row_data = flatten_dict(
{col.id: col.extractor(resp) for col in RESPONSE_COLUMNS}
)
header_list = get_response_headers(header_options, row_data.keys())
output, writer = csv_dict_output_and_writer(header_list)
writer.writerow(row_data)
cleaned_data = output.getvalue()
elif data_type == "framedata":
cleaned_data = build_single_response_framedata_csv(resp)
else:
raise SuspiciousOperation
response = HttpResponse(cleaned_data, content_type="text/{}".format(extension))
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyResponseVideoAttachment(
ResearcherLoginRequiredMixin, UserPassesTestMixin, StudyLookupMixin, View
):
"""
View that redirects to a requested video for a study response.
"""
raise_exception = True
@cached_property
def video(self):
# Only select the video from consented videos for this study
return self.study.videos_for_consented_responses.get(
pk=self.kwargs.get("video")
)
def can_view_this_video(self):
user = self.request.user
study = self.study
video = self.video
return user.is_researcher and (
(
user.has_study_perms(StudyPermission.READ_STUDY_RESPONSE_DATA, study)
and not video.response.is_preview
)
or (
user.has_study_perms(StudyPermission.READ_STUDY_PREVIEW_DATA, study)
and video.response.is_preview
)
)
test_func = can_view_this_video
def get(self, request, *args, **kwargs):
video = self.video
download_url = video.download_url
if self.request.GET.get("mode") == "download":
r = requests.get(download_url)
response = FileResponse(
File.open(io.BytesIO(r.content)),
filename=video.filename,
as_attachment=True,
)
return response
return redirect(download_url)
class StudyResponseSubmitFeedback(StudyLookupMixin, UserPassesTestMixin, View):
"""
View to create or edit response feedback.
"""
def user_can_edit_feedback(self):
user = self.request.user
study = self.study
# First check user has permission to be editing feedback from this study at all
        if not (
            user.is_researcher
            and user.has_study_perms(StudyPermission.EDIT_STUDY_FEEDBACK, study)
        ):
return False
# Check that the feedback_id (if given) is from this study
feedback_id = self.request.POST.get("feedback_id", None)
if feedback_id:
try:
feedback = Feedback.objects.get(id=feedback_id)
except ObjectDoesNotExist:
return False
if feedback.response.study_id != study.pk:
return False
# Check that the response_id (if given) is from this study
response_id = self.request.POST.get("response_id", None)
if response_id:
try:
response = Response.objects.get(id=int(response_id))
except ObjectDoesNotExist:
return False
if response.study_id != study.pk:
return False
return True
test_func = user_can_edit_feedback
def post(self, request, *args, **kwargs):
"""
Create or edit feedback. Pass feedback_id to edit existing feedback, or response_id to create new
feedback for that response.
"""
form_data = self.request.POST
user = self.request.user
study = self.study
feedback_id = form_data.get("feedback_id", None)
comment = form_data.get("comment", "")
if feedback_id:
Feedback.objects.filter(id=feedback_id).update(comment=comment)
else:
response_id = int(form_data.get("response_id"))
Feedback.objects.create(
response_id=response_id, researcher=user, comment=comment
)
return HttpResponseRedirect(
reverse("exp:study-responses-list", kwargs=dict(pk=study.pk))
)
class StudyResponsesConsentManager(
ResearcherLoginRequiredMixin,
UserPassesTestMixin,
SingleObjectFetchProtocol[Study],
generic.DetailView,
):
"""Manage consent videos from here: approve or reject as evidence of informed consent."""
template_name = "studies/study_responses_consent_ruling.html"
queryset = Study.objects.all()
raise_exception = True
def user_can_code_consent(self):
user = self.request.user
study = self.get_object()
return user.is_researcher and (
user.has_study_perms(StudyPermission.CODE_STUDY_CONSENT, study)
or user.has_study_perms(StudyPermission.CODE_STUDY_PREVIEW_CONSENT, study)
)
test_func = user_can_code_consent
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# Need to prefetch our responses with consent-footage videos.
study = context["study"]
# TODO: technically should not grant access to consent videos for preview data unless has that perm
# (or should clearly indicate that code_study_consent means preview + actual data)
preview_only = not self.request.user.has_study_perms(
StudyPermission.CODE_STUDY_CONSENT, study
)
responses = get_responses_with_current_rulings_and_videos(
study.id, preview_only
)
context["loaded_responses"] = responses
context["summary_statistics"] = get_consent_statistics(study.id, preview_only)
# Using a map for arbitrarily structured data - lists and objects that we can't just trivially shove onto
# data-* properties in HTML
response_key_value_store = {}
paginator = Paginator(responses, RESPONSE_PAGE_SIZE)
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
# two jobs - generate statistics and populate k/v store.
for response in page_of_responses:
response_json = response_key_value_store[str(response["uuid"])] = {}
response["uuid"] = str(response.pop("uuid"))
response_json["videos"] = response.pop("videos")
response_json["details"] = {
"general": {
"uuid": response["uuid"],
"global_event_timings": json.dumps(
response.pop("global_event_timings")
),
"sequence": json.dumps(response.pop("sequence")),
"completed": json.dumps(response.pop("completed")),
"date_created": str(response["date_created"]),
},
"participant": {
"hashed_id": hash_participant_id(response),
"uuid": str(response.pop("child__user__uuid")),
"nickname": response.pop("child__user__nickname"),
},
"child": {
"hashed_id": hash_child_id(response),
"uuid": str(response.pop("child__uuid")),
"name": response.pop("child__given_name"),
"birthday": str(response.pop("child__birthday")),
"gender": response.pop("child__gender"),
"additional_information": response.pop(
"child__additional_information"
),
},
}
# TODO: Use json_script template tag to create JSON that can be used in Javascript
# (see https://docs.djangoproject.com/en/3.0/ref/templates/builtins/#json-script)
context["response_key_value_store"] = json.dumps(response_key_value_store)
return context
def post(self, request, *args, **kwargs):
"""This is where consent rulings are submitted."""
form_data = self.request.POST
user = self.request.user
study = self.get_object()
preview_only = not self.request.user.has_study_perms(
StudyPermission.CODE_STUDY_CONSENT, study
)
# Only allow any action on preview responses unless full perms
responses = study.responses
if preview_only:
responses = responses.filter(is_preview=True)
comments = json.loads(form_data.get("comments"))
# We now accept pending rulings to reverse old reject/approve decisions.
for ruling in ("accepted", "rejected", "pending"):
judged_responses = responses.filter(uuid__in=form_data.getlist(ruling))
for response in judged_responses:
response.consent_rulings.create(
action=ruling,
arbiter=user,
comments=comments.pop(str(response.uuid), None),
)
response.save()
# if there are any comments left over, these will count as new rulings that are the same as the last.
if comments:
for resp_uuid, comment in comments.items():
response = responses.get(uuid=resp_uuid)
response.consent_rulings.create(
action=response.most_recent_ruling, arbiter=user, comments=comment
)
return HttpResponseRedirect(
reverse(
"exp:study-responses-consent-manager",
kwargs=dict(pk=self.get_object().pk),
)
)
def get(self, request, *args, **kwargs):
if self.get_object().study_type.is_external:
messages.error(request, "There is no consent manager for external studies.")
return HttpResponseRedirect(reverse("exp:study-detail", kwargs=kwargs))
else:
return super().get(request, *args, **kwargs)
class StudyResponsesAll(
CanViewStudyResponsesMixin, SingleObjectFetchProtocol[Study], generic.DetailView
):
"""
StudyResponsesAll shows a variety of download options for response and child data
from a given study. (It does not actually show any data.)
"""
template_name = "studies/study_responses_all.html"
queryset = Study.objects.all()
http_method_names = ["get"]
def get_context_data(self, **kwargs):
"""
In addition to the study, adds several items to the context dictionary.
"""
context = super().get_context_data(**kwargs)
context["n_responses"] = (
context["study"].responses_for_researcher(self.request.user).count()
)
context["data_options"] = [col for col in RESPONSE_COLUMNS if col.optional]
context["can_delete_preview_data"] = self.request.user.has_study_perms(
StudyPermission.DELETE_ALL_PREVIEW_DATA, context["study"]
)
context["can_view_regular_responses"] = self.request.user.has_study_perms(
StudyPermission.READ_STUDY_RESPONSE_DATA, context["study"]
)
context["can_view_preview_responses"] = self.request.user.has_study_perms(
StudyPermission.READ_STUDY_PREVIEW_DATA, context["study"]
)
return context
class StudyDeletePreviewResponses(
ResearcherLoginRequiredMixin,
UserPassesTestMixin,
SingleObjectFetchProtocol[Study],
SingleObjectMixin,
View,
):
queryset = Study.objects.all()
def user_can_delete_preview_data(self):
user = self.request.user
study = self.get_object()
return user.is_researcher and user.has_study_perms(
StudyPermission.DELETE_ALL_PREVIEW_DATA, study
)
test_func = user_can_delete_preview_data
def post(self, request, *args, **kwargs):
"""
Post method on all responses view handles the 'delete all preview data' button.
"""
study = self.get_object()
# Note: delete all, not just consented!
preview_responses = study.responses.filter(is_preview=True).prefetch_related(
"videos", "responselog_set", "consent_rulings", "feedback"
)
paginator = Paginator(preview_responses, RESPONSE_PAGE_SIZE)
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
for resp in page_of_responses:
# response logs, consent rulings, feedback, videos will all be deleted
# via cascades - videos will be removed from S3 also on pre_delete hook
resp.delete()
return HttpResponseRedirect(
reverse("exp:study-responses-all", kwargs={"pk": study.id})
)
class StudyResponsesJSON(ResponseDownloadMixin, generic.list.ListView):
"""
Hitting this URL downloads all study responses in JSON format.
"""
# Smaller pagination because individual responses may be large and we don't want the json representing 100
# responses in memory
paginate_by = 1
def make_chunk(self, paginator, page_num, header_options):
chunk = ""
if page_num == 1:
chunk = "[\n"
chunk += ",\n".join(
json.dumps(
construct_response_dictionary(resp, RESPONSE_COLUMNS, header_options),
indent="\t", # Use tab rather than spaces to make file smaller (ex. 60MB -> 25MB)
default=str,
)
for resp in paginator.page(page_num)
)
if page_num == paginator.page_range[-1]:
chunk += "\n]"
else:
chunk += ",\n"
return chunk
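    # Sketch of the streamed output with paginate_by=1 and three responses:
    #   page 1 -> "[\n" + JSON for response 1 + ",\n"
    #   page 2 ->        JSON for response 2 + ",\n"
    #   page 3 ->        JSON for response 3 + "\n]"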
def render_to_response(self, context, **response_kwargs):
paginator = context["paginator"]
study = self.study
header_options = set(self.request.GET.getlist("data_options"))
filename = "{}_{}.json".format(
study_name_for_files(study.name),
"all-responses"
+ ("-identifiable" if IDENTIFIABLE_DATA_HEADERS & header_options else ""),
)
response = StreamingHttpResponse(
(
self.make_chunk(paginator, page_num, header_options)
for page_num in paginator.page_range
),
content_type="text/json",
)
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyResponsesCSV(ResponseDownloadMixin, generic.list.ListView):
"""
Hitting this URL downloads a summary of all study responses in CSV format.
"""
def render_to_response(self, context, **response_kwargs):
paginator = context["paginator"]
study = self.study
headers = set()
session_list = []
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
for resp in page_of_responses:
row_data = flatten_dict(
{col.id: col.extractor(resp) for col in RESPONSE_COLUMNS}
)
# Add any new headers from this session
headers = headers | row_data.keys()
session_list.append(row_data)
header_options = set(self.request.GET.getlist("data_options"))
header_list = get_response_headers(header_options, headers)
output, writer = csv_dict_output_and_writer(header_list)
writer.writerows(session_list)
cleaned_data = output.getvalue()
filename = "{}_{}.csv".format(
study_name_for_files(study.name),
"all-responses"
+ ("-identifiable" if IDENTIFIABLE_DATA_HEADERS & header_options else ""),
)
response = HttpResponse(cleaned_data, content_type="text/csv")
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyResponsesDictCSV(CanViewStudyResponsesMixin, View):
"""
Hitting this URL downloads a data dictionary for the study response summary in CSV format. Does not depend on actual response data.
"""
def build_summary_dict_csv(self, optional_headers_selected_ids):
"""
Builds CSV file contents for data dictionary corresponding to the overview CSV
"""
descriptions = {col.id: col.description for col in RESPONSE_COLUMNS}
header_list = get_response_headers(
optional_headers_selected_ids, descriptions.keys()
)
all_descriptions = [
{"column": header, "description": descriptions[header]}
for header in header_list
]
output, writer = csv_dict_output_and_writer(["column", "description"])
writer.writerows(all_descriptions)
return output.getvalue()
def get(self, request, *args, **kwargs):
study = self.study
header_options = self.request.GET.getlist("data_options")
cleaned_data = self.build_summary_dict_csv(header_options)
filename = "{}_{}.csv".format(
study_name_for_files(study.name), "all-responses-dict"
)
response = HttpResponse(cleaned_data, content_type="text/csv")
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyChildrenCSV(ResponseDownloadMixin, generic.list.ListView):
"""
Hitting this URL downloads a summary of all children who participated in CSV format.
"""
def render_to_response(self, context, **response_kwargs):
paginator = context["paginator"]
study = self.study
child_list = []
session_list = []
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
for resp in page_of_responses:
row_data = flatten_dict(
{
col.id: col.extractor(resp)
for col in RESPONSE_COLUMNS
if col.id in CHILD_CSV_HEADERS
}
)
if row_data["child__global_id"] not in child_list:
child_list.append(row_data["child__global_id"])
session_list.append(row_data)
output, writer = csv_dict_output_and_writer(CHILD_CSV_HEADERS)
writer.writerows(session_list)
cleaned_data = output.getvalue()
filename = "{}_{}.csv".format(
study_name_for_files(study.name), "all-children-identifiable"
)
response = HttpResponse(cleaned_data, content_type="text/csv")
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyChildrenDictCSV(CanViewStudyResponsesMixin, View):
"""
Hitting this URL downloads a data dictionary in CSV format for the summary of children who participated.
Does not depend on actual response data.
TODO: separate from response data mixin
"""
def build_child_dict_csv(self):
"""
Builds CSV file contents for data dictionary for overview of all child participants
"""
all_descriptions = [
{"column": col.id, "description": col.description}
for col in RESPONSE_COLUMNS
if col.id in CHILD_CSV_HEADERS
]
output, writer = csv_dict_output_and_writer(["column", "description"])
writer.writerows(all_descriptions)
return output.getvalue()
def get(self, request, *args, **kwargs):
study = self.study
cleaned_data = self.build_child_dict_csv()
filename = "{}_{}.csv".format(
study_name_for_files(study.name), "all-children-dict"
)
response = HttpResponse(cleaned_data, content_type="text/csv")
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyResponsesFrameDataCSV(ResponseDownloadMixin, generic.list.ListView):
"""Hitting this URL downloads a ZIP file with frame data from one response per file in CSV format"""
# TODO: with large files / many responses generation can take a while. Should generate asynchronously along
# with the data dict.
def render_to_response(self, context, **response_kwargs):
paginator = context["paginator"]
study = self.study
if study.study_type.is_external:
messages.error(
self.request, "Frame data is not available for External Studies."
)
return redirect(reverse("exp:study-responses-all", kwargs={"pk": study.pk}))
zipped_file = io.BytesIO() # import io
with zipfile.ZipFile(zipped_file, "w", zipfile.ZIP_DEFLATED) as zipped:
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
for resp in page_of_responses:
data = build_single_response_framedata_csv(resp)
filename = "{}_{}_{}.csv".format(
study_name_for_files(study.name), resp.uuid, "frames"
)
zipped.writestr(filename, data)
zipped_file.seek(0)
response = FileResponse(
zipped_file,
as_attachment=True,
filename="{}_framedata_per_session.zip".format(
study_name_for_files(study.name)
),
)
return response
class StudyResponsesFrameDataDictCSV(ResponseDownloadMixin, View):
"""
Hitting this URL queues creation of a template data dictionary for frame-level data in CSV format.
The file is put on GCP and a link is emailed to the user.
"""
def get(self, request, *args, **kwargs):
study = self.study
if study.study_type.is_external:
messages.error(
request, "Frame data dictionary is not available for external studies"
)
else:
filename = "{}_{}_{}".format(
study_name_for_files(study.name), study.uuid, "all-frames-dict"
)
build_framedata_dict.delay(filename, study.uuid, self.request.user.uuid)
messages.success(
request,
f"A frame data dictionary for {study.name} is being generated. You will be emailed a link when it's completed.",
)
return HttpResponseRedirect(
reverse("exp:study-responses-all", kwargs=self.kwargs)
)
class StudyDemographics(
CanViewStudyResponsesMixin, SingleObjectFetchProtocol[Study], generic.DetailView
):
"""
StudyDemographics view shows participant demographic snapshots associated
with each response to the study
"""
template_name = "studies/study_demographics.html"
queryset = Study.objects.all()
def get_context_data(self, **kwargs):
"""
Adds information for displaying how many and which types of responses are available.
"""
context = super().get_context_data(**kwargs)
context["n_responses"] = (
context["study"].responses_for_researcher(self.request.user).count()
)
context["can_view_regular_responses"] = self.request.user.has_study_perms(
StudyPermission.READ_STUDY_RESPONSE_DATA, context["study"]
)
context["can_view_preview_responses"] = self.request.user.has_study_perms(
StudyPermission.READ_STUDY_PREVIEW_DATA, context["study"]
)
return context
class StudyDemographicsJSON(DemographicDownloadMixin, generic.list.ListView):
"""
Hitting this URL downloads all participant demographics in JSON format.
"""
def render_to_response(self, context, **response_kwargs):
study = self.study
header_options = self.request.GET.getlist("demo_options")
json_responses = []
paginator = context["paginator"]
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
for resp in page_of_responses:
json_responses.append(
json.dumps(
construct_response_dictionary(
resp,
DEMOGRAPHIC_COLUMNS,
header_options,
include_exp_data=False,
),
indent="\t",
default=str,
)
)
cleaned_data = f"[ {', '.join(json_responses)} ]"
filename = "{}_{}.json".format(
study_name_for_files(study.name), "all-demographic-snapshots"
)
response = HttpResponse(cleaned_data, content_type="text/json")
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyDemographicsCSV(DemographicDownloadMixin, generic.list.ListView):
"""
Hitting this URL downloads all participant demographics in CSV format.
"""
def render_to_response(self, context, **response_kwargs):
study = self.study
paginator = context["paginator"]
header_options = set(self.request.GET.getlist("demo_options"))
participant_list = []
headers_for_download = get_demographic_headers(header_options)
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
for resp in page_of_responses:
row_data = {col.id: col.extractor(resp) for col in DEMOGRAPHIC_COLUMNS}
participant_list.append(row_data)
output, writer = csv_dict_output_and_writer(headers_for_download)
writer.writerows(participant_list)
cleaned_data = output.getvalue()
filename = "{}_{}.csv".format(
study_name_for_files(study.name), "all-demographic-snapshots"
)
response = HttpResponse(cleaned_data, content_type="text/csv")
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyDemographicsDictCSV(DemographicDownloadMixin, generic.list.ListView):
"""
    Hitting this URL downloads a data dictionary for participant demographics in CSV format.
Does not depend on any actual data.
"""
def render_to_response(self, context, **response_kwargs):
header_options = set(self.request.GET.getlist("demo_options"))
headers_for_download = get_demographic_headers(header_options)
all_descriptions = [
{"column": col.id, "description": col.description}
for col in DEMOGRAPHIC_COLUMNS
if col.id in headers_for_download
]
output, writer = csv_dict_output_and_writer(["column", "description"])
writer.writerows(all_descriptions)
cleaned_data = output.getvalue()
filename = "{}_{}.csv".format(
study_name_for_files(self.study.name), "all-demographic-snapshots-dict"
)
response = HttpResponse(cleaned_data, content_type="text/csv")
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyCollisionCheck(ResponseDownloadMixin, View):
"""
Hitting this URL checks for collisions among all child and account hashed IDs, and returns a string describing
any collisions (empty string if none).
"""
def get(self, request, *args, **kwargs):
study = self.study
responses = (
study.consented_responses.order_by("id")
.select_related("child", "child__user", "study")
.values(
"uuid",
"child__uuid",
"child__user__uuid",
"study__uuid",
"study__salt",
"study__hash_digits",
)
)
child_dict = {}
account_dict = {}
collision_text = ""
# Note: could also just check number of unique global vs. hashed IDs in full dataset;
# only checking one-by-one for more informative output.
paginator = Paginator(responses, RESPONSE_PAGE_SIZE)
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
for resp in page_of_responses:
participant_hashed_id = hash_participant_id(resp)
participant_global_id = resp["child__user__uuid"]
child_hashed_id = hash_child_id(resp)
child_global_id = resp["child__uuid"]
if participant_hashed_id in account_dict:
if participant_global_id != account_dict[participant_hashed_id]:
collision_text += "Participant hashed ID {} ({}, {})\n".format(
participant_hashed_id,
account_dict[participant_hashed_id],
participant_global_id,
)
else:
account_dict[participant_hashed_id] = participant_global_id
if child_hashed_id in child_dict:
if child_global_id != child_dict[child_hashed_id]:
collision_text += "Child hashed ID {} ({}, {})<br>".format(
child_hashed_id,
child_dict[child_hashed_id],
child_global_id,
)
else:
child_dict[child_hashed_id] = child_global_id
return JsonResponse({"collisions": collision_text})
class StudyAttachments(CanViewStudyResponsesMixin, generic.ListView):
"""
StudyAttachments View shows video attachments for the study
"""
template_name = "studies/study_attachments.html"
model = Video
paginate_by = 100
def get_ordering(self):
return self.request.GET.get("sort", "-created_at") or "-created_at"
def get_queryset(self):
"""Fetches all consented videos this user has access to.
Returns:
QuerySet: all videos from this study where response has been marked as
consented and response is of a type (preview/actual data) that user can view
Todo:
* use a helper (e.g. in queries) select_videos_for_user to fetch the
appropriate videos here and in build_zipfile_of_videos - deferring for the moment
to work out dependencies.
"""
study = self.study
videos = study.videos_for_consented_responses
if not self.request.user.has_study_perms(
StudyPermission.READ_STUDY_RESPONSE_DATA, study
):
videos = videos.filter(response__is_preview=True)
if not self.request.user.has_study_perms(
StudyPermission.READ_STUDY_PREVIEW_DATA, study
):
videos = videos.filter(response__is_preview=False)
match = self.request.GET.get("match", "")
if match:
videos = videos.filter(full_name__icontains=match)
return videos.order_by(self.get_ordering())
def get_context_data(self, **kwargs):
"""
In addition to the study, adds several items to the context dictionary. Study results
are paginated.
"""
context = super().get_context_data(**kwargs)
context["match"] = self.request.GET.get("match", "")
context["study"] = self.study
return context
def post(self, request, *args, **kwargs):
"""
Downloads study video
"""
match = self.request.GET.get("match", "")
study = self.study
if self.request.POST.get("all-attachments"):
build_zipfile_of_videos.delay(
f"{study.uuid}_videos",
study.uuid,
match,
self.request.user.uuid,
consent_only=False,
)
messages.success(
request,
f"An archive of videos for {study.name} is being generated. You will be emailed a link when it's completed.",
)
if self.request.POST.get("all-consent-videos"):
build_zipfile_of_videos.delay(
f"{study.uuid}_consent_videos",
study.uuid,
match,
self.request.user.uuid,
consent_only=True,
)
messages.success(
request,
f"An archive of consent videos for {study.name} is being generated. You will be emailed a link when it's completed.",
)
return HttpResponseRedirect(
reverse("exp:study-attachments", kwargs=self.kwargs)
)
|
#!/usr/bin/env python
# encoding: utf-8
"""
Script for installing the components of the ArPI home security system to a running Raspberry PI Zero Wifi host.
It uses the configuration file install.yaml!
---
@author: Gábor Kovács
@copyright: 2017 arpi-security.info. All rights reserved.
@contact: [email protected]
"""
import json
import logging
import subprocess
import sys
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from os import system
from os.path import join, exists
from socket import gaierror
from time import sleep
import paramiko
import yaml
from paramiko.ssh_exception import AuthenticationException, NoValidConnectionsError
from scp import SCPClient
from utils import (
deep_copy,
execute_remote,
generate_SSH_key,
list_copy,
print_lines,
show_progress
)
CONFIG = {}
logging.basicConfig(format="%(message)s")
logger = logging.getLogger()
logging.getLogger("paramiko").setLevel(logging.CRITICAL)
__all__ = []
__version__ = 0.1
__date__ = "2017-08-21"
__updated__ = "2019-08-21"
program_shortdesc = __import__("__main__").__doc__.split("---")[0]
program_license = """%s
Created by [email protected] on %s.
Copyright 2019 arpi-security.info. All rights reserved.
USAGE
""" % (
program_shortdesc,
str(__date__),
)
def get_connection():
try:
logger.info(
"Connecting with private key in '%s' %s@%s",
CONFIG["arpi_key_name"],
CONFIG["arpi_username"],
CONFIG["arpi_hostname"],
)
private_key = None
if exists(CONFIG["arpi_key_name"]):
private_key = paramiko.RSAKey.from_private_key_file(
CONFIG["arpi_key_name"], CONFIG["arpi_password"]
)
ssh = paramiko.SSHClient()
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(
CONFIG["arpi_hostname"],
username=CONFIG["arpi_username"],
password=CONFIG["arpi_password"],
pkey=private_key,
)
logger.info("Connected")
except (AuthenticationException, NoValidConnectionsError, gaierror):
try:
logger.info("Connecting %s@%s", CONFIG["default_username"], CONFIG["default_hostname"])
ssh.connect(
CONFIG["default_hostname"],
username=CONFIG["default_username"],
password=CONFIG["default_password"],
)
logger.info("Connected")
except (NoValidConnectionsError, gaierror):
raise Exception("Can't connect to the host!")
return ssh
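# Minimal sketch of the install.yaml keys this script reads (values are placeholders; the real
# configuration file may contain additional keys):
#   arpi_hostname: arpi.local            # target host once provisioned
#   arpi_username: argus
#   arpi_password: <secret>
#   arpi_key_name: arpi_id_rsa           # SSH key file generated/used by the installer
#   default_hostname: raspberrypi.local  # fallback host/credentials for a fresh image
#   default_username: pi
#   default_password: <secret>
#   dhparam_size: 2048
#   argus_db_schema: argus
#   argus_db_username: argus
#   argus_db_password: <secret>
#   argus_db_content: <path to db content file>
#   server_path: <path to the server sources>
#   webapplication_path: <path to the built web application>
#   environment: production              # selects .env_{environment} on the server
#   packages: {}                         # optional package-version overrides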
def install_environment():
"""
Install prerequisites to an empty Raspberry PI.
"""
if not exists(CONFIG["arpi_key_name"]) and \
not exists(CONFIG["arpi_key_name"] + ".pub"):
generate_SSH_key(CONFIG["arpi_key_name"], CONFIG["arpi_password"])
dhparam_file = "arpi_dhparam.pem"
if not exists(dhparam_file):
logger.info("dhparam (%s) generating", dhparam_file)
system(f"openssl dhparam -out {dhparam_file} {CONFIG["dhparam_size"]}")
else:
logger.info("dhparam (%s) already exists", dhparam_file)
system(f"openssl dhparam -in {dhparam_file} -text | head -3")
    # create the env variables string because paramiko update_environment ignores them
arguments = {
"ARPI_PASSWORD": CONFIG["arpi_password"],
"ARGUS_DB_SCHEMA": CONFIG["argus_db_schema"],
"ARGUS_DB_USERNAME": CONFIG["argus_db_username"],
"ARGUS_DB_PASSWORD": CONFIG["argus_db_password"],
"ARPI_HOSTNAME": CONFIG["arpi_hostname"],
"DHPARAM_FILE": join("/tmp", dhparam_file),
# progress
"QUIET": "" if CONFIG["progress"] else "-q",
"PROGRESS": "on" if CONFIG["progress"] else "off"
}
# adding package versions
arguments.update({p.upper(): f"{v}" for p, v in CONFIG["packages"].items() if v})
arguments = [f"export {key}={value}" for key, value in arguments.items()]
arguments = "; ".join(arguments)
ssh = get_connection()
scp = SCPClient(ssh.get_transport(), progress=show_progress if CONFIG["progress"] else None)
scp.put("scripts/install_environment.sh", remote_path=".")
deep_copy(ssh, join(CONFIG["server_path"], "etc"), "/tmp/etc", "**/*", CONFIG["progress"])
list_copy(
ssh,
((dhparam_file, "/tmp"),),
CONFIG["progress"]
)
channel = ssh.get_transport().open_session()
channel.get_pty()
channel.set_combine_stderr(True)
output = channel.makefile("r", -1)
logger.info("Starting install script...")
channel.exec_command(f"{arguments}; ./install_environment.sh")
print_lines(output)
ssh.close()
# waiting for user
    # 1. deploying the key can time out
    # 2. ssh accepts the password only from a terminal
input("Waiting before deploying public key!")
command = f"ssh-copy-id -i {CONFIG["arpi_key_name"]} {CONFIG["arpi_username"]}@{CONFIG["default_hostname"]}"
logger.info("Deploy public key: %s", command)
while subprocess.call(command, shell=True) != 0:
# retry after 2 seconds
sleep(2)
ssh = get_connection()
execute_remote(
message="Enabling key based ssh authentication",
ssh=ssh,
command="sudo sed -i -E -e 's/.*PasswordAuthentication (yes|no)/PasswordAuthentication no/g' /etc/ssh/sshd_config",
)
execute_remote(message="Restarting the host", ssh=ssh, command="sudo reboot")
def install_component(component, update=False, restart=False):
"""
Install the monitor component to a Raspberry PI.
"""
ssh = get_connection()
execute_remote(
message="Creating server directories...",
ssh=ssh,
command="mkdir -p server/etc server/scripts server/src server/webapplication",
)
logger.info("Copy common files...")
list_copy(
ssh,
(
(join(CONFIG["server_path"], "Pipfile"), "server"),
(join(CONFIG["server_path"], "Pipfile.lock"), "server"),
(join(CONFIG["server_path"], f".env_{CONFIG["environment"]}"), "server/.env"),
(join(CONFIG["server_path"], "src", "data.py"), join("server", "src", "data.py")),
(join(CONFIG["server_path"], "src", "hash.py"), join("server", "src", "hash.py")),
(join(CONFIG["server_path"], "src", "models.py"), join("server", "src", "models.py")),
), CONFIG["progress"]
)
deep_copy(
ssh, join(CONFIG["server_path"], "src", "tools"), join("server", "src", "tools"), "**/*.py", CONFIG["progress"]
)
logger.info("Copy component '%s'...", component)
deep_copy(
ssh,
join(CONFIG["server_path"], "src", component),
join("server", "src", component),
"**/*.py",
CONFIG["progress"]
)
if update:
execute_remote(
message="Start installing python packages on sytem...",
ssh=ssh,
command="cd server; sudo PIPENV_TIMEOUT=9999 pipenv install --system",
)
execute_remote(
message="Create virtual environment with python3 for argus...",
ssh=ssh,
command="cd server; PIPENV_TIMEOUT=9999 CI=1 pipenv install --skip-lock --site-packages",
)
execute_remote(
message="Create virtual environment with python3 for root...",
ssh=ssh,
command="cd server; sudo PIPENV_TIMEOUT=9999 CI=1 pipenv install --skip-lock --site-packages",
)
if restart:
execute_remote(
message="Restarting the service...",
ssh=ssh,
command="sudo systemctl restart argus_monitor.service argus_server.service",
)
ssh.close()
def install_server(update=False, restart=False):
"""
Install the server component to a Raspberry PI.
"""
install_component("server", update=update, restart=restart)
def install_monitoring(update=False, restart=False):
"""
Install the monitor component to a Raspberry PI.
"""
install_component("monitoring", update=update, restart=restart)
def install_database():
"""
Install the database component to a Raspberry PI.
"""
ssh = get_connection()
execute_remote(
message="Initialize database...",
ssh=ssh,
command="cd server; pipenv run flask db init",
)
execute_remote(
message="Migrate database...",
ssh=ssh,
command="cd server; pipenv run flask db migrate",
)
execute_remote(
message="Upgrade database...",
ssh=ssh,
command="cd server; pipenv run flask db upgrade",
)
execute_remote(
message="Updating database content...",
ssh=ssh,
command=f"cd server; pipenv run src/data.py -d -c {CONFIG["argus_db_content"]}",
)
ssh.close()
def install_webapplication(restart=False):
"""
Install the web application component to a Raspberry PI.
"""
ssh = get_connection()
execute_remote(
message="Delete old webapplication on remote site...",
ssh=ssh,
command="rm -R server/webapplication || true",
)
target = join("server", "webapplication")
logger.info("Copy web application: %s => %s", CONFIG["webapplication_path"], target)
deep_copy(ssh, CONFIG["webapplication_path"], target, "**/*", CONFIG["progress"])
if restart:
execute_remote(
message="Restarting the service...",
ssh=ssh,
command="sudo systemctl restart argus_server.service",
)
def main(argv=None): # IGNORE:C0111
"""Command line options."""
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
try:
# Setup argument parser
parser = ArgumentParser(
description=program_license, formatter_class=RawDescriptionHelpFormatter
)
parser.add_argument(
"-v",
"--verbose",
dest="verbose",
action="count",
help="set verbosity level [default: %(default)s]",
)
parser.add_argument(
"component",
choices=["environment", "server", "monitoring", "webapplication", "database"],
)
parser.add_argument(
"-e",
"--env",
dest="environment",
default="",
help="Select a different config (install.{environment}.yaml)",
)
parser.add_argument(
"-r",
"--restart",
action="store_true",
help="Restart depending service(s) after deployment",
)
parser.add_argument(
"-u",
"--update",
action="store_true",
help="Update the python environment for the depending service(s) after deployment",
)
parser.add_argument(
"-p",
"--progress",
action="store_true",
help="Show progress bars",
)
# Process arguments
args = parser.parse_args()
if args.verbose:
logger.setLevel(logging.DEBUG)
logger.info("Verbose mode on")
else:
logger.setLevel(logging.INFO)
config_filename = __file__.replace(".py", ".yaml")
if args.environment:
config_filename = config_filename.replace(".yaml", "." + args.environment + ".yaml")
logger.info("Working with %s", args)
logger.info("Working from %s", config_filename)
with open(config_filename, "r") as stream:
global CONFIG
CONFIG = yaml.load(stream, Loader=yaml.FullLoader)
CONFIG["progress"] = args.progress
logger.info("Working with configuration: \n%s", json.dumps(CONFIG, indent=4, sort_keys=True))
input("Waiting before starting the installation to verify the configuration!")
if args.component == "environment":
install_environment()
elif args.component == "server":
install_server(args.update, args.restart)
elif args.component == "monitoring":
install_monitoring(args.update, args.restart)
elif args.component == "webapplication":
install_webapplication(args.restart)
elif args.component == "database":
install_database()
else:
logger.error("Unknown component: %s", args.component)
logger.info("Finished successfully!")
return 0
except KeyboardInterrupt:
# handle keyboard interrupt ###
logger.info("\n\nCancelled!\n")
return 0
except Exception:
logger.exception("Failed to execute!")
return 2
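# Typical invocations (sketch; assumes the script is saved as install.py next to install.yaml):
#   python install.py environment                         # bootstrap a fresh Raspberry PI
#   python install.py server -u -r                        # redeploy server code, update pipenv envs, restart services
#   python install.py webapplication -r -e production     # deploy the web app using install.production.yaml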
if __name__ == "__main__":
sys.exit(main())
|
import os
import pytest
import re
import subprocess
import time
from afdko.fdkutils import (
get_temp_file_path,
get_temp_dir_path,
)
from test_utils import (
get_input_path,
get_bad_input_path,
get_expected_path,
generate_ps_dump,
)
from runner import main as runner
from differ import main as differ, SPLIT_MARKER
TOOL = 'tx'
CMD = ['-t', TOOL]
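# The tests below exercise tx through the test harness imported above: runner()
# is invoked with '-t tx' (CMD) and its output is compared to the files returned
# by get_expected_path() using differ(). Option values routed through runner are
# prefixed with an underscore (e.g. 'g', '_21847'), as seen throughout this file.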
def _get_extension(in_format):
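    """Map a tx input/output format name to its conventional file extension."""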
if 'ufo' in in_format:
return '.ufo'
elif in_format == 'type1':
return '.pfa'
return '.' + in_format
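# Patterns passed to differ's skip options so that volatile lines (creation
# dates, timestamps, tool version banners, copyright strings) are ignored when
# comparing generated PDF/PS/PFA output against the expected files.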
PDF_SKIP = [
'/Creator' + SPLIT_MARKER +
'/Producer' + SPLIT_MARKER +
'/CreationDate' + SPLIT_MARKER +
'/ModDate' + SPLIT_MARKER +
'(Date:' + SPLIT_MARKER +
'(Time:',
]
PDF_SKIP_REGEX = [
'^.+30.00 Td',
'^.+0.00 Td',
]
PS_SKIP = [
'0 740 moveto (Filename:' + SPLIT_MARKER +
'560 (Date:' + SPLIT_MARKER +
'560 (Time:'
]
PS_SKIP2 = [
'%ADOt1write:'
]
PFA_SKIP = [
'%ADOt1write:' + SPLIT_MARKER +
'%%Copyright:' + SPLIT_MARKER
]
# -----------
# Basic tests
# -----------
@pytest.mark.parametrize('arg', ['-h', '-v', '-u'])
def test_exit_known_option(arg):
assert subprocess.call([TOOL, arg]) == 0
@pytest.mark.parametrize('arg', ['-bar', '-foo'])
def test_exit_unknown_option(arg):
assert subprocess.call([TOOL, arg]) == 1
@pytest.mark.parametrize('pth', [
['invalid_path'], # no such file or directory
[get_temp_file_path()], # end of file (not a font)
[get_input_path('type1.pfa'), 'a', 'b'], # too many file args
])
def test_exit_invalid_path_or_font(pth):
assert subprocess.call([TOOL] + pth) == 1
# -------------
# Options tests
# -------------
@pytest.mark.parametrize('args', [
['-s', '-t1'], # '-s' option must be last
['-t1', '-g', '0', '-gx', '1'], # options are mutually exclusive
['-dcf'], # non-CFF font
['-ps', '-1'], # must specify an all-glyph range
['-ufo'], ['-t1', '-pfb'], # must specify a destination path
['-t1', '-usefd'], # bad arg
['-t1', '-decid'], # input font is non-CID
])
def test_option_error_type1_input(args):
font_path = get_input_path('type1.pfa')
assert subprocess.call([TOOL] + args + [font_path]) == 1
@pytest.mark.parametrize('arg', ['-e', '-q', '+q', '-w', '+w', '-lf', '-cr',
'-crlf', '-decid', '-LWFN', '-pfb'])
def test_option_error_type1_clash(arg):
# options -pfb or -LWFN may not be used with other options
pfb = '-pfb' if arg != '-pfb' else '-LWFN'
assert subprocess.call([TOOL, '-t1', pfb, arg]) == 1
@pytest.mark.parametrize('args', [
['-cff', '-l'], ['-cff', '-0'], ['-cff', '-1'], ['-cff', '-2'],
['-cff', '-3'], ['-cff', '-4'], ['-cff', '-5'], ['-cff', '-6'],
['-cff', '-q'], ['-cff', '+q'], ['-cff', '-w'], ['-cff', '+w'],
['-cff', '-pfb'], ['-cff', '-usefd'], ['-cff', '-decid'],
['-cff', '-lf'], ['-cff', '-cr'], ['-cff', '-crlf'], ['-cff', '-LWFN'],
['-t1', '-gn0'], ['-t1', '-gn1'], ['-t1', '-gn2'], ['-t1', '-sa'],
['-t1', '-abs'], ['-t1', '-cefsvg'],
['-t1', '-no_futile'], ['-t1', '-no_opt'], ['-t1', '-d'], ['-t1', '+d'],
['-dcf', '-n'], ['-dcf', '-c'],
['-dump', '-E'], ['-dump', '+E'], ['-dump', '-F'], ['-dump', '+F'],
['-dump', '-O'], ['-dump', '+O'], ['-dump', '-S'], ['-dump', '+S'],
['-dump', '-T'], ['-dump', '+T'], ['-dump', '-V'], ['-dump', '+V'],
['-dump', '-b'], ['-dump', '+b'], ['-dump', '-e'], ['-dump', '+e'],
['-dump', '-Z'], ['-dump', '+Z'],
])
def test_option_error_wrong_mode(args):
assert subprocess.call([TOOL] + args) == 1
@pytest.mark.parametrize('arg', [
'-a', '-e', '-f', '-g', '-i', '-m', '-o', '-p', '-A', '-P', '-U', '-maxs',
'-usefd', '-fd', '-dd', '-sd', '-sr', ['-cef', '-F'], ['-dcf', '-T']
])
def test_option_error_no_args_left(arg):
if isinstance(arg, list):
arg_lst = [TOOL] + arg
else:
arg_lst = [TOOL, '-t1', arg]
assert subprocess.call(arg_lst) == 1
@pytest.mark.parametrize('args', [
['-maxs', 'X'], ['-m', 'X'], ['-e', 'X'], ['-e', '5'],
['-usefd', 'X'], ['-usefd', '-1']
])
def test_option_error_bad_arg(args):
assert subprocess.call([TOOL, '-t1'] + args) == 1
@pytest.mark.parametrize('arg2', ['-sd', '-sr', '-dd'])
@pytest.mark.parametrize('arg1', ['-a', '-f', '-A'])
def test_option_error_no_args_left2(arg1, arg2):
assert subprocess.call([TOOL, '-t1', arg1, arg2]) == 1
@pytest.mark.parametrize('arg2', ['-sd', '-sr', '-dd'])
@pytest.mark.parametrize('arg1', ['-a', '-f'])
def test_option_error_empty_list(arg1, arg2):
empty_dir = get_temp_dir_path()
assert subprocess.call([TOOL, '-t1', arg1, arg2, empty_dir]) == 1
@pytest.mark.parametrize('arg', ['-bc', '-z', '-cmp', '-sha1'])
def test_gone_options_bc(arg):
assert subprocess.call([TOOL, arg]) == 1
@pytest.mark.parametrize('mode, msg', [
('-h', b'tx (Type eXchange) is a test harness'),
('-u', b'tx {[mode][mode options][shared options][files]}*'),
('-afm', b'[-afm options: default none]'),
('-cef', b'[-cef options: default none]'),
('-cff', b'[-cff options: defaults -E, -F, -O, -S, +T, -V, -Z, -b, -d]'),
('-cff2', b'[-cff2 options: defaults -S, -b]'),
('-dcf', b'[-dcf options: defaults -T all, -5]'),
('-dump', b'[-dump options: default -1]'),
('-mtx', b'[-mtx options: default -0]'),
('-path', b'[-path options: default -0]'),
('-pdf', b'[-pdf options: default -0]'),
('-ps', b'[-ps options: default -0]'),
('-svg', b'[-svg options: defaults -lf, -gn0]'),
('-t1',
b'[-t1 options: defaults -0, -l, -E, -S, +T, -V, +q, -w, -e 4, -lf]'),
('-ufo', b'[-ufo options: default none]'),
])
def test_mode_help(mode, msg):
output = subprocess.check_output([TOOL, mode, '-h'])
assert msg in output
@pytest.mark.parametrize('dcf_dump_level', ['0', '1', '5'])
def test_script_file(dcf_dump_level):
font_path = get_input_path('cid.otf')
opts_path = get_temp_file_path()
opts_file_content = f'\n# foo\n # bar\r -{dcf_dump_level}\t"{font_path}"'
with open(opts_path, 'a') as fp:
fp.write(opts_file_content)
actual_path = runner(CMD + ['-s', '-a', '-o', 'dcf', 's', '-f', opts_path])
expected_path = get_expected_path(f'cid_dcf_{dcf_dump_level}.txt')
assert differ([expected_path, actual_path])
def test_nested_script():
# nested scripts not allowed
temp_path = get_temp_file_path()
assert subprocess.call([TOOL, '-s', 'foobar', '-s', temp_path]) == 1
@pytest.mark.parametrize('layer_name', ['', 'None', 'background', 'foobar'])
def test_ufo_altlayer(layer_name):
if not layer_name:
fname = 'processed'
args = []
else:
fname = 'foreground' if layer_name == 'None' else layer_name
args = ['altLayer', f'_{fname}']
actual_path = runner(CMD + ['-s', '-f', 'altlayer.ufo', '-o', '6'] + args)
expected_path = get_expected_path(f'altlayer_{fname}.txt')
assert differ([expected_path, actual_path])
@pytest.mark.parametrize('arg, filename', [
('-a', 'ufo3.t1'),
('-A', 'SourceSansPro-Regular.t1'),
])
def test_a_options(arg, filename):
input_path = get_input_path('ufo3.ufo')
output_path = os.path.join(os.getcwd(), filename)
assert os.path.exists(output_path) is False
subprocess.call([TOOL, '-t1', arg, input_path])
assert os.path.exists(output_path) is True
os.remove(output_path)
def test_o_option():
input_path = get_input_path('ufo3.ufo')
expected_path = get_expected_path('ufo3.pfa')
output_path = get_temp_file_path()
subprocess.call([TOOL, '-t1', '-o', output_path, input_path])
assert differ([expected_path, output_path, '-s', PFA_SKIP[0]])
def test_f_option():
fpath1 = get_input_path('type1.pfa')
fpath2 = get_input_path('cff2_vf.otf')
actual_path = runner(CMD + ['-s', '-o', 'mtx', '3',
'f', f'_{fpath1}', f'_{fpath2}'])
expected_path = get_expected_path('mtx_f_options.txt')
assert differ([expected_path, actual_path])
def test_stdin():
input_path = get_input_path('type1.pfa')
expected_path = get_expected_path('stdin.txt')
output_path = get_temp_file_path()
with open(input_path) as fp:
output = subprocess.check_output([TOOL], stdin=fp)
with open(output_path, 'wb') as fp:
fp.write(output)
assert differ([expected_path, output_path])
@pytest.mark.parametrize('arg', ['0', '-16'])
def test_m_option_success(arg):
# mem_manage() is called 16 times with the command 'tx -m 0 type1.pfa'
input_path = get_input_path('type1.pfa')
assert subprocess.call([TOOL, '-m', arg, input_path]) == 0
# Disabled because of https://github.com/adobe-type-tools/afdko/issues/933
# @pytest.mark.parametrize('arg', range(1, 16))
# def test_m_option_fail(arg):
# input_path = get_input_path('type1.pfa')
# assert subprocess.call([TOOL, '-m', f'-{arg}', input_path]) != 0
@pytest.mark.parametrize('arg, exp_filename', [(None, 'not_removed'),
('-V', 'not_removed'),
('+V', 'removed')])
def test_V_option(arg, exp_filename):
input_path = get_input_path('overlap.pfa')
expected_path = get_expected_path(f'overlap_{exp_filename}.pfa')
output_path = get_temp_file_path()
args = [TOOL, '-t1', '-o', output_path, input_path]
if arg:
args.insert(2, arg)
subprocess.call(args)
assert differ([expected_path, output_path] + ['-s'] + PFA_SKIP)
# -------------
# Convert tests
# -------------
@pytest.mark.parametrize('to_format', [
'ufo2',
'ufo3',
'type1',
'svg',
'mtx',
'afm',
'pdf',
'ps',
'cff',
])
@pytest.mark.parametrize('from_format', [
'ufo2',
'ufo3',
'type1',
])
def test_convert(from_format, to_format):
from_ext = _get_extension(from_format)
to_ext = _get_extension(to_format)
# input filename
from_filename = from_format + from_ext
# expected filename
exp_filename = from_format + to_ext
# runner args
if 'ufo' in to_format:
save_path = get_temp_dir_path('font.ufo')
else:
save_path = get_temp_file_path()
# diff mode
if to_format == 'cff':
diff_mode = ['-m', 'bin']
else:
diff_mode = []
# skip items
regex_skip = []
skip = []
if to_format == 'afm':
skip = ['Comment Creation Date:' + SPLIT_MARKER + 'Comment Copyright']
elif to_format == 'pdf':
skip = PDF_SKIP[:]
regex_skip = PDF_SKIP_REGEX[:]
elif to_format == 'ps':
skip = PS_SKIP[:]
elif to_format == 'type1':
skip = PFA_SKIP[:]
if skip:
skip.insert(0, '-s')
if regex_skip:
for regex in regex_skip:
skip.append('-r')
skip.append(regex)
# format arg fix
if to_format in ('ufo2', 'ufo3'):
format_arg = 'ufo'
elif to_format == 'type1':
format_arg = 't1'
else:
format_arg = to_format
runner(CMD + ['-a', '-f', get_input_path(from_filename), save_path,
'-o', format_arg])
expected_path = get_expected_path(exp_filename)
assert differ([expected_path, save_path] + skip + diff_mode)
def test_cef_cefsvg():
font_path = get_input_path('cff2_vf.otf')
output_path = get_temp_file_path()
runner(CMD + ['-a', '-o', 'cef', 'cefsvg', 'cr', 'gn1', 'abs', 'sa',
'-f', font_path, output_path])
expected_path = get_expected_path('cef_cefsvg_cr.svg')
assert differ([expected_path, output_path])
@pytest.mark.parametrize('file_ext', [
'pfa', 'pfabin', 'pfb', 'lwfn', 'bidf']) # TODO: 'bidf85'
def test_type1_inputs(file_ext):
bidf = '.bidf' if 'bidf' in file_ext else ''
actual_path = runner(CMD + ['-s', '-o', '2', '-f', f'type1.{file_ext}'])
expected_path = get_expected_path(f'type1.dump2{bidf}.txt')
assert differ([expected_path, actual_path, '-s', '## Filename'])
@pytest.mark.parametrize('args', [[], ['U', '_500,500'], ['U', '_0,0', 'n']])
@pytest.mark.parametrize('fname', ['zx', 'zy'])
def test_type1mm_inputs(fname, args):
    fname2 = f'.{"".join(args)}' if args else ''
actual_path = runner(CMD + ['-s', '-f', f'{fname}.pfb', '-o', '2'] + args)
expected_path = get_expected_path(f'{fname}.dump2{fname2}.txt')
assert differ([expected_path, actual_path, '-s', '## Filename'])
@pytest.mark.parametrize('fext', ['otf', 'ttf', 'cff', 'cef', 'ttc'])
def test_other_input_formats(fext):
arg = ['y'] if fext == 'ttc' else []
actual_path = runner(CMD + ['-s', '-f', f'font.{fext}', '-o', '3'] + arg)
expected_path = get_expected_path(f'font.{fext}.dump3.txt')
assert differ([expected_path, actual_path, '-s', '## Filename'])
# ----------
# Dump tests
# ----------
@pytest.mark.parametrize('args', [
[],
['0'],
['dump', '0'],
['1'],
['2'],
['3'],
['4'],
['4', 'N'],
['5'],
['6'],
['6', 'd'],
['6', 'n'],
])
@pytest.mark.parametrize('font_filename', ['type1.pfa', 'svg.svg'])
def test_dump_option(args, font_filename):
if any([arg in args for arg in ('4', '5', '6')]):
skip = []
else:
skip = ['-s', '## Filename']
head = font_filename.split('.')[0]
midl = ''.join(args) if args else 'dump1'
if 'dump' not in midl:
midl = f'dump{midl}'
exp_filename = f'{head}.{midl}.txt'
opts = ['-o'] + args if args else []
actual_path = runner(CMD + ['-s', '-f', font_filename] + opts)
expected_path = get_expected_path(exp_filename)
assert differ([expected_path, actual_path] + skip)
@pytest.mark.parametrize('fext', ['pfa', 'ufo'])
def test_dump_flex_op(fext):
fname = 'flex'
actual_path = runner(CMD + ['-s', '-o', '6', '-f', f'{fname}.{fext}'])
expected_path = get_expected_path(f'{fname}.txt')
assert differ([expected_path, actual_path])
# ----------
# CFF2 tests
# ----------
@pytest.mark.parametrize('filename, msg', [
('avar_invalid_table_version',
b'(cfr) invalid avar table version'),
('fvar_invalid_table_version',
b'(cfr) invalid fvar table version'),
('avar_invalid_table_size',
b'(cfr) invalid avar table size'),
('fvar_invalid_table_size',
b'(cfr) invalid fvar table size'),
('fvar_invalid_table_header',
b'(cfr) invalid values in fvar table header'),
('avar_invalid_axis-instance_count-size',
b'(cfr) invalid avar table size or axis/instance count/size'),
('fvar_invalid_axis-instance_count-size',
b'(cfr) invalid fvar table size or axis/instance count/size'),
('avar_axis_value_map_out_of_bounds',
b'(cfr) avar axis value map out of bounds'),
('avar_fvar_axis_mismatch',
b'(cfr) mismatching axis counts in fvar and avar'),
])
def test_varread_errors(filename, msg):
font_path = get_bad_input_path(f'vf_{filename}.otf')
output = subprocess.check_output([TOOL, '-dcf', '-0', font_path],
stderr=subprocess.STDOUT)
assert msg in output
@pytest.mark.parametrize('args, exp_filename', [
([], 'SourceCodeVar-Roman_CFF2'),
(['*S', '*b', 'std'], 'SourceCodeVar-Roman_CFF2_subr'), # subroutinize
])
def test_cff2_extract(args, exp_filename):
# read CFF2 VF, write CFF2 table
font_path = get_input_path('SourceCodeVariable-Roman.otf')
cff2_path = get_temp_file_path()
runner(CMD + ['-a', '-f', font_path, cff2_path, '-o', 'cff2'] + args)
expected_path = get_expected_path(exp_filename)
assert differ([expected_path, cff2_path, '-m', 'bin'])
def test_cff2_sub_dump():
# Dump a subroutinized CFF2 font. This is a J font with 64K glyphs,
# and almost every subr and charstring is a single subr call.
# A good test for problems with charstrings with no endchar operator.
actual_path = runner(CMD + ['-s', '-o', 'dump', '6', 'g', '_21847',
'-f', 'CFF2-serif-sub.cff2'])
expected_path = get_expected_path('CFF2-serif-sub.cff2.txt')
assert differ([expected_path, actual_path])
def test_varread_pr355():
# read CFF2 VF, write Type1 snapshot
# Note that cff2_vf is built from the sources at:
# afdko/tests/buildmasterotfs_data/input/cff2_vf.
actual_path = runner(CMD + ['-s', '-o', 't1', '-f', 'cff2_vf.otf'])
expected_path = get_expected_path('cff2_vf.pfa')
skip = ['-s'] + PFA_SKIP[:]
assert differ([expected_path, actual_path] + skip)
def test_cff2_no_vf_bug353():
# read CFF2 WITHOUT VF info, write a CFF2 out. 'regular_CFF2.otf'
# is derived by taking the regular.otf file from the sfntdiff
# 'input_data' directory, and converting the CFF table to CFF2.
font_path = get_input_path('regular_CFF2.otf')
cff2_path = get_temp_file_path()
runner(CMD + ['-a', '-o', 'cff2', '-f', font_path, cff2_path])
expected_path = get_expected_path('regular_CFF2.cff2')
assert differ([expected_path, cff2_path, '-m', 'bin'])
def test_cff2_with_spare_masters_pr835():
# SetNumMasters was incorrectly passing the number of region indices to
# var_getIVSRegionIndices for the regionListCount. With PR #835 it now
# passes the total region count for regionListCount.
#
# Example of the bug -- this command:
# tx -cff2 +S +b -std SHSansJPVFTest.otf SHSansJPVFTest.cff2
# Would produce the following warning & error:
# inconsistent region indices detected in item variation store subtable 1
# memory error
font_path = get_input_path('SHSansJPVFTest.otf')
output_path = get_temp_file_path()
runner(CMD + ['-a', '-o',
'cff2', '*S', '*b', 'std',
'-f', font_path, output_path])
expected_path = get_expected_path('SHSansJPVFTest.cff2')
assert differ([expected_path, output_path, '-m', 'bin'])
@pytest.mark.parametrize('vector, exp_filename', [
('9999,9999,9999,9999,999,9', 'psname_last_resort_no.txt'),
('9999,9999,9999,9999,999,99', 'psname_last_resort_yes.txt'),
])
def test_last_resort_instance_psname(vector, exp_filename):
font_path = get_input_path('cff2_vf_many_axes.otf')
output_path = get_temp_file_path()
runner(CMD + ['-o', '0', 'U', f'_{vector}', '-f', font_path, output_path])
expected_path = get_expected_path(exp_filename)
assert differ([expected_path, output_path, '-s', '## Filename'])
# -----------
# Other tests
# -----------
def test_trademark_string_pr425():
# the copyright symbol used in the trademark field of a UFO is
    # converted to 'Copyright' and stored in the Notice field of a Type1 font
actual_path = runner(CMD + ['-s', '-o', 't1', '-f', 'trademark.ufo'])
expected_path = get_expected_path('trademark.pfa')
skip = ['-s'] + PFA_SKIP[:]
assert differ([expected_path, actual_path] + skip)
def test_remove_hints_bug180():
font_path = get_input_path('cid.otf')
cid_path = get_temp_file_path()
runner(CMD + ['-a', '-o', 't1', 'n', '-f', font_path, cid_path])
expected_path = get_expected_path('cid_nohints.ps')
expected_path = generate_ps_dump(expected_path)
actual_path = generate_ps_dump(cid_path)
skip = ['-s'] + PS_SKIP2
assert differ([expected_path, actual_path] + skip)
def test_long_charstring_read_bug444():
    # read a CFF2 VF with a charstring longer than 65535, check output
actual_path = runner(CMD + ['-s', '-o', '0', '-f', 'CJK-VarTest.otf'])
expected_path = get_expected_path('CJK-VarTest_read.txt')
assert differ([expected_path, actual_path, '-s', '## Filename'])
def test_long_charstring_warning():
    # read a CFF2 VF with a charstring longer than 65535, check warning message
# NOTE: can't diff the output against 'CJK-VarTest_warn.txt' because on
# Windows the lines start with 'tx.exe:' instead of just 'tx:'
actual_path = runner(
CMD + ['-s', '-e', '-o', '5', '-f', 'CJK-VarTest.otf'])
# expected_path = get_expected_path('CJK-VarTest_warn.txt')
with open(actual_path, 'rb') as f:
output = f.read()
assert b"(cfr) Warning: CharString of GID 1 is 71057 bytes long" in output
def test_long_charstring_write():
    # read a CFF2 VF with a charstring longer than 65535, write out CFF2 file
# NOTE: the font 'CJK-VarTest.otf' cannot be used in this test because
# once its long charstring is optimized (floats -> ints) it's no longer
# over the 65535 bytes limit; the long charstring in 'CJK-VarTest2.otf' is
# already as small as possible, so it will trigger the check in cffwrite.c
font_path = get_input_path('CJK-VarTest2.otf')
cff2_path = get_temp_file_path()
runner(CMD + ['-a', '-o', 'cff2', '-f', font_path, cff2_path])
expected_path = get_expected_path('CJK-VarTest2.cff2')
assert differ([expected_path, cff2_path, '-m', 'bin'])
def test_many_hints_string_bug354():
# The glyph T@gid002 has 33 hstem hints. This tests a bug where
# tx defined an array of only 6 operands.
    # This is encountered only when writing to a VF CFF2.
font_path = get_input_path('cff2_vf.otf')
cff2_path = get_temp_file_path()
dcf_path = get_temp_file_path()
runner(CMD + ['-a', '-o', 'cff2', '-f', font_path, cff2_path])
runner(CMD + ['-a', '-o', 'dcf', '-f', cff2_path, dcf_path])
expected_path = get_expected_path('cff2_vf.dcf.txt')
assert differ([expected_path, dcf_path])
def test_non_varying_glyphs_bug356():
"""A glyph which is non-varying in a variable font may be referenced by a
VariationStore data item subtable which has a region count of 0. The VF
support code assumed that this was an error, and issued a false warning.
File 'bug356.otf' is a handcrafted modification of 'cff2_vf.otf'. The
latter cannot be used as-is to validate the fix."""
actual_path = get_temp_file_path()
font_path = get_input_path('bug356.otf')
stderr_path = runner(CMD + ['-s', '-e', '-a', '-o', 'cff',
'-f', font_path, actual_path])
expected_path = get_expected_path('bug356.txt')
assert differ([expected_path, stderr_path, '-l', '1'])
@pytest.mark.parametrize('font_format', ['type1', 'cidfont', 'ufo2', 'ufo3'])
def test_no_psname_dump_bug437(font_format):
if 'cid' in font_format:
file_ext = 'ps'
elif 'ufo' in font_format:
file_ext = 'ufo'
else:
file_ext = 'pfa'
filename = f'{font_format}-noPSname.{file_ext}'
expected_path = get_expected_path(f'bug437/dump-{font_format}.txt')
actual_path = runner(CMD + ['-s', '-o', 'dump', '0', '-f', filename])
assert differ([expected_path, actual_path, '-l', '1'])
@pytest.mark.parametrize('font_format', ['type1', 'cidfont', 'ufo2', 'ufo3'])
def test_no_psname_convert_to_ufo_bug437(font_format):
if 'cid' in font_format:
file_ext = 'ps'
elif 'ufo' in font_format:
file_ext = 'ufo'
else:
file_ext = 'pfa'
font_path = get_input_path(f'{font_format}-noPSname.{file_ext}')
expected_path = get_expected_path(f'bug437/{font_format}.ufo')
save_path = get_temp_dir_path(f'{font_format}.ufo')
runner(CMD + ['-a', '-o', 'ufo', '-f', font_path, save_path])
assert differ([expected_path, save_path])
@pytest.mark.parametrize('font_format', ['type1', 'cidfont', 'ufo2', 'ufo3'])
def test_no_psname_convert_to_type1_bug437(font_format):
if 'cid' in font_format:
file_ext = 'ps'
elif 'ufo' in font_format:
file_ext = 'ufo'
else:
file_ext = 'pfa'
filename = f'{font_format}-noPSname.{file_ext}'
with pytest.raises(subprocess.CalledProcessError) as err:
runner(CMD + ['-o', 't1', '-f', filename])
assert err.value.returncode in (5, 6)
def test_illegal_chars_in_glyph_name_bug473():
font_path = get_input_path('bug473.ufo')
save_path = get_temp_dir_path('bug473.ufo')
runner(CMD + ['-a', '-o', 'ufo', '-f', font_path, save_path])
expected_path = get_expected_path('bug473.ufo')
assert differ([expected_path, save_path])
def test_subroutine_sorting_bug494():
""" The input file was made with the command:
tx -t1 -g 0-5 \
source-serif-pro/Roman/Instances/Regular/font.ufo bug494.pfa
The bug is that two subroutines in the Windows CFF output are swapped in
index order from the Mac version. This was because of an unstable
'qsort' done on the subroutines in the final stage of selection."""
font_path = get_input_path('bug494.pfa')
cff_path = get_temp_file_path()
dcf_path = get_temp_file_path()
runner(CMD + ['-a', '-o', 'cff', '*S', 'std', '*b',
'-f', font_path, cff_path])
runner(CMD + ['-a', '-o', 'dcf', '-f', cff_path, dcf_path])
expected_path = get_expected_path('bug494.dcf.txt')
assert differ([expected_path, dcf_path])
@pytest.mark.parametrize('args, exp_filename', [([], 'roundtrip'),
(['g', '_0-1'], 'subset')])
@pytest.mark.parametrize('to_format', ['t1', 'cff', 'afm'])
def test_recalculate_font_bbox_bug618(to_format, args, exp_filename):
font_path = get_input_path('bug618.pfa')
save_path = get_temp_file_path()
runner(CMD + ['-f', font_path, save_path, '-o', to_format] + args)
file_ext = to_format
if to_format == 't1':
file_ext = 'pfa'
elif to_format == 'afm':
file_ext = 'txt'
expected_path = get_expected_path(
f'bug618/{exp_filename}.{file_ext}')
diff_mode = []
if to_format == 'cff':
diff_mode = ['-m', 'bin']
skip = []
if to_format == 'afm':
skip = ['-s', 'Comment Creation Date:' + SPLIT_MARKER +
'Comment Copyright']
elif to_format == 't1':
skip = ['-s'] + PFA_SKIP[:]
assert differ([expected_path, save_path] + diff_mode + skip)
def test_glyph_bboxes_bug655():
actual_path = runner(CMD + ['-s', '-o', 'mtx', '2', '-f', 'bug655.ufo'])
expected_path = get_expected_path('bug655.txt')
assert differ([expected_path, actual_path])
@pytest.mark.parametrize('filename', ['SHSVF_9b3b', 'bug684'])
def test_cs_opt_bug684(filename):
""" The input CFF2 variable font contains a long single charstring
making the maximum use of the operand stack.
    tx was generating a bad CFF2 charstring that would overflow
    the operand stack of the standard size (513) after being re-converted
    to CFF2 unless the -no_opt option is specified."""
font_path = get_input_path(f'{filename}.otf')
result_path = get_temp_file_path()
expected_path = get_expected_path(f'{filename}.cff2')
runner(CMD + ['-a', '-o', 'cff2', '-f', font_path, result_path])
assert differ([expected_path, result_path, '-m', 'bin'])
def test_standard_apple_glyph_names():
actual_path = runner(CMD + ['-s', '-o', 'dump', '4', '-f', 'post-v2.ttf'])
expected_path = get_expected_path('post-v2.txt')
assert differ([expected_path, actual_path])
def test_ufo_self_closing_dict_element_bug701():
actual_path = runner(CMD + ['-s', '-o', 'dump', '0', '-f', 'bug701.ufo'])
expected_path = get_expected_path('bug701.txt')
assert differ([expected_path, actual_path, '-s', '## Filename'])
def test_ufo3_guideline_bug705():
actual_path = runner(CMD + ['-s', '-o', 't1', '-f', 'bug705.ufo'])
expected_path = get_expected_path('bug705.pfa')
assert differ([expected_path, actual_path] + ['-s'] + PFA_SKIP)
def test_ufo_vertical_advance_bug786():
actual_path = runner(CMD + ['-s', '-o', 't1', '-f', 'bug786.ufo'])
expected_path = get_expected_path('bug786.pfa')
skip = ['-s'] + PFA_SKIP[:]
assert differ([expected_path, actual_path] + skip)
@pytest.mark.parametrize('filename', [
'a', # AE glyph in both default and processed layers
'b', # AE glyph in default layer only
'c', # AE glyph in processed layer only
])
def test_ufo_read_processed_contents_plist_bug740(filename):
actual_path = runner(CMD + ['-s', '-o', 'dump', '6', 'g', '_AE',
'-f', f'bug740/{filename}.ufo'])
expected_path = get_expected_path(f'bug740/{filename}.txt')
assert differ([expected_path, actual_path])
def test_dcf_with_infinite_recursion_bug775():
font_path = get_bad_input_path('subr_test_font_infinite_recursion.otf')
dcf_path = get_temp_file_path()
with pytest.raises(subprocess.CalledProcessError) as err:
runner(CMD + ['-a', '-o', 'dcf', '-f', font_path, dcf_path])
assert(err.value.returncode == 1) # exit code of 1, not segfault of -11
expected_path = get_expected_path(
'subr_test_font_infinite_recursion.dcf.txt')
assert differ([expected_path, dcf_path])
def test_dcf_call_depth_with_many_calls_bug846():
# This font was getting an invalid subroutine count because tx wasn't
# decrementing the subroutine call depth after the subroutine calls,
# so it was effectively just counting the total number of calls,
# not the call depth.
font_path = get_input_path('SHSansJPVFTest_SUBR.otf')
dcf_path = get_temp_file_path()
runner(CMD + ['-a', '-o', 'dcf', '-f', font_path, dcf_path])
expected_path = get_expected_path('SHSansJPVFTest_SUBR.dcf.txt')
assert differ([expected_path, dcf_path])
def test_svg_with_cid_font_bug822():
font_path = get_input_path('cid.otf')
cid_path = get_temp_file_path()
runner(CMD + ['-a', '-o', 'svg', '-f', font_path, cid_path])
expected_path = get_expected_path('cid.svg')
assert differ([expected_path, cid_path])
@pytest.mark.parametrize('filename',
['type1-noPSname.pfa', 'cidfont-noPSname.ps'])
def test_svg_missing_fontname_bug883(filename):
font_path = get_input_path(filename)
svg_path = get_temp_file_path()
with pytest.raises(subprocess.CalledProcessError) as err:
runner(CMD + ['-a', '-o', 'svg', '-f', font_path, svg_path])
assert(err.value.returncode == 6) # exit code of 6, not segfault of -11
@pytest.mark.parametrize('option', ['dump', 'dcf'])
def test_read_fdselect_format_4(option):
font_name = 'fdselect4.otf'
input_path = get_input_path(font_name)
output_path = get_temp_file_path()
runner(CMD + ['-a', '-o', option, '-f', input_path, output_path])
expected_path = get_expected_path(font_name + '.' + option)
assert differ([expected_path, output_path, '-s', '## Filename'])
def test_write_fdselect_format_4():
font_name = 'FDArrayTest257FontDicts.otf'
input_path = get_input_path(font_name)
output_path = get_temp_file_path()
runner(CMD + ['-a', '-o', 'cff2', '-f', input_path, output_path])
expected_path = get_expected_path('FDArrayTest257FontDicts.cff2')
assert differ([expected_path, output_path, '-m', 'bin'])
@pytest.mark.parametrize('option', ['cff', 'dcf'])
@pytest.mark.parametrize('font_name',
['bug895_charstring.otf', 'bug895_private_dict.otf'])
def test_read_short_charstring_bug895(option, font_name):
input_path = get_bad_input_path(font_name)
output_path = runner(CMD + ['-s', '-e', '-a', '-o', option,
'-f', input_path])
expected_path = get_expected_path(font_name + '.' + option)
skip = ['-s', 'tx: ---'] # skip line with filename
assert differ([expected_path, output_path] + skip)
@pytest.mark.parametrize('option', ['cff2', 'cff'])
def test_drop_defaultwidthx_when_writing_cff2_bug897(option):
input_path = get_bad_input_path('bug897.otf')
output_path = get_temp_file_path()
runner(CMD + ['-a', '-o', option, '-f', input_path, output_path])
dcf_path = get_temp_file_path()
runner(CMD + ['-a', '-o', 'dcf', '-f', output_path, dcf_path])
expected_path = get_expected_path('bug897.' + option + '.dcf')
assert differ([expected_path, dcf_path])
@pytest.mark.parametrize('option', ['afm', 'dump', 'svg'])
def test_missing_glyph_names_pr905(option):
input_path = get_bad_input_path('pr905.otf')
output_path = get_temp_file_path()
runner(CMD + ['-a', '-o', option, '-f', input_path, output_path])
expected_path = get_expected_path('pr905' + '.' + option)
if option == 'afm':
skip = ['-s',
'Comment Creation Date:' + SPLIT_MARKER + 'Comment Copyright']
elif option == 'dump':
skip = ['-s', '## Filename']
else:
skip = []
assert differ([expected_path, output_path] + skip)
def test_missing_glyph_names_pr905_cef():
input_path = get_bad_input_path('pr905.otf')
output_path = get_temp_file_path()
with pytest.raises(subprocess.CalledProcessError) as err:
runner(CMD + ['-a', '-o', 'cef', '-f', input_path, output_path])
assert(err.value.returncode > 0) # error code, not segfault of -11
def test_var_bug_913():
# AdobeVFPrototype_mod.otf is a modified copy of AdobeVFPrototype.otf 1.003
# so that the region indexes in HVAR are listed in a different order from
    # those in CFF2. The MVAR table has also been modified to contain (dummy)
    # deltas for underline offset and underline thickness, just to exercise
    # the MVAR lookup code.
font_path = get_input_path('AdobeVFPrototype_mod.otf')
save_path = get_temp_file_path()
runner(CMD + ['-a', '-o',
'3', 'g', '_A,W,y', 'U', '_900,0',
'-f', font_path, save_path])
expected_path = get_expected_path('bug913.txt')
assert differ([expected_path, save_path, '-s', '## Filename'])
def test_bad_charset():
font_path = get_bad_input_path('bad_charset.otf')
save_path = get_temp_file_path()
runner(CMD + ['-a', '-f', font_path, save_path])
expected_path = get_expected_path('bad_charset.txt')
assert differ([expected_path, save_path, '-s', '## Filename'])
def test_bug_940():
input_path = get_bad_input_path('bug940_private_blend.otf')
output_path = get_temp_file_path()
with pytest.raises(subprocess.CalledProcessError) as err:
runner(CMD + ['-a', '-o', 'cff2', '-f', input_path, output_path])
assert(err.value.returncode > 0) # error code, not segfault or success
def test_too_many_glyphs_pr955():
input_path = get_bad_input_path('TooManyGlyphsCFF2.otf')
output_path = get_temp_file_path()
with pytest.raises(subprocess.CalledProcessError) as err:
runner(CMD + ['-a', '-o', 'cff', '-f', input_path, output_path])
assert(err.value.returncode > 0) # error code, not hang or success
def test_ttread_varinst():
font_path = get_input_path('AdobeVFPrototype.ttf')
save_path = get_temp_file_path()
runner(CMD + ['-a', '-o', '3', 'g', '_A', 'U', '_500,800',
'-f', font_path, save_path])
expected_path = get_expected_path('vfproto_tt_inst500_800.txt')
assert differ([expected_path, save_path, '-s', '## Filename'])
def test_unused_post2_names():
font_path = get_input_path('SourceSansPro-Regular-cff2-unused-post.otf')
save_path = get_temp_file_path()
runner(CMD + ['-a', '-o', '1', '-f', font_path, save_path])
expected_path = get_expected_path('ssr-cff2-unused-post.txt')
assert differ([expected_path, save_path, '-s', '## Filename'])
def test_seac_reporting():
# This test aims to show that the SEAC operator
# is not reported by all tx modes
font_path = get_input_path('seac.otf')
save_path = get_temp_file_path()
runner(CMD + ['-a', '-o', '6', '-f', font_path, save_path])
expected_path = get_expected_path('seac.dump.txt')
assert differ([expected_path, save_path])
runner(CMD + ['-a', '-o', 'dcf', '5', 'T', '_c',
'-f', font_path, save_path])
expected_path = get_expected_path('seac.dcf.txt')
assert differ([expected_path, save_path])
def test_date_and_time_afm():
"""
test the use of date and time functions in absfont_afm.c
"""
input_path = get_input_path('font.otf')
output_path = get_temp_file_path()
runner(CMD + ['-a', '-o', 'afm', '-f', input_path, output_path])
now = time.time()
year = '%s' % time.localtime().tm_year
with open(output_path) as output_file:
lines = output_file.readlines()
file_year = lines[1].split()[2]
assert year == file_year
file_time_str = lines[2].split(': ')[1].strip()
file_time = time.mktime(
time.strptime(file_time_str, '%a %b %d %H:%M:%S %Y'))
hours_diff = abs(now - file_time) / 3600
assert(hours_diff < 1)
def test_date_and_time_ps():
"""
test the use of date and time functions in absfont_draw.c
"""
input_path = get_input_path('font.otf')
output_path = get_temp_file_path()
runner(CMD + ['-a', '-o', 'ps', '-f', input_path, output_path])
now = time.time()
with open(output_path) as output_file:
lines = output_file.readlines()
date_str = re.split(r'[()]', lines[5])[1]
date_str = date_str.split(': ')[1]
time_str = re.split(r'[()]', lines[7])[1]
time_str = time_str.split(': ')[1]
file_date_and_time_str = date_str + ' ' + time_str
file_time = time.mktime(
time.strptime(file_date_and_time_str, '%m/%d/%y %H:%M'))
hours_diff = abs(now - file_time) / 3600
assert(hours_diff < 1)
def test_date_and_time_pdf():
"""
test the use of date and time functions in pdfwrite.c
"""
input_path = get_input_path('font.otf')
output_path = get_temp_file_path()
runner(CMD + ['-a', '-o', 'pdf', '-f', input_path, output_path])
now = time.time()
tz = time.timezone
tz_hr = abs(int(tz / 3600)) # ignore sign since we're splitting on +/-
tz_min = (tz % 3600) // 60
with open(output_path) as output_file:
lines = output_file.readlines()
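        # PDF date strings have the form D:YYYYMMDDHHMMSS+HH'mm' (the offset may
        # also use '-' or 'Z'), which is why the splits below peel the timezone
        # hour/minute fields off the CreationDate value.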
creation_date_str = re.split(r'[()]', lines[13])[1]
mod_date_str = re.split(r'[()]', lines[14])[1]
assert(creation_date_str == mod_date_str)
(date_time_str, tz_hr_str, tz_min_str) = \
re.split(r"[:+\-Z']", creation_date_str)[1:4]
creation_time = time.mktime(
time.strptime(date_time_str, '%Y%m%d%H%M%S'))
hours_diff = abs(now - creation_time) / 3600
assert(hours_diff < 1)
creation_tz_hr = int(tz_hr_str)
assert(creation_tz_hr == tz_hr)
creation_tz_min = int(tz_min_str)
assert(creation_tz_min == tz_min)
file_date_str = re.split(r"[():]", lines[36])[2].strip()
file_time_str = re.split(r"[() ]", lines[38])[3]
file_date_time_str = file_date_str + ' ' + file_time_str
file_time = time.mktime(
time.strptime(file_date_time_str, "%d %b %y %H:%M"))
hours_diff = abs(now - file_time) / 3600
assert(hours_diff < 1)
def test_overlap_removal():
input_path = get_input_path('overlaps.ufo')
expected_path = get_expected_path('overlaps.pfa')
output_path = get_temp_file_path()
args = [TOOL, '-t1', '+V', '-o', output_path, input_path]
subprocess.call(args)
assert differ([expected_path, output_path, '-s', PFA_SKIP[0]])
@pytest.mark.parametrize("fmt", [
"cff",
"cff2",
])
def test_nonstd_fontmatrix(fmt):
input_path = get_input_path("nonstdfmtx.otf")
txt_filename = f"nonstdfmtx_{fmt}.txt"
expected_path = get_expected_path(txt_filename)
output_dir = get_temp_dir_path()
bin_output = os.path.join(output_dir, f"nonstdfmtx.{fmt}")
output_path = os.path.join(output_dir, txt_filename)
runner(CMD + ['-a', '-o', fmt, '*S', '*b', '-f', input_path, bin_output])
runner(CMD + ['-a', '-o', 'dump', '-f', bin_output, output_path])
skip = "## Filename "
assert differ([expected_path, output_path, '-s', skip])
def test_pdf_single_glyph():
input_path = get_input_path("bug1218.otf")
pdf_filename = "bug1218.pdf"
expected_path = get_expected_path(pdf_filename)
output_dir = get_temp_dir_path()
output_path = os.path.join(output_dir, pdf_filename)
runner(CMD + ['-a', '-o', 'pdf', '1', '-f', input_path, output_path])
skip = PDF_SKIP[:]
skip.insert(0, '-s')
regex_skip = PDF_SKIP_REGEX[:]
for regex in regex_skip:
skip.append('-r')
skip.append(regex)
assert differ([expected_path, output_path] + skip)
def test_cffread_bug1343():
"""
Check FontBBox values
"""
actual_path = runner(CMD + ['-s', '-f', 'font.otf', '-o', '3'])
expected_path = get_expected_path('font.otf.dump3.txt')
assert differ([expected_path, actual_path, '-s', '## Filename'])
@pytest.mark.parametrize('arg, input, output, expected', [
('ufo', 'cidfont.subset', 'cidfont_subset.ufo', 'testCID.ufo'),
('t1', 'testCID.ufo', 'cidfont_subset.ufo', 'cidfont.subset'),
(('ufo', 't1'), 'cidfont.subset', 'cidfont_subset.ufo', 'cidfont.subset'),
(('t1', 'ufo'), 'testCID.ufo', 'cidfont_subset.ufo', 'testCID.ufo'),
])
def test_cidkeyed_read_write(arg, input, output, expected):
"""
Tests reading & writing CID-Keyed fonts in tx (uforead & ufowrite)
CID -> UFO (one-way test)
UFO -> CID (one-way test)
CID -> UFO -> CID (round-trip test)
UFO -> CID -> UFO (round-trip test)
"""
folder = "cid_roundtrip/"
input_path = get_input_path(folder + input)
output_dir = get_temp_dir_path()
output_path = os.path.join(output_dir, output)
expected_path = get_expected_path(folder + expected)
if isinstance(arg, tuple): # round-trip tests
runner(CMD + ['-a', '-o', arg[0], '-f',
input_path, output_path])
final_output_dir = get_temp_dir_path()
final_output_path = os.path.join(final_output_dir, output)
runner(CMD + ['-a', '-o', arg[1], '-f',
output_path, final_output_path])
output_path = final_output_path
else: # one-way tests
runner(CMD + ['-a', '-o', arg, '-f',
input_path, output_path])
if '.subset' in expected_path:
expected_path = generate_ps_dump(expected_path)
output_path = generate_ps_dump(output_path)
assert differ([expected_path, output_path])
@pytest.mark.parametrize("file", [
"missing_CID.ufo",
"missing_iFD.ufo",
])
def test_cidkeyed_lib_missing(file):
    folder = "cidkeyed_missing_lib/"
ufo_input_path = get_input_path(folder + file)
arg = [TOOL, '-t1', '-f', ufo_input_path]
assert subprocess.call(arg) == 6
def test_cff2_windows_line_endings_bug1355():
# Testing writing binary to stdout on Windows
# to ensure line endings are not inserted.
font_path = get_input_path('regular_CFF2.otf')
actual_path = runner(CMD + ['-s', '-a', '-o', 'cff2',
'*S', '*b', '-f', font_path])
expected_path = get_expected_path('bug1355.cff2')
assert differ([expected_path, actual_path, '-m', 'bin'])
|
import os
import pytest
import re
import subprocess
import time
from afdko.fdkutils import (
get_temp_file_path,
get_temp_dir_path,
)
from test_utils import (
get_input_path,
get_bad_input_path,
get_expected_path,
generate_ps_dump,
)
from runner import main as runner
from differ import main as differ, SPLIT_MARKER
TOOL = 'tx'
CMD = ['-t', TOOL]
def _get_extension(in_format):
if 'ufo' in in_format:
return '.ufo'
elif in_format == 'type1':
return '.pfa'
return '.' + in_format
PDF_SKIP = [
'/Creator' + SPLIT_MARKER +
'/Producer' + SPLIT_MARKER +
'/CreationDate' + SPLIT_MARKER +
'/ModDate' + SPLIT_MARKER +
'(Date:' + SPLIT_MARKER +
'(Time:',
]
PDF_SKIP_REGEX = [
'^.+30.00 Td',
'^.+0.00 Td',
]
PS_SKIP = [
'0 740 moveto (Filename:' + SPLIT_MARKER +
'560 (Date:' + SPLIT_MARKER +
'560 (Time:'
]
PS_SKIP2 = [
'%ADOt1write:'
]
PFA_SKIP = [
'%ADOt1write:' + SPLIT_MARKER +
'%%Copyright:' + SPLIT_MARKER
]
# -----------
# Basic tests
# -----------
@pytest.mark.parametrize('arg', ['-h', '-v', '-u'])
def test_exit_known_option(arg):
assert subprocess.call([TOOL, arg]) == 0
@pytest.mark.parametrize('arg', ['-bar', '-foo'])
def test_exit_unknown_option(arg):
assert subprocess.call([TOOL, arg]) == 1
@pytest.mark.parametrize('pth', [
['invalid_path'], # no such file or directory
[get_temp_file_path()], # end of file (not a font)
[get_input_path('type1.pfa'), 'a', 'b'], # too many file args
])
def test_exit_invalid_path_or_font(pth):
assert subprocess.call([TOOL] + pth) == 1
# -------------
# Options tests
# -------------
@pytest.mark.parametrize('args', [
['-s', '-t1'], # '-s' option must be last
['-t1', '-g', '0', '-gx', '1'], # options are mutually exclusive
['-dcf'], # non-CFF font
['-ps', '-1'], # must specify an all-glyph range
['-ufo'], ['-t1', '-pfb'], # must specify a destination path
['-t1', '-usefd'], # bad arg
['-t1', '-decid'], # input font is non-CID
])
def test_option_error_type1_input(args):
font_path = get_input_path('type1.pfa')
assert subprocess.call([TOOL] + args + [font_path]) == 1
@pytest.mark.parametrize('arg', ['-e', '-q', '+q', '-w', '+w', '-lf', '-cr',
'-crlf', '-decid', '-LWFN', '-pfb'])
def test_option_error_type1_clash(arg):
# options -pfb or -LWFN may not be used with other options
pfb = '-pfb' if arg != '-pfb' else '-LWFN'
assert subprocess.call([TOOL, '-t1', pfb, arg]) == 1
@pytest.mark.parametrize('args', [
['-cff', '-l'], ['-cff', '-0'], ['-cff', '-1'], ['-cff', '-2'],
['-cff', '-3'], ['-cff', '-4'], ['-cff', '-5'], ['-cff', '-6'],
['-cff', '-q'], ['-cff', '+q'], ['-cff', '-w'], ['-cff', '+w'],
['-cff', '-pfb'], ['-cff', '-usefd'], ['-cff', '-decid'],
['-cff', '-lf'], ['-cff', '-cr'], ['-cff', '-crlf'], ['-cff', '-LWFN'],
['-t1', '-gn0'], ['-t1', '-gn1'], ['-t1', '-gn2'], ['-t1', '-sa'],
['-t1', '-abs'], ['-t1', '-cefsvg'],
['-t1', '-no_futile'], ['-t1', '-no_opt'], ['-t1', '-d'], ['-t1', '+d'],
['-dcf', '-n'], ['-dcf', '-c'],
['-dump', '-E'], ['-dump', '+E'], ['-dump', '-F'], ['-dump', '+F'],
['-dump', '-O'], ['-dump', '+O'], ['-dump', '-S'], ['-dump', '+S'],
['-dump', '-T'], ['-dump', '+T'], ['-dump', '-V'], ['-dump', '+V'],
['-dump', '-b'], ['-dump', '+b'], ['-dump', '-e'], ['-dump', '+e'],
['-dump', '-Z'], ['-dump', '+Z'],
])
def test_option_error_wrong_mode(args):
assert subprocess.call([TOOL] + args) == 1
@pytest.mark.parametrize('arg', [
'-a', '-e', '-f', '-g', '-i', '-m', '-o', '-p', '-A', '-P', '-U', '-maxs',
'-usefd', '-fd', '-dd', '-sd', '-sr', ['-cef', '-F'], ['-dcf', '-T']
])
def test_option_error_no_args_left(arg):
if isinstance(arg, list):
arg_lst = [TOOL] + arg
else:
arg_lst = [TOOL, '-t1', arg]
assert subprocess.call(arg_lst) == 1
@pytest.mark.parametrize('args', [
['-maxs', 'X'], ['-m', 'X'], ['-e', 'X'], ['-e', '5'],
['-usefd', 'X'], ['-usefd', '-1']
])
def test_option_error_bad_arg(args):
assert subprocess.call([TOOL, '-t1'] + args) == 1
@pytest.mark.parametrize('arg2', ['-sd', '-sr', '-dd'])
@pytest.mark.parametrize('arg1', ['-a', '-f', '-A'])
def test_option_error_no_args_left2(arg1, arg2):
assert subprocess.call([TOOL, '-t1', arg1, arg2]) == 1
@pytest.mark.parametrize('arg2', ['-sd', '-sr', '-dd'])
@pytest.mark.parametrize('arg1', ['-a', '-f'])
def test_option_error_empty_list(arg1, arg2):
empty_dir = get_temp_dir_path()
assert subprocess.call([TOOL, '-t1', arg1, arg2, empty_dir]) == 1
@pytest.mark.parametrize('arg', ['-bc', '-z', '-cmp', '-sha1'])
def test_gone_options_bc(arg):
assert subprocess.call([TOOL, arg]) == 1
@pytest.mark.parametrize('mode, msg', [
('-h', b'tx (Type eXchange) is a test harness'),
('-u', b'tx {[mode][mode options][shared options][files]}*'),
('-afm', b'[-afm options: default none]'),
('-cef', b'[-cef options: default none]'),
('-cff', b'[-cff options: defaults -E, -F, -O, -S, +T, -V, -Z, -b, -d]'),
('-cff2', b'[-cff2 options: defaults -S, -b]'),
('-dcf', b'[-dcf options: defaults -T all, -5]'),
('-dump', b'[-dump options: default -1]'),
('-mtx', b'[-mtx options: default -0]'),
('-path', b'[-path options: default -0]'),
('-pdf', b'[-pdf options: default -0]'),
('-ps', b'[-ps options: default -0]'),
('-svg', b'[-svg options: defaults -lf, -gn0]'),
('-t1',
b'[-t1 options: defaults -0, -l, -E, -S, +T, -V, +q, -w, -e 4, -lf]'),
('-ufo', b'[-ufo options: default none]'),
])
def test_mode_help(mode, msg):
output = subprocess.check_output([TOOL, mode, '-h'])
assert msg in output
@pytest.mark.parametrize('dcf_dump_level', ['0', '1', '5'])
def test_script_file(dcf_dump_level):
font_path = get_input_path('cid.otf')
opts_path = get_temp_file_path()
opts_file_content = f'\n# foo\n # bar\r -{dcf_dump_level}\t"{font_path}"'
with open(opts_path, 'a') as fp:
fp.write(opts_file_content)
actual_path = runner(CMD + ['-s', '-a', '-o', 'dcf', 's', '-f', opts_path])
expected_path = get_expected_path(f'cid_dcf_{dcf_dump_level}.txt')
assert differ([expected_path, actual_path])
def test_nested_script():
# nested scripts not allowed
temp_path = get_temp_file_path()
assert subprocess.call([TOOL, '-s', 'foobar', '-s', temp_path]) == 1
@pytest.mark.parametrize('layer_name', ['', 'None', 'background', 'foobar'])
def test_ufo_altlayer(layer_name):
if not layer_name:
fname = 'processed'
args = []
else:
fname = 'foreground' if layer_name == 'None' else layer_name
args = ['altLayer', f'_{fname}']
actual_path = runner(CMD + ['-s', '-f', 'altlayer.ufo', '-o', '6'] + args)
expected_path = get_expected_path(f'altlayer_{fname}.txt')
assert differ([expected_path, actual_path])
@pytest.mark.parametrize('arg, filename', [
('-a', 'ufo3.t1'),
('-A', 'SourceSansPro-Regular.t1'),
])
def test_a_options(arg, filename):
input_path = get_input_path('ufo3.ufo')
output_path = os.path.join(os.getcwd(), filename)
assert os.path.exists(output_path) is False
subprocess.call([TOOL, '-t1', arg, input_path])
assert os.path.exists(output_path) is True
os.remove(output_path)
def test_o_option():
input_path = get_input_path('ufo3.ufo')
expected_path = get_expected_path('ufo3.pfa')
output_path = get_temp_file_path()
subprocess.call([TOOL, '-t1', '-o', output_path, input_path])
assert differ([expected_path, output_path, '-s', PFA_SKIP[0]])
def test_f_option():
fpath1 = get_input_path('type1.pfa')
fpath2 = get_input_path('cff2_vf.otf')
actual_path = runner(CMD + ['-s', '-o', 'mtx', '3',
'f', f'_{fpath1}', f'_{fpath2}'])
expected_path = get_expected_path('mtx_f_options.txt')
assert differ([expected_path, actual_path])
def test_stdin():
input_path = get_input_path('type1.pfa')
expected_path = get_expected_path('stdin.txt')
output_path = get_temp_file_path()
with open(input_path) as fp:
output = subprocess.check_output([TOOL], stdin=fp)
with open(output_path, 'wb') as fp:
fp.write(output)
assert differ([expected_path, output_path])
@pytest.mark.parametrize('arg', ['0', '-16'])
def test_m_option_success(arg):
# mem_manage() is called 16 times with the command 'tx -m 0 type1.pfa'
input_path = get_input_path('type1.pfa')
assert subprocess.call([TOOL, '-m', arg, input_path]) == 0
# Disabled because of https://github.com/adobe-type-tools/afdko/issues/933
# @pytest.mark.parametrize('arg', range(1, 16))
# def test_m_option_fail(arg):
# input_path = get_input_path('type1.pfa')
# assert subprocess.call([TOOL, '-m', f'-{arg}', input_path]) != 0
@pytest.mark.parametrize('arg, exp_filename', [(None, 'not_removed'),
('-V', 'not_removed'),
('+V', 'removed')])
def test_V_option(arg, exp_filename):
input_path = get_input_path('overlap.pfa')
expected_path = get_expected_path(f'overlap_{exp_filename}.pfa')
output_path = get_temp_file_path()
args = [TOOL, '-t1', '-o', output_path, input_path]
if arg:
args.insert(2, arg)
subprocess.call(args)
assert differ([expected_path, output_path] + ['-s'] + PFA_SKIP)
# -------------
# Convert tests
# -------------
@pytest.mark.parametrize('to_format', [
'ufo2',
'ufo3',
'type1',
'svg',
'mtx',
'afm',
'pdf',
'ps',
'cff',
])
@pytest.mark.parametrize('from_format', [
'ufo2',
'ufo3',
'type1',
])
def test_convert(from_format, to_format):
from_ext = _get_extension(from_format)
to_ext = _get_extension(to_format)
# input filename
from_filename = from_format + from_ext
# expected filename
exp_filename = from_format + to_ext
# runner args
if 'ufo' in to_format:
save_path = get_temp_dir_path('font.ufo')
else:
save_path = get_temp_file_path()
# diff mode
if to_format == 'cff':
diff_mode = ['-m', 'bin']
else:
diff_mode = []
# skip items
regex_skip = []
skip = []
if to_format == 'afm':
skip = ['Comment Creation Date:' + SPLIT_MARKER + 'Comment Copyright']
elif to_format == 'pdf':
skip = PDF_SKIP[:]
regex_skip = PDF_SKIP_REGEX[:]
elif to_format == 'ps':
skip = PS_SKIP[:]
elif to_format == 'type1':
skip = PFA_SKIP[:]
if skip:
skip.insert(0, '-s')
if regex_skip:
for regex in regex_skip:
skip.append('-r')
skip.append(regex)
# format arg fix
if to_format in ('ufo2', 'ufo3'):
format_arg = 'ufo'
elif to_format == 'type1':
format_arg = 't1'
else:
format_arg = to_format
runner(CMD + ['-a', '-f', get_input_path(from_filename), save_path,
'-o', format_arg])
expected_path = get_expected_path(exp_filename)
assert differ([expected_path, save_path] + skip + diff_mode)
def test_cef_cefsvg():
font_path = get_input_path('cff2_vf.otf')
output_path = get_temp_file_path()
runner(CMD + ['-a', '-o', 'cef', 'cefsvg', 'cr', 'gn1', 'abs', 'sa',
'-f', font_path, output_path])
expected_path = get_expected_path('cef_cefsvg_cr.svg')
assert differ([expected_path, output_path])
@pytest.mark.parametrize('file_ext', [
'pfa', 'pfabin', 'pfb', 'lwfn', 'bidf']) # TODO: 'bidf85'
def test_type1_inputs(file_ext):
bidf = '.bidf' if 'bidf' in file_ext else ''
actual_path = runner(CMD + ['-s', '-o', '2', '-f', f'type1.{file_ext}'])
expected_path = get_expected_path(f'type1.dump2{bidf}.txt')
assert differ([expected_path, actual_path, '-s', '## Filename'])
@pytest.mark.parametrize('args', [[], ['U', '_500,500'], ['U', '_0,0', 'n']])
@pytest.mark.parametrize('fname', ['zx', 'zy'])
def test_type1mm_inputs(fname, args):
fname2 = f'.{"".join(args)}' if args else ''
actual_path = runner(CMD + ['-s', '-f', f'{fname}.pfb', '-o', '2'] + args)
expected_path = get_expected_path(f'{fname}.dump2{fname2}.txt')
assert differ([expected_path, actual_path, '-s', '## Filename'])
@pytest.mark.parametrize('fext', ['otf', 'ttf', 'cff', 'cef', 'ttc'])
def test_other_input_formats(fext):
arg = ['y'] if fext == 'ttc' else []
actual_path = runner(CMD + ['-s', '-f', f'font.{fext}', '-o', '3'] + arg)
expected_path = get_expected_path(f'font.{fext}.dump3.txt')
assert differ([expected_path, actual_path, '-s', '## Filename'])
# ----------
# Dump tests
# ----------
@pytest.mark.parametrize('args', [
[],
['0'],
['dump', '0'],
['1'],
['2'],
['3'],
['4'],
['4', 'N'],
['5'],
['6'],
['6', 'd'],
['6', 'n'],
])
@pytest.mark.parametrize('font_filename', ['type1.pfa', 'svg.svg'])
def test_dump_option(args, font_filename):
if any([arg in args for arg in ('4', '5', '6')]):
skip = []
else:
skip = ['-s', '## Filename']
head = font_filename.split('.')[0]
midl = ''.join(args) if args else 'dump1'
if 'dump' not in midl:
midl = f'dump{midl}'
exp_filename = f'{head}.{midl}.txt'
opts = ['-o'] + args if args else []
actual_path = runner(CMD + ['-s', '-f', font_filename] + opts)
expected_path = get_expected_path(exp_filename)
assert differ([expected_path, actual_path] + skip)
@pytest.mark.parametrize('fext', ['pfa', 'ufo'])
def test_dump_flex_op(fext):
fname = 'flex'
actual_path = runner(CMD + ['-s', '-o', '6', '-f', f'{fname}.{fext}'])
expected_path = get_expected_path(f'{fname}.txt')
assert differ([expected_path, actual_path])
# ----------
# CFF2 tests
# ----------
@pytest.mark.parametrize('filename, msg', [
('avar_invalid_table_version',
b'(cfr) invalid avar table version'),
('fvar_invalid_table_version',
b'(cfr) invalid fvar table version'),
('avar_invalid_table_size',
b'(cfr) invalid avar table size'),
('fvar_invalid_table_size',
b'(cfr) invalid fvar table size'),
('fvar_invalid_table_header',
b'(cfr) invalid values in fvar table header'),
('avar_invalid_axis-instance_count-size',
b'(cfr) invalid avar table size or axis/instance count/size'),
('fvar_invalid_axis-instance_count-size',
b'(cfr) invalid fvar table size or axis/instance count/size'),
('avar_axis_value_map_out_of_bounds',
b'(cfr) avar axis value map out of bounds'),
('avar_fvar_axis_mismatch',
b'(cfr) mismatching axis counts in fvar and avar'),
])
def test_varread_errors(filename, msg):
font_path = get_bad_input_path(f'vf_{filename}.otf')
output = subprocess.check_output([TOOL, '-dcf', '-0', font_path],
stderr=subprocess.STDOUT)
assert msg in output
@pytest.mark.parametrize('args, exp_filename', [
([], 'SourceCodeVar-Roman_CFF2'),
(['*S', '*b', 'std'], 'SourceCodeVar-Roman_CFF2_subr'), # subroutinize
])
def test_cff2_extract(args, exp_filename):
# read CFF2 VF, write CFF2 table
font_path = get_input_path('SourceCodeVariable-Roman.otf')
cff2_path = get_temp_file_path()
runner(CMD + ['-a', '-f', font_path, cff2_path, '-o', 'cff2'] + args)
expected_path = get_expected_path(exp_filename)
assert differ([expected_path, cff2_path, '-m', 'bin'])
def test_cff2_sub_dump():
# Dump a subroutinized CFF2 font. This is a J font with 64K glyphs,
# and almost every subr and charstring is a single subr call.
# A good test for problems with charstrings with no endchar operator.
actual_path = runner(CMD + ['-s', '-o', 'dump', '6', 'g', '_21847',
'-f', 'CFF2-serif-sub.cff2'])
expected_path = get_expected_path('CFF2-serif-sub.cff2.txt')
assert differ([expected_path, actual_path])
def test_varread_pr355():
# read CFF2 VF, write Type1 snapshot
# Note that cff2_vf is built from the sources at:
# afdko/tests/buildmasterotfs_data/input/cff2_vf.
actual_path = runner(CMD + ['-s', '-o', 't1', '-f', 'cff2_vf.otf'])
expected_path = get_expected_path('cff2_vf.pfa')
skip = ['-s'] + PFA_SKIP[:]
assert differ([expected_path, actual_path] + skip)
def test_cff2_no_vf_bug353():
# read CFF2 WITHOUT VF info, write a CFF2 out. 'regular_CFF2.otf'
# is derived by taking the regular.otf file from the sfntdiff
# 'input_data' directory, and converting the CFF table to CFF2.
font_path = get_input_path('regular_CFF2.otf')
cff2_path = get_temp_file_path()
runner(CMD + ['-a', '-o', 'cff2', '-f', font_path, cff2_path])
expected_path = get_expected_path('regular_CFF2.cff2')
assert differ([expected_path, cff2_path, '-m', 'bin'])
def test_cff2_with_spare_masters_pr835():
# SetNumMasters was incorrectly passing the number of region indices to
# var_getIVSRegionIndices for the regionListCount. With PR #835 it now
# passes the total region count for regionListCount.
#
# Example of the bug -- this command:
# tx -cff2 +S +b -std SHSansJPVFTest.otf SHSansJPVFTest.cff2
# Would produce the following warning & error:
# inconsistent region indices detected in item variation store subtable 1
# memory error
font_path = get_input_path('SHSansJPVFTest.otf')
output_path = get_temp_file_path()
runner(CMD + ['-a', '-o',
'cff2', '*S', '*b', 'std',
'-f', font_path, output_path])
expected_path = get_expected_path('SHSansJPVFTest.cff2')
assert differ([expected_path, output_path, '-m', 'bin'])
@pytest.mark.parametrize('vector, exp_filename', [
('9999,9999,9999,9999,999,9', 'psname_last_resort_no.txt'),
('9999,9999,9999,9999,999,99', 'psname_last_resort_yes.txt'),
])
def test_last_resort_instance_psname(vector, exp_filename):
font_path = get_input_path('cff2_vf_many_axes.otf')
output_path = get_temp_file_path()
runner(CMD + ['-o', '0', 'U', f'_{vector}', '-f', font_path, output_path])
expected_path = get_expected_path(exp_filename)
assert differ([expected_path, output_path, '-s', '## Filename'])
# -----------
# Other tests
# -----------
def test_trademark_string_pr425():
# the copyright symbol used in the trademark field of a UFO is
# converted to 'Copyright' and stored in Notice field of a Type1
actual_path = runner(CMD + ['-s', '-o', 't1', '-f', 'trademark.ufo'])
expected_path = get_expected_path('trademark.pfa')
skip = ['-s'] + PFA_SKIP[:]
assert differ([expected_path, actual_path] + skip)
def test_remove_hints_bug180():
font_path = get_input_path('cid.otf')
cid_path = get_temp_file_path()
runner(CMD + ['-a', '-o', 't1', 'n', '-f', font_path, cid_path])
expected_path = get_expected_path('cid_nohints.ps')
expected_path = generate_ps_dump(expected_path)
actual_path = generate_ps_dump(cid_path)
skip = ['-s'] + PS_SKIP2
assert differ([expected_path, actual_path] + skip)
def test_long_charstring_read_bug444():
# read a CFF2 VF with a charstring longer that 65535, check output
actual_path = runner(CMD + ['-s', '-o', '0', '-f', 'CJK-VarTest.otf'])
expected_path = get_expected_path('CJK-VarTest_read.txt')
assert differ([expected_path, actual_path, '-s', '## Filename'])
def test_long_charstring_warning():
# read a CFF2 VF with a charstring longer that 65535, check warning message
# NOTE: can't diff the output against 'CJK-VarTest_warn.txt' because on
# Windows the lines start with 'tx.exe:' instead of just 'tx:'
actual_path = runner(
CMD + ['-s', '-e', '-o', '5', '-f', 'CJK-VarTest.otf'])
# expected_path = get_expected_path('CJK-VarTest_warn.txt')
with open(actual_path, 'rb') as f:
output = f.read()
assert b"(cfr) Warning: CharString of GID 1 is 71057 bytes long" in output
def test_long_charstring_write():
# read a CFF2 VF with a charstring longer that 65535, write out CFF2 file
# NOTE: the font 'CJK-VarTest.otf' cannot be used in this test because
# once its long charstring is optimized (floats -> ints) it's no longer
# over the 65535 bytes limit; the long charstring in 'CJK-VarTest2.otf' is
# already as small as possible, so it will trigger the check in cffwrite.c
font_path = get_input_path('CJK-VarTest2.otf')
cff2_path = get_temp_file_path()
runner(CMD + ['-a', '-o', 'cff2', '-f', font_path, cff2_path])
expected_path = get_expected_path('CJK-VarTest2.cff2')
assert differ([expected_path, cff2_path, '-m', 'bin'])
def test_many_hints_string_bug354():
# The glyph T@gid002 has 33 hstem hints. This tests a bug where
# tx defined an array of only 6 operands.
# This is encountered only when writing to a VF CFF2.
font_path = get_input_path('cff2_vf.otf')
cff2_path = get_temp_file_path()
dcf_path = get_temp_file_path()
runner(CMD + ['-a', '-o', 'cff2', '-f', font_path, cff2_path])
runner(CMD + ['-a', '-o', 'dcf', '-f', cff2_path, dcf_path])
expected_path = get_expected_path('cff2_vf.dcf.txt')
assert differ([expected_path, dcf_path])
def test_non_varying_glyphs_bug356():
"""A glyph which is non-varying in a variable font may be referenced by a
VariationStore data item subtable which has a region count of 0. The VF
support code assumed that this was an error, and issued a false warning.
File 'bug356.otf' is a handcrafted modification of 'cff2_vf.otf'. The
latter cannot be used as-is to validate the fix."""
actual_path = get_temp_file_path()
font_path = get_input_path('bug356.otf')
stderr_path = runner(CMD + ['-s', '-e', '-a', '-o', 'cff',
'-f', font_path, actual_path])
expected_path = get_expected_path('bug356.txt')
assert differ([expected_path, stderr_path, '-l', '1'])
@pytest.mark.parametrize('font_format', ['type1', 'cidfont', 'ufo2', 'ufo3'])
def test_no_psname_dump_bug437(font_format):
if 'cid' in font_format:
file_ext = 'ps'
elif 'ufo' in font_format:
file_ext = 'ufo'
else:
file_ext = 'pfa'
filename = f'{font_format}-noPSname.{file_ext}'
expected_path = get_expected_path(f'bug437/dump-{font_format}.txt')
actual_path = runner(CMD + ['-s', '-o', 'dump', '0', '-f', filename])
assert differ([expected_path, actual_path, '-l', '1'])
@pytest.mark.parametrize('font_format', ['type1', 'cidfont', 'ufo2', 'ufo3'])
def test_no_psname_convert_to_ufo_bug437(font_format):
if 'cid' in font_format:
file_ext = 'ps'
elif 'ufo' in font_format:
file_ext = 'ufo'
else:
file_ext = 'pfa'
font_path = get_input_path(f'{font_format}-noPSname.{file_ext}')
expected_path = get_expected_path(f'bug437/{font_format}.ufo')
save_path = get_temp_dir_path(f'{font_format}.ufo')
runner(CMD + ['-a', '-o', 'ufo', '-f', font_path, save_path])
assert differ([expected_path, save_path])
@pytest.mark.parametrize('font_format', ['type1', 'cidfont', 'ufo2', 'ufo3'])
def test_no_psname_convert_to_type1_bug437(font_format):
if 'cid' in font_format:
file_ext = 'ps'
elif 'ufo' in font_format:
file_ext = 'ufo'
else:
file_ext = 'pfa'
filename = f'{font_format}-noPSname.{file_ext}'
with pytest.raises(subprocess.CalledProcessError) as err:
runner(CMD + ['-o', 't1', '-f', filename])
assert err.value.returncode in (5, 6)
def test_illegal_chars_in_glyph_name_bug473():
font_path = get_input_path('bug473.ufo')
save_path = get_temp_dir_path('bug473.ufo')
runner(CMD + ['-a', '-o', 'ufo', '-f', font_path, save_path])
expected_path = get_expected_path('bug473.ufo')
assert differ([expected_path, save_path])
def test_subroutine_sorting_bug494():
""" The input file was made with the command:
tx -t1 -g 0-5 \
source-serif-pro/Roman/Instances/Regular/font.ufo bug494.pfa
The bug is that two subroutines in the Windows CFF output are swapped in
index order from the Mac version. This was because of an unstable
'qsort' done on the subroutines in the final stage of selection."""
font_path = get_input_path('bug494.pfa')
cff_path = get_temp_file_path()
dcf_path = get_temp_file_path()
runner(CMD + ['-a', '-o', 'cff', '*S', 'std', '*b',
'-f', font_path, cff_path])
runner(CMD + ['-a', '-o', 'dcf', '-f', cff_path, dcf_path])
expected_path = get_expected_path('bug494.dcf.txt')
assert differ([expected_path, dcf_path])
@pytest.mark.parametrize('args, exp_filename', [([], 'roundtrip'),
(['g', '_0-1'], 'subset')])
@pytest.mark.parametrize('to_format', ['t1', 'cff', 'afm'])
def test_recalculate_font_bbox_bug618(to_format, args, exp_filename):
font_path = get_input_path('bug618.pfa')
save_path = get_temp_file_path()
runner(CMD + ['-f', font_path, save_path, '-o', to_format] + args)
file_ext = to_format
if to_format == 't1':
file_ext = 'pfa'
elif to_format == 'afm':
file_ext = 'txt'
expected_path = get_expected_path(
f'bug618/{exp_filename}.{file_ext}')
diff_mode = []
if to_format == 'cff':
diff_mode = ['-m', 'bin']
skip = []
if to_format == 'afm':
skip = ['-s', 'Comment Creation Date:' + SPLIT_MARKER +
'Comment Copyright']
elif to_format == 't1':
skip = ['-s'] + PFA_SKIP[:]
assert differ([expected_path, save_path] + diff_mode + skip)
def test_glyph_bboxes_bug655():
actual_path = runner(CMD + ['-s', '-o', 'mtx', '2', '-f', 'bug655.ufo'])
expected_path = get_expected_path('bug655.txt')
assert differ([expected_path, actual_path])
@pytest.mark.parametrize('filename', ['SHSVF_9b3b', 'bug684'])
def test_cs_opt_bug684(filename):
""" The input CFF2 variable font contains a long single charstring
making the maximum use of the operand stack.
tx was generating a bad CFF2 charstring that would overflow
the operand stack of the standard size (513) after being re-converted
to CFF2 unless the -no_opt option is specified."""
font_path = get_input_path(f'{filename}.otf')
result_path = get_temp_file_path()
expected_path = get_expected_path(f'{filename}.cff2')
runner(CMD + ['-a', '-o', 'cff2', '-f', font_path, result_path])
assert differ([expected_path, result_path, '-m', 'bin'])
def test_standard_apple_glyph_names():
actual_path = runner(CMD + ['-s', '-o', 'dump', '4', '-f', 'post-v2.ttf'])
expected_path = get_expected_path('post-v2.txt')
assert differ([expected_path, actual_path])
def test_ufo_self_closing_dict_element_bug701():
actual_path = runner(CMD + ['-s', '-o', 'dump', '0', '-f', 'bug701.ufo'])
expected_path = get_expected_path('bug701.txt')
assert differ([expected_path, actual_path, '-s', '## Filename'])
def test_ufo3_guideline_bug705():
actual_path = runner(CMD + ['-s', '-o', 't1', '-f', 'bug705.ufo'])
expected_path = get_expected_path('bug705.pfa')
assert differ([expected_path, actual_path] + ['-s'] + PFA_SKIP)
def test_ufo_vertical_advance_bug786():
actual_path = runner(CMD + ['-s', '-o', 't1', '-f', 'bug786.ufo'])
expected_path = get_expected_path('bug786.pfa')
skip = ['-s'] + PFA_SKIP[:]
assert differ([expected_path, actual_path] + skip)
@pytest.mark.parametrize('filename', [
'a', # AE glyph in both default and processed layers
'b', # AE glyph in default layer only
'c', # AE glyph in processed layer only
])
def test_ufo_read_processed_contents_plist_bug740(filename):
actual_path = runner(CMD + ['-s', '-o', 'dump', '6', 'g', '_AE',
'-f', f'bug740/{filename}.ufo'])
expected_path = get_expected_path(f'bug740/{filename}.txt')
assert differ([expected_path, actual_path])
def test_dcf_with_infinite_recursion_bug775():
font_path = get_bad_input_path('subr_test_font_infinite_recursion.otf')
dcf_path = get_temp_file_path()
with pytest.raises(subprocess.CalledProcessError) as err:
runner(CMD + ['-a', '-o', 'dcf', '-f', font_path, dcf_path])
assert(err.value.returncode == 1) # exit code of 1, not segfault of -11
expected_path = get_expected_path(
'subr_test_font_infinite_recursion.dcf.txt')
assert differ([expected_path, dcf_path])
def test_dcf_call_depth_with_many_calls_bug846():
# This font was getting an invalid subroutine count because tx wasn't
# decrementing the subroutine call depth after the subroutine calls,
# so it was effectively just counting the total number of calls,
# not the call depth.
font_path = get_input_path('SHSansJPVFTest_SUBR.otf')
dcf_path = get_temp_file_path()
runner(CMD + ['-a', '-o', 'dcf', '-f', font_path, dcf_path])
expected_path = get_expected_path('SHSansJPVFTest_SUBR.dcf.txt')
assert differ([expected_path, dcf_path])
def test_svg_with_cid_font_bug822():
font_path = get_input_path('cid.otf')
cid_path = get_temp_file_path()
runner(CMD + ['-a', '-o', 'svg', '-f', font_path, cid_path])
expected_path = get_expected_path('cid.svg')
assert differ([expected_path, cid_path])
@pytest.mark.parametrize('filename',
['type1-noPSname.pfa', 'cidfont-noPSname.ps'])
def test_svg_missing_fontname_bug883(filename):
font_path = get_input_path(filename)
svg_path = get_temp_file_path()
with pytest.raises(subprocess.CalledProcessError) as err:
runner(CMD + ['-a', '-o', 'svg', '-f', font_path, svg_path])
assert(err.value.returncode == 6) # exit code of 6, not segfault of -11
@pytest.mark.parametrize('option', ['dump', 'dcf'])
def test_read_fdselect_format_4(option):
font_name = 'fdselect4.otf'
input_path = get_input_path(font_name)
output_path = get_temp_file_path()
runner(CMD + ['-a', '-o', option, '-f', input_path, output_path])
expected_path = get_expected_path(font_name + '.' + option)
assert differ([expected_path, output_path, '-s', '## Filename'])
def test_write_fdselect_format_4():
font_name = 'FDArrayTest257FontDicts.otf'
input_path = get_input_path(font_name)
output_path = get_temp_file_path()
runner(CMD + ['-a', '-o', 'cff2', '-f', input_path, output_path])
expected_path = get_expected_path('FDArrayTest257FontDicts.cff2')
assert differ([expected_path, output_path, '-m', 'bin'])
@pytest.mark.parametrize('option', ['cff', 'dcf'])
@pytest.mark.parametrize('font_name',
['bug895_charstring.otf', 'bug895_private_dict.otf'])
def test_read_short_charstring_bug895(option, font_name):
input_path = get_bad_input_path(font_name)
output_path = runner(CMD + ['-s', '-e', '-a', '-o', option,
'-f', input_path])
expected_path = get_expected_path(font_name + '.' + option)
skip = ['-s', 'tx: ---'] # skip line with filename
assert differ([expected_path, output_path] + skip)
@pytest.mark.parametrize('option', ['cff2', 'cff'])
def test_drop_defaultwidthx_when_writing_cff2_bug897(option):
input_path = get_bad_input_path('bug897.otf')
output_path = get_temp_file_path()
runner(CMD + ['-a', '-o', option, '-f', input_path, output_path])
dcf_path = get_temp_file_path()
runner(CMD + ['-a', '-o', 'dcf', '-f', output_path, dcf_path])
expected_path = get_expected_path('bug897.' + option + '.dcf')
assert differ([expected_path, dcf_path])
@pytest.mark.parametrize('option', ['afm', 'dump', 'svg'])
def test_missing_glyph_names_pr905(option):
input_path = get_bad_input_path('pr905.otf')
output_path = get_temp_file_path()
runner(CMD + ['-a', '-o', option, '-f', input_path, output_path])
expected_path = get_expected_path('pr905' + '.' + option)
if option == 'afm':
skip = ['-s',
'Comment Creation Date:' + SPLIT_MARKER + 'Comment Copyright']
elif option == 'dump':
skip = ['-s', '## Filename']
else:
skip = []
assert differ([expected_path, output_path] + skip)
def test_missing_glyph_names_pr905_cef():
input_path = get_bad_input_path('pr905.otf')
output_path = get_temp_file_path()
with pytest.raises(subprocess.CalledProcessError) as err:
runner(CMD + ['-a', '-o', 'cef', '-f', input_path, output_path])
assert(err.value.returncode > 0) # error code, not segfault of -11
def test_var_bug_913():
# AdobeVFPrototype_mod.otf is a modified copy of AdobeVFPrototype.otf 1.003
# so that the region indexes in HVAR are listed in a different order from
# those in CFF2. Also, the MVAR table has been modified to contain (dummy)
# deltas for underline offset and underline thickness just to exercise
# MVAR lookup code.
font_path = get_input_path('AdobeVFPrototype_mod.otf')
save_path = get_temp_file_path()
runner(CMD + ['-a', '-o',
'3', 'g', '_A,W,y', 'U', '_900,0',
'-f', font_path, save_path])
expected_path = get_expected_path('bug913.txt')
assert differ([expected_path, save_path, '-s', '## Filename'])
def test_bad_charset():
font_path = get_bad_input_path('bad_charset.otf')
save_path = get_temp_file_path()
runner(CMD + ['-a', '-f', font_path, save_path])
expected_path = get_expected_path('bad_charset.txt')
assert differ([expected_path, save_path, '-s', '## Filename'])
def test_bug_940():
input_path = get_bad_input_path('bug940_private_blend.otf')
output_path = get_temp_file_path()
with pytest.raises(subprocess.CalledProcessError) as err:
runner(CMD + ['-a', '-o', 'cff2', '-f', input_path, output_path])
assert(err.value.returncode > 0) # error code, not segfault or success
def test_too_many_glyphs_pr955():
input_path = get_bad_input_path('TooManyGlyphsCFF2.otf')
output_path = get_temp_file_path()
with pytest.raises(subprocess.CalledProcessError) as err:
runner(CMD + ['-a', '-o', 'cff', '-f', input_path, output_path])
assert(err.value.returncode > 0) # error code, not hang or success
def test_ttread_varinst():
font_path = get_input_path('AdobeVFPrototype.ttf')
save_path = get_temp_file_path()
runner(CMD + ['-a', '-o', '3', 'g', '_A', 'U', '_500,800',
'-f', font_path, save_path])
expected_path = get_expected_path('vfproto_tt_inst500_800.txt')
assert differ([expected_path, save_path, '-s', '## Filename'])
def test_unused_post2_names():
font_path = get_input_path('SourceSansPro-Regular-cff2-unused-post.otf')
save_path = get_temp_file_path()
runner(CMD + ['-a', '-o', '1', '-f', font_path, save_path])
expected_path = get_expected_path('ssr-cff2-unused-post.txt')
assert differ([expected_path, save_path, '-s', '## Filename'])
def test_seac_reporting():
# This test aims to show that the SEAC operator
# is not reported by all tx modes
font_path = get_input_path('seac.otf')
save_path = get_temp_file_path()
runner(CMD + ['-a', '-o', '6', '-f', font_path, save_path])
expected_path = get_expected_path('seac.dump.txt')
assert differ([expected_path, save_path])
runner(CMD + ['-a', '-o', 'dcf', '5', 'T', '_c',
'-f', font_path, save_path])
expected_path = get_expected_path('seac.dcf.txt')
assert differ([expected_path, save_path])
def test_date_and_time_afm():
"""
test the use of date and time functions in absfont_afm.c
"""
input_path = get_input_path('font.otf')
output_path = get_temp_file_path()
runner(CMD + ['-a', '-o', 'afm', '-f', input_path, output_path])
now = time.time()
year = '%s' % time.localtime().tm_year
with open(output_path) as output_file:
lines = output_file.readlines()
file_year = lines[1].split()[2]
assert year == file_year
file_time_str = lines[2].split(': ')[1].strip()
file_time = time.mktime(
time.strptime(file_time_str, '%a %b %d %H:%M:%S %Y'))
hours_diff = abs(now - file_time) / 3600
assert(hours_diff < 1)
def test_date_and_time_ps():
"""
test the use of date and time functions in absfont_draw.c
"""
input_path = get_input_path('font.otf')
output_path = get_temp_file_path()
runner(CMD + ['-a', '-o', 'ps', '-f', input_path, output_path])
now = time.time()
with open(output_path) as output_file:
lines = output_file.readlines()
date_str = re.split(r'[()]', lines[5])[1]
date_str = date_str.split(': ')[1]
time_str = re.split(r'[()]', lines[7])[1]
time_str = time_str.split(': ')[1]
file_date_and_time_str = date_str + ' ' + time_str
file_time = time.mktime(
time.strptime(file_date_and_time_str, '%m/%d/%y %H:%M'))
hours_diff = abs(now - file_time) / 3600
assert(hours_diff < 1)
def test_date_and_time_pdf():
"""
test the use of date and time functions in pdfwrite.c
"""
input_path = get_input_path('font.otf')
output_path = get_temp_file_path()
runner(CMD + ['-a', '-o', 'pdf', '-f', input_path, output_path])
now = time.time()
tz = time.timezone
tz_hr = abs(int(tz / 3600)) # ignore sign since we're splitting on +/-
tz_min = (tz % 3600) // 60
with open(output_path) as output_file:
lines = output_file.readlines()
creation_date_str = re.split(r'[()]', lines[13])[1]
mod_date_str = re.split(r'[()]', lines[14])[1]
assert(creation_date_str == mod_date_str)
(date_time_str, tz_hr_str, tz_min_str) = \
re.split(r"[:+\-Z']", creation_date_str)[1:4]
creation_time = time.mktime(
time.strptime(date_time_str, '%Y%m%d%H%M%S'))
hours_diff = abs(now - creation_time) / 3600
assert(hours_diff < 1)
creation_tz_hr = int(tz_hr_str)
assert(creation_tz_hr == tz_hr)
creation_tz_min = int(tz_min_str)
assert(creation_tz_min == tz_min)
file_date_str = re.split(r"[():]", lines[36])[2].strip()
file_time_str = re.split(r"[() ]", lines[38])[3]
file_date_time_str = file_date_str + ' ' + file_time_str
file_time = time.mktime(
time.strptime(file_date_time_str, "%d %b %y %H:%M"))
hours_diff = abs(now - file_time) / 3600
assert(hours_diff < 1)
def test_overlap_removal():
input_path = get_input_path('overlaps.ufo')
expected_path = get_expected_path('overlaps.pfa')
output_path = get_temp_file_path()
args = [TOOL, '-t1', '+V', '-o', output_path, input_path]
subprocess.call(args)
assert differ([expected_path, output_path, '-s', PFA_SKIP[0]])
@pytest.mark.parametrize("fmt", [
"cff",
"cff2",
])
def test_nonstd_fontmatrix(fmt):
input_path = get_input_path("nonstdfmtx.otf")
txt_filename = f"nonstdfmtx_{fmt}.txt"
expected_path = get_expected_path(txt_filename)
output_dir = get_temp_dir_path()
bin_output = os.path.join(output_dir, f"nonstdfmtx.{fmt}")
output_path = os.path.join(output_dir, txt_filename)
runner(CMD + ['-a', '-o', fmt, '*S', '*b', '-f', input_path, bin_output])
runner(CMD + ['-a', '-o', 'dump', '-f', bin_output, output_path])
skip = "## Filename "
assert differ([expected_path, output_path, '-s', skip])
def test_pdf_single_glyph():
input_path = get_input_path("bug1218.otf")
pdf_filename = "bug1218.pdf"
expected_path = get_expected_path(pdf_filename)
output_dir = get_temp_dir_path()
output_path = os.path.join(output_dir, pdf_filename)
runner(CMD + ['-a', '-o', 'pdf', '1', '-f', input_path, output_path])
skip = PDF_SKIP[:]
skip.insert(0, '-s')
regex_skip = PDF_SKIP_REGEX[:]
for regex in regex_skip:
skip.append('-r')
skip.append(regex)
assert differ([expected_path, output_path] + skip)
def test_cffread_bug1343():
"""
Check FontBBox values
"""
actual_path = runner(CMD + ['-s', '-f', 'font.otf', '-o', '3'])
expected_path = get_expected_path('font.otf.dump3.txt')
assert differ([expected_path, actual_path, '-s', '## Filename'])
@pytest.mark.parametrize('arg, input, output, expected', [
('ufo', 'cidfont.subset', 'cidfont_subset.ufo', 'testCID.ufo'),
('t1', 'testCID.ufo', 'cidfont_subset.ufo', 'cidfont.subset'),
(('ufo', 't1'), 'cidfont.subset', 'cidfont_subset.ufo', 'cidfont.subset'),
(('t1', 'ufo'), 'testCID.ufo', 'cidfont_subset.ufo', 'testCID.ufo'),
])
def test_cidkeyed_read_write(arg, input, output, expected):
"""
Tests reading & writing CID-Keyed fonts in tx (uforead & ufowrite)
CID -> UFO (one-way test)
UFO -> CID (one-way test)
CID -> UFO -> CID (round-trip test)
UFO -> CID -> UFO (round-trip test)
"""
folder = "cid_roundtrip/"
input_path = get_input_path(folder + input)
output_dir = get_temp_dir_path()
output_path = os.path.join(output_dir, output)
expected_path = get_expected_path(folder + expected)
if isinstance(arg, tuple): # round-trip tests
runner(CMD + ['-a', '-o', arg[0], '-f',
input_path, output_path])
final_output_dir = get_temp_dir_path()
final_output_path = os.path.join(final_output_dir, output)
runner(CMD + ['-a', '-o', arg[1], '-f',
output_path, final_output_path])
output_path = final_output_path
else: # one-way tests
runner(CMD + ['-a', '-o', arg, '-f',
input_path, output_path])
if '.subset' in expected_path:
expected_path = generate_ps_dump(expected_path)
output_path = generate_ps_dump(output_path)
assert differ([expected_path, output_path])
@pytest.mark.parametrize("file", [
"missing_CID.ufo",
"missing_iFD.ufo",
])
def test_cidkeyed_lib_missing(file):
folder = "cidkeyed_missing_lib/"
ufo_input_path = get_input_path(folder + file)
arg = [TOOL, '-t1', '-f', ufo_input_path]
assert subprocess.call(arg) == 6
def test_cff2_windows_line_endings_bug1355():
# Testing writing binary to stdout on Windows
# to ensure line endings are not inserted.
font_path = get_input_path('regular_CFF2.otf')
actual_path = runner(CMD + ['-s', '-a', '-o', 'cff2',
'*S', '*b', '-f', font_path])
expected_path = get_expected_path('bug1355.cff2')
assert differ([expected_path, actual_path, '-m', 'bin'])
import os
import re
import sys
import copy
import logging
import warnings
import subprocess
import shutil
import uuid
import tempfile
import asyncio
from collections import OrderedDict
from pprint import pformat
from yggdrasil import platform, tools, languages, multitasking, constants
from yggdrasil.components import import_component
from yggdrasil.drivers.Driver import Driver
from yggdrasil.metaschema.datatypes import is_default_typedef
from queue import Empty
logger = logging.getLogger(__name__)
_map_language_ext = OrderedDict()
def remove_product(product, check_for_source=False, **kwargs):
r"""Delete a single product after checking that the product is not (or
does not contain, in the case of directories), source files.
Args:
product (str): Full path to a file or directory that should be
removed.
check_for_source (bool, optional): If True, the specified product
will be checked to ensure that no source files are present. If
a source file is present, a RuntimeError will be raised.
Defaults to False.
**kwargs: Additional keyword arguments are passed to tools.remove_path.
Raises:
RuntimeError: If the specified product is a source file and
check_for_source is True.
RuntimeError: If the specified product is a directory that contains
a source file and check_for_source is True.
RuntimeError: If the product cannot be removed.
"""
tools.import_all_modules('yggdrasil.drivers')
source_keys = list(_map_language_ext.keys())
if '.exe' in source_keys: # pragma: windows
source_keys.remove('.exe')
if check_for_source:
if os.path.isdir(product):
ext_tuple = tuple(source_keys)
for root, dirs, files in os.walk(product):
for f in files:
if f.endswith(ext_tuple):
raise RuntimeError(("%s contains a source file "
"(%s)") % (product, f))
elif os.path.isfile(product):
ext = os.path.splitext(product)[-1]
if ext in source_keys:
raise RuntimeError("%s is a source file." % product)
tools.remove_path(product, **kwargs)
def remove_products(products, source_products):
r"""Delete products produced during the process of running the model.
Args:
products (list): List of products that should be removed after
checking that they are not source files.
source_products (list): List of products that should be removed
without checking that they are not source files.
"""
for p in source_products:
remove_product(p)
for p in products:
remove_product(p, check_for_source=True)
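# A minimal usage sketch for the helpers above (the file names are
# hypothetical, not part of yggdrasil's API):
#
#     products = ['model_wrapper.py', 'model.exe']
#     source_products = ['autogen_model.c']
#     remove_products(products, source_products)
#
# Entries in source_products are removed unconditionally, while entries
# in products are checked first and a RuntimeError is raised if one of
# them is (or contains) a source file.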
class ModelDriver(Driver):
r"""Base class for Model drivers and for running executable based models.
Args:
name (str): Unique name used to identify the model. This will
be used to report errors associated with the model.
args (str or list): The path to the file containing the model
program that will be run by the driver for the model's language
and/or a list of arguments that should be passed as input to the
model program or language executable (e.g. source code or
configuration file for a domain specific language).
products (list, optional): Paths to files created by the model that
should be cleaned up when the model exits. Entries can be absolute
paths or paths relative to the working directory. Defaults to [].
function (str, optional): If provided, an integrated model is
created by wrapping the function named here. The function must be
located within the file specified by the source file listed in the
first argument. If not provided, the model must contain it's own
calls to the |yggdrasil| interface.
iter_function_over (array, optional): Variable(s) that should be
received or sent as an array, but iterated over. Defaults to an
empty array and is ignored.
source_products (list, optional): Files created by running the model
that are source files. These files will be removed without checking
their extension so users should avoid adding files to this list
unless they are sure they should be deleted. Defaults to [].
is_server (bool, dict, optional): If `True`, the model is assumed to be a
server for one or more client models and an instance of
:class:`yggdrasil.drivers.ServerDriver` is started. The
corresponding channel that should be passed to the yggdrasil API
will be the name of the model. If is_server is a dictionary, it
should contain an 'input' key and an 'output' key. These are
required to be the names of existing input and output channels in
the model that will be co-opted by the server. (Note: This requires
that the co-opted output channel's send method is called once for
each time the co-opted input channel's recv method is called.) If
used with the `function` parameter, `is_server` must be a dictionary.
Defaults to False.
client_of (str, list, optional): The names of one or more server models
that this model will call as a client. If there is more than one, this
should be specified as a list. The
corresponding channel(s) that should be passed to the yggdrasil API
will be the name of the server model joined with the name of the
client model with an underscore `<server_model>_<client_model>`.
There will be one channel created for each server the model is a
client of. Defaults to empty list. Use of `client_of` with `function`
is not currently supported.
timesync (bool, str, optional): If set, the model is assumed to
call a send then receive of the state at each timestep
for synchronization with other models that are also
integrating in time. If a string is provided, it is assumed
to be the name of the server that will handle timestep
synchronization. If a boolean is provided, the name of the
server will be assumed to be 'timestep'. Defaults to False.
overwrite (bool, optional): If True, any existing model products
(compilation products, wrapper scripts, etc.) are removed prior to
the run. If False, the products are not removed. Defaults to True.
Setting this to False can improve the performance, particularly for
models that take a long time to compile, but this should only be
done once the model has been fully debugged to ensure that each run
is tested on a clean copy of the model. The value of this keyword
also determines whether or not products are removed after a run.
preserve_cache (bool, optional): If True, model products will be kept
following the run; otherwise all products will be cleaned up.
Defaults to False. This keyword is superseded by overwrite.
with_strace (bool, optional): If True, the command is run with strace (on
Linux) or dtrace (on MacOS). Defaults to False.
strace_flags (list, optional): Flags to pass to strace (or dtrace).
Defaults to [].
with_valgrind (bool, optional): If True, the command is run with valgrind.
Defaults to False.
valgrind_flags (list, optional): Flags to pass to valgrind. Defaults to [].
model_index (int, optional): Index of model in list of models being run.
Defaults to 0.
copy_index (int, optional): Index of model in set of copies. Defaults
to -1 indicating there is only one copy of the model.
outputs_in_inputs (bool, optional): If True, outputs from wrapped model
functions are passed by pointer as inputs for modification and the
return value will be a flag. If False, outputs are limited to
return values. Defaults to the value of the class attribute
outputs_in_inputs.
logging_level (str, optional): The level of logging messages that should
be displayed by the model. Defaults to the logging level as
determined by the configuration file and environment variables.
allow_threading (bool, optional): If True, comm connections will be set up
so that the model-side comms can be used by more than one thread.
Defaults to False.
copies (int, optional): The number of copies of the model that should be
created. Defaults to 1.
repository_url (str, optional): URL for the git repository containing
the model source code. If provided, relative paths in the model
YAML definition will be considered relative to the repository root
directory.
repository_commit (str, optional): Commit that should be checked out
in the model repository specified by repository_url. If not
provided, the most recent commit on the default branch will be used.
description (str, optional): Description of the model. This parameter
is only used in the model repository or when providing the model
as a service.
contact_email (str, optional): Email address that should be used to
contact the maintainer of the model. This parameter is only used
in the model repository.
validation_command (str, optional): Path to a validation command that
can be used to verify that the model ran as expected. A non-zero
return code is taken to indicate failure.
dependencies (list, optional): A list of packages required by the
model that are written in the same language as the model. If the
package requires dependencies outside the language of the model,
use the additional_dependencies parameter to provide them. If you
need a version of the package from a specific package manager,
a mapping with 'package' and 'package_manager' fields can be
provided instead of just the name of the package.
additional_dependencies (dict, optional): A mapping between languages
and lists of packages in those languages that are required by the
model.
**kwargs: Additional keyword arguments are passed to parent class.
Class Attributes:
language (str): Primary name for the programming language that this
compiler should be used for. [REQUIRED]
language_aliases (list): Additional/alternative names that the language
may be known by.
language_ext (list): Extensions for programs written in the target
language. [REQUIRED]
base_languages (list): Other programming languages that this driver
and the interpreter for the target language are dependent on (e.g.
Matlab models require Python).
executable_type (str): 'compiler' or 'interpreter' to indicate the type
of the executable for the language. [AUTOMATED]
interface_library (list): Name of the library containing the yggdrasil
interface for the target language. [REQUIRED]
interface_directories (list): Directories containing code in the
interface library for the target language.
interface_dependencies (list): List of names of libraries that are
required to use the interface on the current platform. This doesn't
include libraries required by specific communication types which are
described by supported_comm_options.
supported_comms (list): Name of comms supported in the target language.
[REQUIRED]
supported_comm_options (dict): Options for the supported comms like the
platforms they are available on and the external libraries required
to use them. [REQUIRED]
external_libraries (dict): Information on external libraries required
for running models in the target language using yggdrasil.
internal_libraries (dict): Information on internal libraries required
for running models in the target language using yggdrasil.
type_map (dict): Mapping of |yggdrasil| extended JSON types to
datatypes in the target programming language. [REQUIRED]
function_param (dict): Options specifying how different operations
would be encoded in the target language (e.g. if statements, for
loops, while loops). [REQUIRED]
version_flags (list): Flags that should be passed to the language
executable to determine the version of the compiler/interpreter.
Defaults to ['--version'].
outputs_in_inputs (bool): If True, outputs are passed by pointer as
inputs for modification and the return value should be a flag.
Defaults to False.
include_arg_count (bool): If True, the number of arguments passed
to send/recv calls is prepended to the arguments to the function.
Defaults to False.
include_channel_obj (bool): If True, the channel object is passed as
input to the send/recv calls (after the argument count if it is
also present due to include_arg_count being True). Defaults to
False.
is_typed (bool): True if the language is typed, False otherwise.
brackets (tuple): A pair of opening and closing characters that
are used by the language to mark blocks. Set to None and
ignored by default.
no_executable (bool): True if there is not an executable associated
with the language driver. Defaults to False.
comms_implicit (bool): True if the comms installed for this driver
are not explicitly defined (depend on input parameters). Defaults
to False.
Attributes:
args (list): Argument(s) for running the model on the command line.
model_file (str): Full path to the model executable or interpretable
script.
model_args (list): Runtime arguments for running the model on the
command line.
model_src (str): Full path to the model source code. For interpreted
languages, this will be the same as model_file.
model_function_info (dict): Parameters recovered by parsing the
provided model function definition.
overwrite (bool): If True, any existing compilation products will be
overwritten by compilation and cleaned up following the run.
Otherwise, existing products will be used and will remain after
the run.
products (list): Files created by running the model. This includes
compilation products such as executables and/or object files.
source_products (list): Files created by running the model that
are source files. These files will be removed without checking
their extension so users should avoid adding files to this list
unless they are sure they should be deleted.
wrapper_products (list): Files created in order to wrap the model.
process (:class:`yggdrasil.tools.YggPopen`): Process used to run
the model.
function (str): The name of the model function that should be wrapped.
iter_function_over (array): Variable(s) that should be received or
sent as an array, but iterated over.
is_server (bool, dict): If True, the model is assumed to be a server
and an instance of :class:`yggdrasil.drivers.ServerDriver` is
started. If a dict, the input/output channels with the specified
names in the dict will be replaced with a server.
client_of (list): The names of server models that this model is a
client of.
timesync (str): If set, the name of the server performing
timestep synchronization for the model.
with_strace (bool): If True, the command is run with strace or dtrace.
strace_flags (list): Flags to pass to strace/dtrace.
with_valgrind (bool): If True, the command is run with valgrind.
valgrind_flags (list): Flags to pass to valgrind.
model_index (int): Index of model in list of models being run.
copy_index (int): Index of model in set of copies.
modified_files (list): List of pairs of originals and copies of files
that should be restored during cleanup.
allow_threading (bool): If True, comm connections will be set up so that
the model-side comms can be used by more than one thread.
copies (int): The number of copies of the model that should be created.
repository_url (str): URL for the git repository containing the model
source code. If provided, relative paths in the model YAML
definition will be considered relative to the repository root
directory.
repository_commit (str): Commit that should be checked out in the
model repository specified by repository_url.
description (str): Description of the model. This parameter is only
used in the model repository or when providing the model as a
service.
contact_email (str): Email address that should be used to contact the
maintainer of the model. This parameter is only used in the model
repository.
validation_command (str): Path to a validation command that can be
used to verify that the model ran as expected. A non-zero return
code is taken to indicate failure.
dependencies (list): A list of packages required by the model that are
written in the same language as the model. If the package requires
dependencies outside the language of the model, use the
additional_dependencies parameter to provide them. If you need a
version of the package from a specific package manager, a mapping
with 'package' and 'package_manager' fields can be provided
instead of just the name of the package.
additional_dependencies (dict): A mapping between languages and lists
of packages in those languages that are required by the model.
Raises:
RuntimeError: If both with_strace and with_valgrind are True.
"""
_schema_type = 'model'
_schema_subtype_key = 'language'
_schema_required = ['name', 'language', 'args', 'working_dir']
_schema_properties = {
'name': {'type': 'string'},
'language': {'type': 'string', 'default': 'executable',
'description': (
'The programming language that the model '
'is written in. A list of available '
'languages can be found :ref:`here <'
'schema_table_model_subtype_rst>`.')},
'args': {'type': 'array',
'items': {'type': 'string', 'minLength': 1}},
'inputs': {'type': 'array', 'default': [],
'items': {'$ref': '#/definitions/comm'},
'description': (
'Zero or more channels carrying input to the model. '
'A full description of channel entries and the '
'options available for channels can be found '
':ref:`here<yaml_comm_options>`.')},
'outputs': {'type': 'array', 'default': [],
'items': {'$ref': '#/definitions/comm'},
'description': (
'Zero or more channels carrying output from the '
'model. A full description of channel entries and '
'the options available for channels can be found '
':ref:`here<yaml_comm_options>`.')},
'env': {'type': 'object', 'default': {},
'additionalProperties': {'type': 'string'}},
'products': {'type': 'array', 'default': [],
'items': {'type': 'string'}},
'source_products': {'type': 'array', 'default': [],
'items': {'type': 'string'}},
'working_dir': {'type': 'string'},
'overwrite': {'type': 'boolean'},
'preserve_cache': {'type': 'boolean', 'default': False},
'function': {'type': 'string'},
'iter_function_over': {'type': 'array', 'default': [],
'items': {'type': 'string'}},
'is_server': {'anyOf': [{'type': 'boolean'},
{'type': 'object',
'properties': {'input': {'type': 'string'},
'output': {'type': 'string'}},
'additionalProperties': False}],
'default': False},
'client_of': {'type': 'array', 'items': {'type': 'string'},
'default': []},
'timesync': {
'anyOf': [
{'type': 'boolean'}, {'type': 'string'},
{'type': 'object',
'required': ['name'],
'properties': {
'name': {'type': 'string', 'default': 'timesync'},
'inputs': {'anyOf': [
{'type': 'string'},
{'type': 'array',
'items': {'type': 'string'}}]},
'outputs': {'anyOf': [
{'type': 'string'},
{'type': 'array',
'items': {'type': 'string'}}]}}},
{'type': 'array',
'items': {
'anyOf': [
{'type': 'string'},
{'type': 'object',
'required': ['name'],
'properties': {
'name': {'type': 'string',
'default': 'timesync'},
'inputs': {'anyOf': [
{'type': 'string'},
{'type': 'array',
'items': {'type': 'string'}}]},
'outputs': {'anyOf': [
{'type': 'string'},
{'type': 'array',
'items': {'type': 'string'}}]}}}]}}],
'default': False},
'with_strace': {'type': 'boolean', 'default': False},
'strace_flags': {'type': 'array',
'default': ['-e', 'trace=memory'],
'items': {'type': 'string'}},
'with_valgrind': {'type': 'boolean', 'default': False},
'valgrind_flags': {'type': 'array',
'default': ['--leak-check=full',
'--show-leak-kinds=all'], # '-v'
'items': {'type': 'string'}},
'outputs_in_inputs': {'type': 'boolean'},
'logging_level': {'type': 'string', 'default': ''},
'allow_threading': {'type': 'boolean'},
'copies': {'type': 'integer', 'default': 1, 'minimum': 1},
'repository_url': {'type': 'string'},
'repository_commit': {'type': 'string'},
'description': {'type': 'string'},
'contact_email': {'type': 'string'},
'validation_command': {'type': 'string'},
'dependencies': {
'type': 'array',
'items': {'oneOf': [
{'type': 'string'},
{'type': 'object',
'required': ['package'],
'properties': {
'package': {'type': 'string'},
'package_manager': {'type': 'string'},
'arguments': {'type': 'string'}},
'additionalProperties': False}]}},
'additional_dependencies': {
'type': 'object',
'additionalProperties': {
'type': 'array',
'items': {'oneOf': [
{'type': 'string'},
{'type': 'object',
'required': ['package'],
'properties': {
'package': {'type': 'string'},
'package_manager': {'type': 'string'},
'arguments': {'type': 'string'}},
'additionalProperties': False}]}}}}
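# An illustrative model entry that would satisfy _schema_properties
# above (all values are made up; comm entries are shown schematically
# and are validated against '#/definitions/comm' elsewhere):
#
#     {'name': 'mymodel',
#      'language': 'python',
#      'args': ['model.py', '--steps', '10'],
#      'working_dir': '/path/to/model',
#      'inputs': [{'name': 'mymodel:input'}],
#      'outputs': [{'name': 'mymodel:output'}],
#      'is_server': False,
#      'copies': 1}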
_schema_excluded_from_class = ['name', 'language', 'args', 'working_dir']
_schema_excluded_from_class_validation = ['inputs', 'outputs']
language = None
language_ext = None
language_aliases = []
base_languages = []
executable_type = None
interface_library = None
interface_directories = []
interface_dependencies = []
supported_comms = []
supported_comm_options = {}
external_libraries = {}
internal_libraries = {}
type_map = None
inverse_type_map = None
function_param = None
version_flags = ['--version']
full_language = True
outputs_in_inputs = False
include_arg_count = False
include_channel_obj = False
is_typed = False
types_in_funcdef = True
interface_inside_exec = False
dont_declare_channel = False
is_dsl = False
brackets = None
zero_based = True
max_line_width = None
no_executable = False
comms_implicit = False
python_interface = {'table_input': 'YggAsciiTableInput',
'table_output': 'YggAsciiTableOutput',
'array_input': 'YggArrayInput',
'array_output': 'YggArrayOutput',
'pandas_input': 'YggPandasInput',
'pandas_output': 'YggPandasOutput'}
_library_cache = {}
_config_keys = []
_config_attr_map = []
_executable_search_dirs = None
_disconnect_attr = (Driver._disconnect_attr
+ ['queue', 'queue_thread',
'event_process_kill_called',
'event_process_kill_complete',
'model_process'])
_mpi_tags = {'ENV': 1,
'START': 2,
'STOP_RANK0': 3, # Stopped by partner
'STOP_RANKX': 4, # Stopped by root
'BUILDFILE': 5,
'LOCK_BUILDFILE': 6,
'UNLOCK_BUILDFILE': 7}
def __init__(self, name, args, model_index=0, copy_index=-1, clients=[],
preparsed_function=None, outputs_in_inputs=None,
mpi_rank=0, mpi_tag_start=None, **kwargs):
self._inv_mpi_tags = {v: k for k, v in self._mpi_tags.items()}
self.model_outputs_in_inputs = outputs_in_inputs
self.preparsed_function = preparsed_function
super(ModelDriver, self).__init__(name, **kwargs)
if self.overwrite is None:
self.overwrite = (not self.preserve_cache)
# Setup process things
self.model_process = None
self.queue = multitasking.Queue()
self.queue_thread = None
self.event_process_kill_called = multitasking.Event()
self.event_process_kill_complete = multitasking.Event()
# Strace/valgrind
if self.with_strace and self.with_valgrind:
raise RuntimeError("Trying to run with strace and valgrind.")
if (((self.with_strace or self.with_valgrind)
and platform._is_win)): # pragma: windows
raise RuntimeError("strace/valgrind options invalid on windows.")
self.model_index = model_index
self.copy_index = copy_index
self.clients = clients
self.env_copy = ['LANG', 'PATH', 'USER']
self._exit_line = b'EXIT'
for k in self.env_copy:
if k in os.environ:
self.env[k] = os.environ[k]
if not self.is_installed():
raise RuntimeError("%s is not installed" % self.language)
self.raw_model_file = None
self.model_function_file = None
self.model_function_info = None
self.model_function_inputs = None
self.model_function_outputs = None
self.model_file = None
self.model_args = []
self.model_dir = None
self.model_src = None
self.args = args
self.modified_files = []
self.wrapper_products = []
self._mpi_comm = False
self._mpi_rank = 0
self._mpi_size = 1
self._mpi_requests = {}
self._mpi_tag = (len(self._mpi_tags) * self.model_index)
if mpi_tag_start is not None:
self._mpi_tag += mpi_tag_start
if multitasking._on_mpi:
self._mpi_comm = multitasking.MPI.COMM_WORLD
self._mpi_rank = self._mpi_comm.Get_rank()
self._mpi_size = self._mpi_comm.Get_size()
self._mpi_partner_rank = mpi_rank
# Update for function
if self.function:
args = [self.init_from_function(args)]
# Parse arguments
self.debug(str(args))
self.parse_arguments(args)
assert(self.model_file is not None)
# Remove products
if self.overwrite:
self.remove_products()
# Write wrapper
if self.function:
self.wrapper_products.append(args[0])
self.wrapper_products += self.write_wrappers()
# Install dependencies
if self.dependencies:
self.install_model_dependencies(self.dependencies)
if self.additional_dependencies:
for language, v in self.additional_dependencies.items():
drv = import_component('model', language)
drv.install_model_dependencies(v)
@staticmethod
def before_registration(cls):
r"""Operations that should be performed to modify class attributes prior
to registration, including things like platform-dependent properties and
checking environment variables for default settings.
"""
Driver.before_registration(cls)
cls.inverse_type_map = None
cls._language = cls.language
cls._language_aliases = cls.language_aliases
if (((cls.language_ext is not None)
and (not isinstance(cls.language_ext, (list, tuple))))):
cls.language_ext = [cls.language_ext]
@staticmethod
def after_registration(cls, cfg=None, second_pass=False):
r"""Operations that should be performed to modify class attributes after
registration. For compiled languages this includes selecting the
default compiler. The order of precedence is the config file 'compiler'
option for the language, followed by the environment variable set by
_compiler_env, followed by the existing class attribute.
Args:
cfg (YggConfigParser, optional): Config class that should
be used to set options for the driver. Defaults to
None and yggdrasil.config.ygg_cfg is used.
second_pass (bool, optional): If True, the class has already
been registered. Defaults to False.
"""
if cfg is None:
from yggdrasil.config import ygg_cfg
cfg = ygg_cfg
cfg.reload()
Driver.after_registration(cls)
cls.cfg = cfg
for x in cls._config_attr_map:
ka = x['attr']
k0 = x.get('key', ka)
setattr(cls, ka, cls.cfg.get(cls.language, k0,
getattr(cls, ka)))
@staticmethod
def finalize_registration(cls):
r"""Operations that should be performed after a class has been fully
initialized and registered."""
global _map_language_ext
for x in cls.get_language_ext():
if x not in _map_language_ext:
_map_language_ext[x] = []
_map_language_ext[x].append(cls.language)
@classmethod
def mpi_partner_init(cls, self):
r"""Actions initializing an MPIPartnerModel."""
pass
@classmethod
def mpi_partner_cleanup(cls, self):
r"""Actions cleaning up an MPIPartnerModel."""
pass
@classmethod
def get_inverse_type_map(cls):
r"""Get the inverse type map.
Returns:
dict: Mapping from native type to JSON type.
"""
if cls.inverse_type_map is None:
cls.inverse_type_map = {}
for k, v in cls.type_map.items():
if k != 'flag':
cls.inverse_type_map[v] = k
return cls.inverse_type_map
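# For example, if a (hypothetical) driver defined
#     type_map = {'int': 'int64_t', 'flag': 'bool'}
# the inverse map would be {'int64_t': 'int'}; the 'flag' entry is
# deliberately excluded by the loop above.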
@classmethod
def get_language_for_source(cls, fname, languages=None, early_exit=False,
**kwargs):
r"""Determine the language that can be used with the provided source
file(s). If more than one language applies to a set of multiple files,
the language that applies to the most files is returned.
Args:
fname (str, list): The full path to one or more files. If more than
one file is provided, the language that matches the most files is
returned.
languages (list, optional): The list of languages that are acceptable.
Defaults to None and any language will be acceptable.
early_exit (bool, optional): If True, the first language identified
will be returned if fname is a list of files. Defaults to False.
**kwargs: Additional keyword arguments are passed to recursive calls.
Returns:
str: The language that can operate on the specified file.
"""
if isinstance(fname, list):
lang_dict = {}
for f in fname:
try:
ilang = cls.get_language_for_source(f, languages=languages,
**kwargs)
if early_exit:
return ilang
except ValueError:
continue
lang_dict.setdefault(ilang, 0)
lang_dict[ilang] += 1
if lang_dict:
return max(lang_dict, key=lang_dict.get)
else:
ext = os.path.splitext(fname)[-1]
for ilang in cls.get_map_language_ext().get(ext, []):
if (languages is None) or (ilang in languages):
return ilang
raise ValueError("Cannot determine language for file(s): '%s'" % fname)
@classmethod
def get_map_language_ext(cls):
r"""Return the mapping of all language extensions."""
return _map_language_ext
@classmethod
def get_all_language_ext(cls):
r"""Return the list of all language extensions."""
return list(_map_language_ext.keys())
@classmethod
def get_language_dir(cls):
r"""Return the langauge directory."""
return languages.get_language_dir(cls.language)
@classmethod
def get_language_ext(cls):
r"""Return the language extension, including from the base classes."""
out = cls.language_ext
if out is None:
out = []
for x in cls.base_languages:
out += import_component('model', x).get_language_ext()
return out
def parse_arguments(self, args, default_model_dir=None):
r"""Sort model arguments to determine which one is the executable
and which ones are arguments.
Args:
args (list): List of arguments provided.
default_model_dir (str, optional): Path to directory that should be
used to normalize the model file path if it is not absolute.
Defaults to None and is set to the working_dir.
"""
if isinstance(args, (str, bytes)):
args = args.split()
for i in range(len(args)):
args[i] = str(args[i])
assert(isinstance(args, list))
if default_model_dir is None:
default_model_dir = self.working_dir
self.raw_model_file = args[0]
self.model_file = self.raw_model_file
self.model_args = args[1:]
if (self.language != 'executable') and (not os.path.isabs(self.model_file)):
model_file = os.path.normpath(os.path.join(default_model_dir,
self.model_file))
self.model_file = model_file
self.model_dir = os.path.dirname(self.model_file)
self.debug("model_file = '%s', model_dir = '%s', model_args = '%s'",
self.model_file, self.model_dir, self.model_args)
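# Sketch of the attributes set by parse_arguments for a hypothetical
# Python model (assuming working_dir='/path/to/model'):
#
#     driver.parse_arguments(['model.py', '--steps', '10'])
#     # raw_model_file -> 'model.py'
#     # model_file     -> '/path/to/model/model.py'
#     # model_args     -> ['--steps', '10']
#     # model_dir      -> '/path/to/model'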
def init_from_function(self, args):
r"""Initialize model parameters based on the wrapped function."""
if not self.preparsed_function:
yml_mock = dict(self.yml,
name=self.name,
args=self.args,
function=self.function,
is_server=self.is_server,
client_of=self.client_of,
inputs=self.inputs,
outputs=self.outputs,
iter_function_over=self.iter_function_over,
copies=self.copies)
self.preparsed_function = self.preparse_function(yml_mock)
self.model_function_info = self.preparsed_function['model_file']
self.model_function_file = self.model_function_info['model_file']
self.model_function_inputs = self.preparsed_function['inputs']
self.model_function_outputs = self.preparsed_function['outputs']
self.model_outputs_in_inputs = self.preparsed_function['outputs_in_inputs']
model_dir, model_base = os.path.split(self.model_function_file)
model_base = os.path.splitext(model_base)[0]
wrapper_fname = os.path.join(model_dir,
'ygg_%s_%s%s' % (model_base, self.name,
self.language_ext[0]))
lines = self.write_model_wrapper(model_name=self.name,
**self.preparsed_function)
# Write file
if (not os.path.isfile(wrapper_fname)) or self.overwrite:
with open(wrapper_fname, 'w') as fd:
fd.write('\n'.join(lines))
return wrapper_fname
@property
def numeric_logging_level(self):
r"""int: Logging level for the model."""
out = self.logger.getEffectiveLevel()
if self.logging_level:
out = logging.getLevelName(self.logging_level)
return out
@property
def n_sent_messages(self):
r"""dict: Number of messages sent by the model via each connection."""
if (self._mpi_rank > 0) and self.check_mpi_request('stopped'):
out = self._mpi_requests['stopped'].result
return out
out = {}
for x in self.yml.get('output_drivers', []):
x_inst = x.get('instance', None)
if x_inst:
out[x_inst.name] = x_inst.models_recvd.get(self.name, 0)
if self.is_server:
for x in self.yml.get('input_drivers', []):
x_inst = x.get('instance', None)
if x_inst and (x_inst._connection_type == 'rpc_request'):
out[x_inst.name] = x_inst.servers_recvd.get(self.name, 0)
return out
@property
def has_sent_messages(self):
r"""bool: True if output has been received from the model."""
n_msg = self.n_sent_messages
if not n_msg:
return True
return bool(sum(n_msg.values()))
def write_wrappers(self, **kwargs):
r"""Write any wrappers needed to compile and/or run a model.
Args:
**kwargs: Keyword arguments are ignored (only included to
allow cascade from child classes).
Returns:
list: Full paths to any created wrappers.
"""
return []
@classmethod
def install_model_dependencies(cls, dependencies, always_yes=False):
r"""Install any dependencies required by the model.
Args:
dependencies (list): Dependencies that should be installed.
always_yes (bool, optional): If True, the package manager will
not ask users for input during installation. Defaults to
False.
"""
packages = {}
for x in dependencies:
if isinstance(x, str):
x = {'package': x}
if x.get('arguments', None):
cls.install_dependency(always_yes=always_yes, **x)
else:
packages.setdefault(x.get('package_manager', None), [])
packages[x.get('package_manager', None)].append(
x['package'])
for k, v in packages.items():
cls.install_dependency(v, package_manager=k,
always_yes=always_yes)
@classmethod
def install_dependency(cls, package=None, package_manager=None,
arguments=None, command=None, always_yes=False):
r"""Install a dependency.
Args:
package (str): Name of the package that should be installed. If
the package manager supports it, this can include version
requirements.
package_manager (str, optional): Package manager that should be
used to install the package.
arguments (str, optional): Additional arguments that should be
passed to the package manager.
command (list, optional): Command that should be used to
install the package.
always_yes (bool, optional): If True, the package manager will
not ask users for input during installation. Defaults to
False.
"""
assert(package)
if isinstance(package, str):
package = package.split()
if package_manager is None:
if tools.get_conda_prefix():
package_manager = 'conda'
elif platform._is_mac:
package_manager = 'brew'
elif platform._is_linux:
package_manager = 'apt'
elif platform._is_win:
package_manager = 'choco'
yes_cmd = []
cmd_kwargs = {}
if command:
cmd = copy.copy(command)
elif package_manager == 'conda':
cmd = ['conda', 'install'] + package
if platform._is_win: # pragma: windows
# Conda commands must be run on the shell on windows as it
# is implemented as a batch script
cmd.insert(0, 'call')
cmd_kwargs['shell'] = True
yes_cmd = ['-y']
elif package_manager == 'brew':
cmd = ['brew', 'install'] + package
elif package_manager == 'apt':
cmd = ['apt-get', 'install'] + package
if bool(os.environ.get('GITHUB_ACTIONS', False)):
# Only enable sudo for testing, otherwise allow the user to
# decide if they want to run yggdrasil with sudo, or just
# install the dependencies themselves
cmd.insert(0, 'sudo')
yes_cmd = ['-y']
elif package_manager == 'choco':
cmd = ['choco', 'install'] + package
elif package_manager == 'vcpkg':
cmd = ['vcpkg.exe', 'install', '--triplet', 'x64-windows']
cmd += package
else:
package_managers = {'pip': 'python',
'cran': 'r'}
if package_manager in package_managers:
drv = import_component(
'model', package_managers[package_manager])
return drv.install_dependency(
package=package, package_manager=package_manager,
arguments=arguments, always_yes=always_yes)
raise NotImplementedError(f"Unsupported package manager: "
f"{package_manager}")
if arguments:
cmd += arguments.split()
if always_yes:
cmd += yes_cmd
if cmd_kwargs.get('shell', False):
cmd = ' '.join(cmd)
subprocess.check_call(cmd, **cmd_kwargs)
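# Example of how a dependency is translated into an install command
# (the package name is hypothetical). With no package_manager given,
# conda, brew, apt or choco is chosen based on the environment/platform:
#
#     cls.install_dependency('scipy', package_manager='conda',
#                            always_yes=True)
#     # -> runs: conda install scipy -y
#     #    (prefixed with 'call' and run through the shell on Windows)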
def model_command(self):
r"""Return the command that should be used to run the model.
Returns:
list: Any commands/arguments needed to run the model from the
command line.
"""
return [self.model_file] + self.model_args
@classmethod
def language_executable(cls, **kwargs):
r"""Command required to compile/run a model written in this language
from the command line.
Returns:
str: Name of (or path to) compiler/interpreter executable required
to run the compiler/interpreter from the command line.
"""
if cls.no_executable:
return ''
raise NotImplementedError("language_executable not implemented for '%s'"
% cls.language)
@classmethod
def executable_command(cls, args, unused_kwargs=None, **kwargs):
r"""Compose a command for running a program using the exectuable for
this language (compiler/interpreter) with the provided arguments.
Args:
args (list): The program that returned command should run and any
arguments that should be provided to it.
unused_kwargs (dict, optional): Existing dictionary that unused
keyword arguments should be added to. Defaults to None and is
ignored.
**kwargs: Additional keyword arguments are ignored.
Returns:
list: Arguments composing the command required to run the program
from the command line using the executable for this language.
"""
raise NotImplementedError("executable_command not implemented for '%s'"
% cls.language)
@classmethod
def run_executable(cls, args, return_process=False, debug_flags=None,
**kwargs):
r"""Run a program using the executable for this language and the
provided arguments.
Args:
args (list): The program that should be run and any arguments
that should be provided to it.
return_process (bool, optional): If True, the process class is
returned without checking the process output. If False,
communicate is called on the process and the output is parsed
for errors. Defaults to False.
debug_flags (list, optional): Debug executable and flags that should
be prepended to the executable command. Defaults to None and
is ignored.
**kwargs: Additional keyword arguments are passed to
cls.executable_command and tools.popen_nobuffer.
Returns:
str: Output to stdout from the run command if return_process is
False, the process if return_process is True.
Raises:
RuntimeError: If the language is not installed.
RuntimeError: If there is an error when running the command.
"""
unused_kwargs = {}
cmd = cls.executable_command(args, unused_kwargs=unused_kwargs, **kwargs)
if isinstance(debug_flags, list):
cmd = debug_flags + cmd
try:
# Add default keyword arguments
if 'working_dir' in unused_kwargs:
unused_kwargs.setdefault('cwd', unused_kwargs.pop('working_dir'))
unused_kwargs.setdefault('shell', platform._is_win)
# Call command
logger.debug("Running '%s' from %s"
% (' '.join(cmd), unused_kwargs.get('cwd', os.getcwd())))
logger.debug("Process keyword arguments:\n%s\n",
' ' + pformat(unused_kwargs).replace('\n', '\n '))
print(' '.join(cmd))
proc = tools.popen_nobuffer(cmd, **unused_kwargs)
if return_process:
return proc
out, err = proc.communicate()
if proc.returncode != 0:
if out:
logger.info('\n%s' % out.decode('utf-8'))
if err: # pragma: debug
logger.info('\n%s' % err.decode('utf-8'))
raise RuntimeError("Command '%s' failed with code %d."
% (' '.join(cmd), proc.returncode))
out = out.decode("utf-8")
logger.debug('%s\n%s' % (' '.join(cmd), out))
return out
except (subprocess.CalledProcessError, OSError) as e: # pragma: debug
raise RuntimeError("Could not call command '%s': %s"
% (' '.join(cmd), e))
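# Comment-only usage sketch for run_executable (kept as comments so nothing
# executes at class-definition time). 'EchoDriver' and its executable_command
# are hypothetical illustrations, not installed drivers:
#
#     class EchoDriver(ModelDriver):
#         @classmethod
#         def executable_command(cls, args, unused_kwargs=None, **kwargs):
#             # Prepend the executable so 'args' can be run directly
#             return ['echo'] + args
#
#     out = EchoDriver.run_executable(['hello'])          # captured stdout
#     proc = EchoDriver.run_executable(['hello'], return_process=True)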
def run_validation(self):
r"""Run the validation script for the model."""
if not self.validation_command:
return
subprocess.check_call(self.validation_command.split(),
cwd=self.working_dir)
def run_model(self, return_process=True, **kwargs):
r"""Run the model. Unless overridden, the model will be run using
run_executable.
Args:
return_process (bool, optional): If True, the process running
the model is returned. If False, the process will block until
the model finishes running. Defaults to True.
**kwargs: Keyword arguments are passed to run_executable.
"""
env = self.set_env()
command = self.model_command()
if self.with_strace or self.with_valgrind:
kwargs.setdefault('debug_flags', self.debug_flags)
self.debug('Working directory: %s', self.working_dir)
self.debug('Command: %s', ' '.join(command))
self.debug('Environment Variables:\n%s', self.pprint(env, block_indent=1))
# Update keywords
# NOTE: Setting forward_signals to False allows faster debugging
# but should not be used in deployment for cases where models are not
# running locally.
default_kwargs = dict(env=env, working_dir=self.working_dir,
forward_signals=False,
shell=platform._is_win)
for k, v in default_kwargs.items():
kwargs.setdefault(k, v)
return self.run_executable(command, return_process=return_process, **kwargs)
@property
def debug_flags(self):
r"""list: Flags that should be prepended to an executable command to
enable debugging."""
pre_args = []
if self.with_strace:
if platform._is_linux:
pre_args += ['strace'] + self.strace_flags
else: # pragma: debug
raise RuntimeError("strace not supported on this OS.")
# TODO: dtruss cannot be run without sudo, sudo cannot be
# added to the model process command if it is not in the original
# yggdrasil CLI call, and must be tested with an executable that
# is not "signed with restricted entitlements" (which most built-in
# utilities (e.g. sleep) are).
# elif platform._is_mac:
# if 'sudo' in sys.argv:
# pre_args += ['sudo']
# pre_args += ['dtruss']
elif self.with_valgrind:
pre_args += ['valgrind'] + self.valgrind_flags
return pre_args
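# Comment-only sketch: when with_strace (Linux) or with_valgrind is set,
# run_model passes this property as 'debug_flags' to run_executable, which
# prepends it to the model command, e.g. something like
#     ['strace'] + strace_flags + ['python', 'model.py']
# The trailing 'python model.py' portion is an illustrative model command.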
@classmethod
def language_version(cls, version_flags=None, **kwargs):
r"""Determine the version of this language.
Args:
version_flags (list, optional): Flags passed to the executable to request
the version. Defaults to cls.version_flags.
**kwargs: Keyword arguments are passed to cls.run_executable.
Returns:
str: Version of compiler/interpreter for this language.
"""
if version_flags is None:
version_flags = cls.version_flags
return cls.run_executable(version_flags, **kwargs).splitlines()[0].strip()
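# Comment-only sketch: the version flags are run through run_executable and
# only the first line of output is kept. Assuming a driver whose executable
# prints 'Python 3.10.4' when given '--version' (names are illustrative):
#     SomeDriver.language_version(version_flags=['--version'])
#     # -> 'Python 3.10.4'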
@classmethod
def is_installed(cls):
r"""Determine if this model driver is installed on the current
machine.
Returns:
bool: Truth of if this model driver can be run on the current
machine.
"""
return (cls.is_language_installed()
and cls.are_base_languages_installed()
and cls.are_dependencies_installed()
and cls.is_interface_installed() and cls.is_comm_installed()
and cls.is_configured() and (not cls.is_disabled()))
@classmethod
def are_base_languages_installed(cls, missing=None):
r"""Determine if the base languages are installed.
Args:
missing (list, optional): A pre-existing list that
missing base languages should be appended to.
Returns:
bool: True if the base languages are installed. False otherwise.
"""
out = True
for x in cls.base_languages:
if (not out) and (not isinstance(missing, list)): # pragma: no cover
break
out = import_component('model', x).is_installed()
if isinstance(missing, list) and (not out):
missing.append(x)
if missing:
out = False
return out
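# Comment-only usage sketch: pass a list to collect the names of any base
# languages that are not installed (driver name is illustrative):
#     missing = []
#     if not SomeDriver.are_base_languages_installed(missing=missing):
#         logger.info("Missing base languages: %s", missing)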
@classmethod
def are_dependencies_installed(cls):
r"""Determine if the dependencies are installed for the interface (not
including dependencies needed by a particular communication type).
Returns:
bool: True if the dependencies are installed. False otherwise.
"""
out = (cls.language is not None)
for x in cls.interface_dependencies:
if not out: # pragma: no cover
break
out = cls.is_library_installed(x)
return out
@classmethod
def is_interface_installed(cls):
r"""Determine if the interface library for the associated programming
language is installed.
Returns:
bool: True if the interface library is installed.
"""
out = (cls.language is not None)
if out and (cls.interface_library is not None):
out = cls.is_library_installed(cls.interface_library)
return out
@classmethod
def is_language_installed(cls):
r"""Determine if the interpreter/compiler for the associated programming
language is installed.
Returns:
bool: True if the language interpreter/compiler is installed.
"""
out = False
if cls.language is not None:
try:
out = (shutil.which(cls.language_executable()) is not None)
except NotImplementedError: # pragma: debug
out = False
return out
@classmethod
def identify_source_files(cls, args=None, working_dir=None, **kwargs):
r"""Determine the source file based on model arguments.
Args:
args (list, optional): Arguments provided.
working_dir (str, optional): Working directory.
**kwargs: Additional keyword arguments are ignored.
Returns:
list: Source files.
"""
out = []
if args:
src = args[0]
if (((not cls.is_source_file(src))
and (cls.language_ext is not None)
and (os.path.splitext(src)[-1]
not in cls.get_all_language_ext()))):
src = os.path.splitext(src)[0] + cls.language_ext[0]
if working_dir and (not os.path.isabs(src)):
src = os.path.normpath(os.path.join(working_dir, src))
if os.path.isfile(src):
out.append(src)
return out
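# Comment-only sketch: when the first model argument lacks a recognized
# source extension, the language extension is appended before checking that
# the file exists. For a hypothetical driver with language_ext = ['.c']:
#     SomeDriver.identify_source_files(args=['model'], working_dir='/tmp')
#     # -> ['/tmp/model.c'] if that file exists, otherwise []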
@classmethod
def is_source_file(cls, fname):
r"""Determine if the provided file name points to a source files for
the associated programming language by checking the extension.
Args:
fname (str): Path to file.
Returns:
bool: True if the provided file is a source file, False otherwise.
"""
out = False
model_ext = os.path.splitext(fname)[-1]
if len(model_ext) > 0:
out = (model_ext in cls.get_language_ext())
return out
@classmethod
def is_library_installed(cls, lib, **kwargs):
r"""Determine if a dependency is installed.
Args:
lib (str): Name of the library that should be checked.
**kwargs: Additional keyword arguments are ignored.
Returns:
bool: True if the library is installed, False otherwise.
"""
raise NotImplementedError("Method is_library_installed missing for '%s'"
% cls.language)
@classmethod
def is_disabled(cls):
return (cls.cfg.get(cls.language, 'disable', 'false').lower() == 'true')
@classmethod
def is_configured(cls):
r"""Determine if the appropriate configuration has been performed (e.g.
installation of supporting libraries etc.)
Returns:
bool: True if the language has been configured.
"""
# Check for section & diable
disable_flag = cls.is_disabled()
out = (cls.cfg.has_section(cls.language) and (not disable_flag))
# Check for commtypes
if out and (len(cls.base_languages) == 0):
out = (cls.cfg.get(cls.language, 'commtypes', None) is not None)
# Check for config keys
for k in cls._config_keys:
if not out: # pragma: no cover
break
out = (cls.cfg.get(cls.language, k, None) is not None)
return out
@classmethod
def is_comm_installed(cls, commtype=None, skip_config=False, **kwargs):
r"""Determine if a comm is installed for the associated programming
language.
Args:
commtype (str, optional): If provided, this method will only test
for installation of the specified communication type. Defaults
to None and will check for any installed comm.
skip_config (bool, optional): If True, the config list of comms
installed for this language will not be used to determine if
the comm is installed and the class attribute
supported_comm_options will be processed. Defaults to False and
config options are used in order to improve performance after
initial configuration.
platforms (list, optional): Platforms on which the comm can be
installed. Defaults to None and is ignored unless there is a
value for the commtype in supported_comm_options. This
keyword argument is ignored if skip_config is False.
libraries (list, optional): External libraries that are required
by the specified commtype. Defaults to None and is ignored
unless there is a value for the commtype in supported_comm_options.
This keyword argument is ignored if skip_config is False.
**kwargs: Additional keyword arguments are passed to either
is_comm_installed for the base languages, supported languages,
or is_library_installed as appropriate.
Returns:
bool: True if a comm is installed for this language.
"""
# If there are base_languages for this language, use that language's
# driver to check for comm installation.
if len(cls.base_languages) > 0:
out = True
for x in cls.base_languages:
if not out: # pragma: no cover
break
out = import_component('model', x).is_comm_installed(
commtype=commtype, skip_config=skip_config, **kwargs)
return out
if cls.comms_implicit:
if commtype is None:
return True
return (commtype in tools.get_supported_comm())
# Check for installation based on config option
if not skip_config:
installed_comms = cls.cfg.get(cls.language, 'commtypes', [])
if commtype is None:
return (len(installed_comms) > 0)
else:
return (commtype in installed_comms)
# Check for any comm
if commtype is None:
for c in cls.supported_comms:
if cls.is_comm_installed(commtype=c, skip_config=skip_config,
**kwargs):
return True
# Check that comm is explicitly supported
if commtype not in cls.supported_comms:
return False
# Set & pop keywords
for k, v in cls.supported_comm_options.get(commtype, {}).items():
if kwargs.get(k, None) is None:
kwargs[k] = v
platforms = kwargs.pop('platforms', None)
libraries = kwargs.pop('libraries', [])
# Check platforms
if (platforms is not None) and (platform._platform not in platforms):
return False # pragma: windows
# Check libraries
if (libraries is not None):
for lib in libraries:
if not cls.is_library_installed(lib, **kwargs):
return False
# Check for server on RabbitMQ
if commtype in ['rmq', 'rmq_async']:
from yggdrasil.communication.RMQComm import check_rmq_server
if not check_rmq_server():
return False
return True
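# Comment-only usage sketch (commtype names are examples only): once a
# language has been configured, installed commtypes come from the config
# file, so these checks are cheap; skip_config=True forces a full re-check
# against supported_comms/supported_comm_options.
#     SomeDriver.is_comm_installed()                        # any comm
#     SomeDriver.is_comm_installed(commtype='zmq')
#     SomeDriver.is_comm_installed(commtype='zmq', skip_config=True)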
@classmethod
def configure(cls, cfg):
r"""Add configuration options for this language.
Args:
cfg (YggConfigParser): Config class that options should be set for.
Returns:
list: Section, option, description tuples for options that could not
be set.
"""
out = []
# Section and executable
if (cls.language is not None) and (not cfg.has_section(cls.language)):
cfg.add_section(cls.language)
# Executable type configuration
out += cls.configure_executable_type(cfg)
# Locate executable
if (((not cls.is_language_installed())
and (cls.executable_type is not None))): # pragma: debug
try:
exec_file = cls.language_executable()
if exec_file is not None:
fpath = tools.locate_file(
exec_file, directory_list=cls._executable_search_dirs)
if fpath:
cfg.set(cls.language, cls.executable_type, fpath)
except NotImplementedError:
pass
# Configure libraries
out += cls.configure_libraries(cfg)
# Only do additional configuration if no base languages
if not cls.base_languages:
# Installed comms
comms = []
for c in cls.supported_comms:
if cls.is_comm_installed(commtype=c, cfg=cfg, skip_config=True):
comms.append(c)
cfg.set(cls.language, 'commtypes', comms)
cls.after_registration(cls, cfg=cfg, second_pass=True)
return out
@classmethod
def configure_executable_type(cls, cfg):
r"""Add configuration options specific in the executable type
before the libraries are configured.
Args:
cfg (YggConfigParser): Config class that options should be set for.
Returns:
list: Section, option, description tuples for options that could not
be set.
"""
return []
@classmethod
def configure_libraries(cls, cfg):
r"""Add configuration options for external libraries in this language.
Args:
cfg (YggConfigParser): Config class that options should be set for.
Returns:
list: Section, option, description tuples for options that could not
be set.
"""
return []
def get_io_env(self, input_drivers=None, output_drivers=None):
r"""Get environment variables set by the input/output drivers.
Args:
input_drivers (list, optional): Input drivers. Defaults to the
yaml entry if not provided.
output_drivers (list, optional): Output drivers. Defaults to the
yaml entry if not provided.
Returns:
dict: Environment variables.
"""
if input_drivers is None:
input_drivers = self.yml.get('input_drivers', [])
if output_drivers is None:
output_drivers = self.yml.get('output_drivers', [])
out = {}
if self.copies > 1:
from yggdrasil.drivers.DuplicatedModelDriver import (
DuplicatedModelDriver)
base_name = DuplicatedModelDriver.get_base_name(self.name)
else:
base_name = self.name
for x in input_drivers + output_drivers:
if 'instance' in x:
model_env = x['instance'].model_env
if self.name in model_env:
out.update(model_env[self.name])
elif base_name in model_env:
out.update(model_env[base_name])
return out
@classmethod
def set_env_class(cls, existing=None, **kwargs):
r"""Set environment variables that are instance independent.
Args:
existing (dict, optional): Existing dictionary of environment
variables that new variables should be added to. Defaults
to a copy of os.environ.
**kwargs: Additional keyword arguments are ignored.
Returns:
dict: Environment variables for the model process.
"""
if existing is None: # pragma: no cover
existing = {}
existing.update(os.environ)
return existing
def set_env(self, existing=None, **kwargs):
r"""Get environment variables that should be set for the model process.
Args:
existing (dict, optional): Existing dictionary of environment
variables that new variables should be added to. Defaults
to a copy of os.environ.
**kwargs: Additional keyword arguments are passed to set_env_class.
Returns:
dict: Environment variables for the model process.
"""
from yggdrasil.config import ygg_cfg
if existing is None:
existing = {}
existing.update(copy.deepcopy(self.env))
existing.update(self.get_io_env())
env = self.set_env_class(existing=existing, **kwargs)
env.update(YGG_SUBPROCESS="True",
YGG_MODEL_INDEX=str(self.model_index),
YGG_MODEL_LANGUAGE=self.language,
YGG_MODEL_COPIES=str(self.copies),
# YGG_PYTHON_EXEC=sys.executable,
YGG_DEFAULT_COMM=tools.get_default_comm(),
YGG_NCLIENTS=str(len(self.clients)))
if multitasking._on_mpi:
env['YGG_MPI_RANK'] = str(multitasking._mpi_rank)
if self.copies > 1:
from yggdrasil.drivers.DuplicatedModelDriver import (
DuplicatedModelDriver)
env['YGG_MODEL_COPY'] = str(self.copy_index)
env['YGG_MODEL_NAME'] = DuplicatedModelDriver.get_base_name(
self.name)
else:
env['YGG_MODEL_NAME'] = self.name
if self.allow_threading or (self.copies > 1):
env['YGG_THREADING'] = '1'
if isinstance(self.is_server, dict):
env['YGG_SERVER_INPUT'] = self.is_server['input']
env['YGG_SERVER_OUTPUT'] = self.is_server['output']
if self.logging_level:
env['YGG_MODEL_DEBUG'] = self.logging_level
replace = [k for k in env.keys() if ':' in k]
for k in replace:
env[k.replace(':', '__COLON__')] = env.pop(k)
if ygg_cfg.get('general', 'allow_multiple_omp', False):
env['KMP_DUPLICATE_LIB_OK'] = 'True'
return env
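# Comment-only sketch of the environment a model process can expect after
# set_env (values are placeholders; only the keys are meaningful):
#     YGG_SUBPROCESS=True            YGG_MODEL_NAME=<model name>
#     YGG_MODEL_INDEX=<index>        YGG_MODEL_LANGUAGE=<driver language>
#     YGG_MODEL_COPIES=<copies>      YGG_DEFAULT_COMM=<default commtype>
#     YGG_NCLIENTS=<# of clients>
# plus YGG_MPI_RANK, YGG_MODEL_COPY, YGG_THREADING, YGG_SERVER_INPUT/OUTPUT
# and YGG_MODEL_DEBUG when the corresponding options apply.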
def before_start(self, no_queue_thread=False, **kwargs):
r"""Actions to perform before the run starts.
Args:
no_queue_thread (bool, optional): If True, the queue_thread is not
created/started. Defaults to False.
**kwargs: Keyword arguments are passed to run_model.
"""
# if multitasking._on_mpi:
# self.init_mpi_env()
self.model_process = self.run_model(**kwargs)
# Start thread to queue output
if not no_queue_thread:
self.queue_thread = multitasking.YggTaskLoop(
target=self.enqueue_output_loop,
name=self.name + '.EnqueueLoop')
self.queue_thread.start()
if multitasking._on_mpi:
self.init_mpi()
def queue_close(self):
r"""Close the queue for messages from the model process."""
self.model_process.stdout.close()
def queue_recv(self):
r"""Receive a message from the model process."""
return self.model_process.stdout.readline()
def enqueue_output_loop(self):
r"""Keep passing lines to queue."""
try:
line = self.queue_recv()
except BaseException as e: # pragma: debug
print(e)
line = ""
if (len(line) == 0):
self.queue_thread.set_break_flag()
try:
self.queue.put(self._exit_line)
except multitasking.AliasDisconnectError: # pragma: debug
self.error("Queue disconnected")
self.debug("End of model output")
try:
self.queue_close()
except BaseException: # pragma: debug
pass
else:
try:
self.queue.put(line.decode('utf-8'))
except BaseException as e: # pragma: debug
warnings.warn("Error in printing output: %s" % e)
def before_loop(self):
r"""Actions before loop."""
self.debug('Running %s from %s with cwd %s and env %s',
self.model_command(), os.getcwd(), self.working_dir,
pformat(self.env))
# def init_mpi_env(self):
# r"""Receive env information to the partner model."""
# self.env = self.recv_mpi(tag=self._mpi_tags['ENV'])
def init_mpi(self):
r"""Initialize MPI communicator."""
if self._mpi_rank == 0:
self._mpi_comm = None
else:
self.recv_mpi(tag=self._mpi_tags['START'])
self._mpi_requests['stopped'] = multitasking.MPIRequestWrapper(
self.recv_mpi(tag=self._mpi_tags['STOP_RANKX'],
dont_block=True))
def send_mpi(self, msg, tag=0, dont_block=False):
r"""Send an MPI message."""
self.debug("send %d (%d) [%s]: %s (blocking=%s)", tag,
self._mpi_tag + tag, self._inv_mpi_tags[tag],
msg, not dont_block)
kws = {'dest': self._mpi_partner_rank, 'tag': (self._mpi_tag + tag)}
if dont_block: # pragma: debug
# return self._mpi_comm.isend(msg, **kws)
raise NotImplementedError("Non-blocking MPI send not tested.")
else:
return self._mpi_comm.send(msg, **kws)
def recv_mpi(self, tag=0, dont_block=False):
r"""Receive an MPI message."""
self.debug('recv %d (%d) [%s] (blocking=%s)', tag,
self._mpi_tag + tag, self._inv_mpi_tags[tag],
not dont_block)
kws = {'source': self._mpi_partner_rank, 'tag': (self._mpi_tag + tag)}
if dont_block:
return self._mpi_comm.irecv(**kws)
else:
return self._mpi_comm.recv(**kws)
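# Comment-only sketch of the tag convention used by send_mpi/recv_mpi: the
# logical tag from self._mpi_tags is offset by self._mpi_tag so that
# messages for different drivers sharing a communicator do not collide, e.g.
#     self.send_mpi('STOPPING', tag=self._mpi_tags['STOP_RANK0'])
#     msg = self.recv_mpi(tag=self._mpi_tags['START'])
# The payloads shown mirror those used by stop_mpi_partner and init_mpi.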
def stop_mpi_partner(self, msg=None, dest=0, tag=None):
r"""Send a message to stop the MPI partner model on the main process."""
if self._mpi_comm and (not self.check_mpi_request('stopping')):
if tag is None:
tag = self._mpi_tags['STOP_RANK0']
if msg is None:
if self.errors or self.model_process_returncode:
msg = 'ERROR'
else:
msg = 'STOPPING'
self.debug("stop_mpi_partner: %d, %s", tag, msg)
# Don't call test()
self._mpi_requests['stopping'] = multitasking.MPIRequestWrapper(
self.send_mpi(msg, tag=tag), completed=True)
def wait_on_mpi_request(self, name, timeout=False):
r"""Wait for a request to be completed.
Args:
name (str): Name that request was registered under.
timeout (float, optional): Time limit for the wait. Defaults to False.
Returns:
str: Message received once the request completes, or None if the
wait times out.
"""
self.debug("Waiting on '%s' (timeout=%s)", name, timeout)
try:
out = self._mpi_requests[name].wait(timeout=timeout)
if out == 'ERROR': # pragma: debug
self.errors.append(out)
return out
except asyncio.TimeoutError: # pragma: debug
self.info("Timeout for MPI '%s' request", name)
def check_mpi_request(self, name):
r"""Check if a request has been completed.
Args:
name (str): Name that request was registered under.
Returns:
bool, str: Received message or False if the request does not
exist or is not complete.
"""
if self._mpi_comm and (name in self._mpi_requests):
out, msg = self._mpi_requests[name].test()
if out and (msg == 'ERROR'): # pragma: debug
self.errors.append(msg)
return out
return False
def set_break_flag(self, *args, **kwargs):
r"""Stop the model loop."""
self.stop_mpi_partner()
super(ModelDriver, self).set_break_flag(*args, **kwargs)
def run_loop(self):
r"""Loop to check if model is still running and forward output."""
# Continue reading until there is not any output
if self.model_process_returncode:
self.errors.append(self.model_process_returncode)
if self.check_mpi_request('stopped'):
self.debug("Stop requested by MPI partner.")
self.set_break_flag()
try:
line = self.queue.get_nowait()
except Empty:
# This sleep is necessary to allow changes in queue without lock
self.sleep()
return
except multitasking.AliasDisconnectError: # pragma: debug
self.error("Queue disconnected")
self.set_break_flag()
else:
if (line == self._exit_line) or self.check_mpi_request('stopped'):
self.debug("No more output")
self.set_break_flag()
else:
self.print_encoded(line, end="")
sys.stdout.flush()
def run_finally(self):
r"""Actions to perform in finally clause of try/except wrapping
run."""
# Ensure the MPI partner gets cleaned up following an error
self.stop_mpi_partner()
super(ModelDriver, self).run_finally()
def after_loop(self):
r"""Actions to perform after run_loop has finished. Mainly checking
if there was an error and then handling it."""
self.debug('')
self.stop_mpi_partner()
if self.queue_thread is not None:
self.queue_thread.join(self.sleeptime)
if self.queue_thread.is_alive():
self.debug("Queue thread still alive")
# Loop was broken from outside, kill the queueing thread
self.kill_process()
return
self.wait_process(self.timeout, key_suffix='.after_loop')
self.kill_process()
self.debug(("Closing input/output drivers:\n"
"\tinput: %s\n\toutput: %s")
% ([drv['name'] for drv in
self.yml.get('input_drivers', [])],
[drv['name'] for drv in
self.yml.get('output_drivers', [])]))
for drv in self.yml.get('input_drivers', []):
if 'instance' in drv:
if self.language == 'mpi':
drv['instance'].wait(self.timeout)
drv['instance'].on_model_exit('output', self.name,
errors=self.errors)
for drv in self.yml.get('output_drivers', []):
if 'instance' in drv:
if self.language == 'mpi':
drv['instance'].wait(self.timeout)
drv['instance'].on_model_exit('input', self.name,
errors=self.errors)
@property
def io_errors(self):
r"""list: Errors produced by input/output drivers to this model."""
errors = []
for drv in self.yml.get('input_drivers', []):
if 'instance' in drv:
errors += drv['instance'].errors
for drv in self.yml.get('output_drivers', []):
if 'instance' in drv:
errors += drv['instance'].errors
return errors
@property
def model_process_complete(self):
r"""bool: Has the process finished or not. Returns True if the process
has not started."""
if self.model_process is None: # pragma: debug
return True
return (self.model_process.poll() is not None)
@property
def model_process_returncode(self):
r"""int: Return code for the model process where non-zero values
indicate that there was an error."""
if self.model_process_complete and (self.model_process is not None):
return self.model_process.returncode
return 0
def wait_process(self, timeout=None, key=None, key_suffix=None):
r"""Wait for some amount of time for the process to finish.
Args:
timeout (float, optional): Time (in seconds) that should be waited.
Defaults to None and is infinite.
key (str, optional): Key that should be used to register the timeout.
Defaults to None and is set based on the stack trace.
key_suffix (str, optional): String appended to the end of the
generated key. Defaults to None.
Returns:
bool: True if the process completed. False otherwise.
"""
if not self.was_started: # pragma: debug
return True
return self.wait_on_function(lambda: self.model_process_complete,
timeout=timeout, key_level=1, key=key,
key_suffix=key_suffix)
def kill_process(self):
r"""Kill the process running the model, checking return code."""
if not self.was_started: # pragma: debug
self.debug('Process was never started.')
self.set_break_flag()
self.event_process_kill_called.set()
self.event_process_kill_complete.set()
if self.event_process_kill_called.is_set(): # pragma: debug
self.debug('Process has already been killed.')
return
self.event_process_kill_called.set()
with self.lock:
self.debug('')
ignore_error_code = False
if not self.model_process_complete: # pragma: debug
self.debug("Process is still running. Killing it.")
try:
self.model_process.kill()
self.debug("Waiting %f s for process to be killed",
self.timeout)
self.wait_process(self.timeout, key_suffix='.kill_process')
except BaseException: # pragma: debug
self.exception("Error killing model process")
if not self.has_sent_messages:
ignore_error_code = True
assert(self.model_process_complete)
if (((self.model_process_returncode != 0)
and (not ignore_error_code))):
self.error(("return code of %s indicates model error. "
"(sent messages: %s)"),
str(self.model_process_returncode),
self.n_sent_messages)
self.event_process_kill_complete.set()
if self.queue_thread is not None:
if not self.was_break: # pragma: debug
# Wait for messages to be printed
self.debug("Waiting for queue_thread to finish up.")
self.queue_thread.wait(self.timeout)
if self.queue_thread.is_alive(): # pragma: debug
self.debug("Setting break flag for queue_thread to finish up.")
self.queue_thread.set_break_flag()
self.queue_thread.wait(self.timeout)
try:
self.queue_close()
self.queue_thread.wait(self.timeout)
except BaseException: # pragma: debug
self.exception("Closed during concurrent action")
if self.queue_thread.is_alive(): # pragma: debug
self.error("Queue thread was not terminated.")
def graceful_stop(self):
r"""Gracefully stop the driver."""
self.debug('')
if self.has_sent_messages:
self.wait_process(self.timeout, key_suffix='.graceful_stop')
super(ModelDriver, self).graceful_stop()
def cleanup_products(self):
r"""Remove products created in order to run the model."""
if self.overwrite and (not self.preserve_cache):
self.remove_products()
self.restore_files()
def cleanup(self):
r"""Remove compile executable."""
self.cleanup_products()
super(ModelDriver, self).cleanup()
def restore_files(self):
r"""Restore modified files to their original form."""
for (original, modified) in self.modified_files:
if os.path.isfile(original):
os.remove(modified)
shutil.move(original, modified)
def remove_products(self):
r"""Delete products produced during the process of running the model."""
products = self.products
source_products = self.source_products + self.wrapper_products
remove_products(products, source_products)
@classmethod
def cleanup_dependencies(cls, products=[], verbose=False):
r"""Cleanup dependencies."""
for x in products:
if os.path.isfile(x):
if verbose: # pragma: debug
print("Removing %s" % x)
os.remove(x)
# Methods for automated model wrapping
@classmethod
def run_code(cls, lines, process_kwargs={}, **kwargs):
r"""Run code by first writing it as an executable and then calling
the driver.
Args:
lines (list): Lines of code to be wrapped as an executable.
process_kwargs (dict, optional): Keyword arguments that should
be passed to run_model. Defaults to {}.
**kwargs: Additional keyword arguments are passed to the
write_executable method.
"""
name = 'test_code_%s' % str(uuid.uuid4())[:13].replace('-', '_')
working_dir = os.getcwd()
code_dir = tempfile.gettempdir()
# code_dir = working_dir
fname = os.path.join(code_dir, name + cls.get_language_ext()[0])
lines = cls.write_executable(lines, **kwargs)
with open(fname, 'w') as fd:
fd.write('\n'.join(lines))
inst = None
try:
assert(os.path.isfile(fname))
inst = cls(name, [fname], working_dir=working_dir)
inst.run_model(return_process=False, **process_kwargs)
except BaseException: # pragma: debug
logger.error('Failed generated code:\n%s' % '\n'.join(lines))
raise
finally:
if os.path.isfile(fname):
os.remove(fname)
if inst is not None:
inst.cleanup()
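# Comment-only usage sketch for run_code, assuming a hypothetical subclass
# that implements write_executable/get_language_ext (the code line itself is
# arbitrary example source):
#     SomeDriver.run_code(["print('hello from generated code')"])
# The generated file is written to a temporary directory, run via run_model,
# and removed again in the finally block above.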
@classmethod
def format_function_param(cls, key, default=None, replacement=None,
ignore_method=False, **kwargs):
r"""Return the formatted version of the specified key.
Args:
key (str): Key in cls.function_param mapping that should be
formatted.
default (str, optional): Format that should be returned if key
is not in cls.function_param. Defaults to None.
replacement (str, optional): Format that should be used instead
of the one in cls.function_param. Defaults to None.
ignore_method (bool, optional): If True, any format_function_param_<key>
class method is ignored and the format string is used directly.
Defaults to False.
**kwargs: Additional keyword arguments are used in formatting the
request function parameter.
Returns:
str: Formatted string.
Raises:
NotImplementedError: If key is not in cls.function_param and default
is not set.
"""
if replacement is not None:
fmt = replacement
elif (not ignore_method) and hasattr(cls, 'format_function_param_%s' % key):
return getattr(cls, 'format_function_param_%s' % key)(**kwargs)
else:
if (key not in cls.function_param) and (default is None):
raise NotImplementedError(("Language %s dosn't have an entry in "
"function_param for key '%s'")
% (cls.language, key))
fmt = cls.function_param.get(key, default)
return fmt.format(**kwargs)
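# Comment-only sketch: function_param maps template keys to format strings.
# For a (hypothetical) entry {'assign': '{name} = {value}'}:
#     cls.format_function_param('assign', name='x', value='1')  # -> 'x = 1'
# A class method named format_function_param_assign, if present, would take
# precedence unless ignore_method=True is passed.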
@classmethod
def parse_var_definition(cls, io, value, outputs_in_inputs=None):
r"""Extract information about input/output variables from a
string definition.
Args:
io (str): Description of variables contained in the provided
string. Must be 'inputs' or 'outputs'.
value (str): String containing one or more variable definitions.
outputs_in_inputs (bool, optional): If True, the outputs are
presented in the function definition as inputs. Defaults
to False.
Returns:
list: List of information about the variables contained in
the provided string.
Raises:
AssertionError: If io is not 'inputs' or 'outputs'.
NotImplementedError: If the def_regex for the specified
io is not defined.
"""
if outputs_in_inputs is None:
outputs_in_inputs = cls.outputs_in_inputs
assert(io in ['inputs', 'outputs'])
if ('%s_def_regex' % io) not in cls.function_param: # pragma: debug
raise NotImplementedError(
("'%s_def_regex' not defined for "
"language %s.") % (io, cls.language))
if 'multiple_outputs' in cls.function_param:
multi_re = cls.function_param['multiple_outputs']
for x in '[]()':
multi_re = multi_re.replace(x, '\\' + x)
multi_re = multi_re.format(outputs='(.*?)')
match = re.search(multi_re, value)
if match is not None:
value = match.group(1)
new_val = []
io_re = cls.format_function_param('%s_def_regex' % io)
for i, ivar in enumerate(cls.split_variables(value)):
igrp = {'name': ivar}
x = re.search(io_re, ivar)
if x is not None:
igrp = x.groupdict()
for k in list(igrp.keys()):
if igrp[k] is None:
del igrp[k]
if 'native_type' in igrp:
igrp['native_type'] = igrp['native_type'].replace(' ', '')
igrp['datatype'] = cls.get_json_type(igrp['native_type'])
igrp['position'] = i
if (io == 'outputs') and outputs_in_inputs:
igrp = cls.input2output(igrp)
new_val.append(igrp)
return new_val
@classmethod
def parse_function_definition(cls, model_file, model_function,
contents=None, match=None,
expected_outputs=[], outputs_in_inputs=None):
r"""Get information about the inputs & outputs to a model from its
definition if possible.
Args:
model_file (str): Full path to the file containing the model
function's declaration.
model_function (str): Name of the model function.
contents (str, optional): String containing the function definition.
If not provided, the function definition is read from model_file.
match (re.Match, optional): Match object for the function regex. If
not provided, a search is performed using function_def_regex.
expected_outputs (list, optional): List of names or variable
information dictionaries for outputs that are expected
to be extracted from the function's definition. This
variable is only used if outputs_in_inputs is True and
outputs are not extracted from the function's definition
using the regex for this language. Defaults to [].
outputs_in_inputs (bool, optional): If True, the outputs are
presented in the function definition as inputs. Defaults
to False.
Returns:
dict: Parameters extracted from the function definitions.
"""
if outputs_in_inputs is None:
outputs_in_inputs = cls.outputs_in_inputs
out = {}
if match or ('function_def_regex' in cls.function_param):
if not match:
function_regex = cls.format_function_param(
'function_def_regex', function_name=model_function)
if contents is None:
with open(model_file, 'r') as fd:
contents = fd.read()
match = re.search(function_regex, contents)
if not match: # pragma: debug
raise RuntimeError(("Could not find function match in file:\n"
"%s\nfor regex:\nr'%s'")
% (pformat(contents), function_regex))
# Match brackets to determine where the function definition is
if isinstance(cls.brackets, tuple):
assert(len(cls.brackets) == 2)
contents = match.group(0)
counts = {k: 0 for k in cls.brackets}
first_zero = 0
re_brackets = r'[\%s\%s]' % cls.brackets
for x in re.finditer(re_brackets, contents):
counts[x.group(0)] += 1
if (((counts[cls.brackets[0]] > 0)
and (counts[cls.brackets[0]]
== counts[cls.brackets[1]]))):
first_zero = x.span(0)[1]
break
assert((first_zero == 0) or (first_zero == len(contents)))
# This is currently commented as regex's are
# sufficient so far, but this may be needed in the
# future to isolate single definitions.
# if (first_zero != 0) and first_zero != len(contents):
# contents = contents[:first_zero]
# match = re.search(function_regex, contents)
# assert(match)
out = match.groupdict()
for k in list(out.keys()):
if out[k] is None:
del out[k]
for io in ['inputs', 'outputs']:
if io in out:
out[io] = cls.parse_var_definition(
io, out[io], outputs_in_inputs=outputs_in_inputs)
out['model_file'] = model_file
if outputs_in_inputs and expected_outputs and (not out.get('outputs', False)):
missing_expected_outputs = []
for o in expected_outputs:
if isinstance(o, dict):
o = o['name']
missing_expected_outputs.append(o)
out['outputs'] = []
for x in out['inputs']:
if x['name'] not in missing_expected_outputs:
continue
missing_expected_outputs.remove(x['name'])
out['outputs'].append(cls.input2output(x))
if missing_expected_outputs: # pragma: debug
raise ValueError(("Could not locate %d output "
"variable(s) in input: %s")
% (len(missing_expected_outputs),
missing_expected_outputs))
for x in out['outputs']:
out['inputs'].remove(x)
if out.get('flag_var', None):
flag_var = {'name': out.pop('flag_var'),
'datatype': {'type': 'flag'}}
if out.get('flag_type', None):
flag_var['native_type'] = out.pop('flag_type').replace(' ', '')
flag_var['datatype'] = cls.get_json_type(flag_var['native_type'])
out['flag_var'] = flag_var
cls.check_flag_var(out, outputs_in_inputs=outputs_in_inputs)
return out
@classmethod
def check_flag_var(cls, info, outputs_in_inputs=None):
r"""Check if the flag variable should be treated as an output.
Args:
info (dict): Information about the function.
outputs_in_inputs (bool, optional): If True, the outputs are
presented in the function definition as inputs. Defaults
to False.
"""
if outputs_in_inputs is None: # pragma: debug
outputs_in_inputs = cls.outputs_in_inputs
flag_t = cls.type_map['flag']
if (((info.get('flag_var', {}).get('native_type', flag_t) != flag_t)
or (not outputs_in_inputs))):
if info.get('outputs', []): # pragma: debug
logger.warn("Support for returning outputs via parameter(s) "
"and return value is not yet support. The return "
"value will be assumed to be a flag indicating "
"the success of the model.")
info['outputs_in_inputs'] = True
else:
info['outputs'] = [info.pop('flag_var')]
info['outputs_in_inputs'] = False
@classmethod
def channels2vars(cls, channels):
r"""Convert a list of channels to a list of variables.
Args:
channels (list): List of channel dictionaries.
Returns:
list: List of variables.
"""
if not isinstance(channels, list):
channels = [channels]
variables = []
for x in channels:
variables += x['vars']
def get_pos(x):
return x.get('position', 0)
variables = sorted(variables, key=get_pos)
return variables
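# Comment-only sketch: variables from all channels are concatenated and
# sorted by their 'position' entry (missing positions sort as 0):
#     channels = [{'vars': [{'name': 'b', 'position': 1}]},
#                 {'vars': [{'name': 'a', 'position': 0}]}]
#     cls.channels2vars(channels)
#     # -> [{'name': 'a', 'position': 0}, {'name': 'b', 'position': 1}]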
@classmethod
def expand_server_io(cls, inputs, outputs, client_comms=[]):
r"""Update inputs/outputs w/ information about server that will be
using them.
Args:
inputs (list): List of model inputs including types.
outputs (list): List of model outputs including types.
client_comms (list, optional): List of the names of client comms
that should be removed from the list of outputs. Defaults to [].
"""
if client_comms:
warnings.warn("When wrapping a model function, client comms "
"must either be initialized outside the function, "
"pass a 'global_scope' parameter to the "
"comm initialization (e.g. Python, R, Matlab), "
"or use a 'WITH_GLOBAL_SCOPE' macro "
"(e.g. C, C++, Fortran) around the initialization "
"so that they are persistent "
"across calls and the call or recv/send methods "
"must be called explicitly (as opposed to the "
"function inputs/outputs which will be handled "
"by the wrapper). This model's client comms are:\n"
"\t%s" % client_comms)
# Replace server input w/ split input/output and remove client
# connections from inputs
for i, x in enumerate(inputs):
if x.get('server_replaces', False):
inputs[x['server_replaces']['input_index']] = (
x['server_replaces']['input'])
outputs.insert(x['server_replaces']['output_index'],
x['server_replaces']['output'])
rm_outputs = [i for i, x in enumerate(outputs)
if x['name'] in client_comms]
for i in rm_outputs[::-1]:
outputs.pop(i)
@classmethod
def preparse_function(cls, yml):
r"""Extract information about inputs and outputs based on the
function being wrapped.
Args:
yml (dict): Options that will be used to initialize the model.
Returns:
dict: Information about the parsed function.
"""
if 'function' not in yml:
return
if yml.get('is_server', False):
assert(isinstance(yml['is_server'], dict))
if cls.function_param is None:
raise ValueError(("Language %s is not parameterized "
"and so functions cannot be automatically "
"wrapped as a model.") % cls.language)
source_files = cls.identify_source_files(**yml)
if not source_files: # pragma: debug
raise ValueError("Could not identify any source files.")
model_function_file = source_files[0]
if not os.path.isfile(model_function_file): # pragma: debug
raise ValueError("Source file does not exist: '%s'"
% model_function_file)
# Update input/outputs based on parsed source code
client_comms = ['%s:%s_%s' % (yml['name'], x, yml['name'])
for x in yml.get('client_of', [])]
model_function_inputs = copy.copy(yml.get('inputs', []))
model_function_outputs = copy.copy(yml.get('outputs', []))
cls.expand_server_io(
model_function_inputs, model_function_outputs,
client_comms=client_comms)
expected_outputs = []
for x in model_function_outputs:
expected_outputs += x.get('vars', [])
model_outputs_in_inputs = yml.get('outputs_in_inputs', None)
model_function_info = cls.parse_function_definition(
model_function_file, yml['function'],
expected_outputs=expected_outputs,
outputs_in_inputs=model_outputs_in_inputs)
if model_outputs_in_inputs is None:
model_outputs_in_inputs = model_function_info.get(
'outputs_in_inputs', None)
model_flag = cls.update_io_from_function(
model_function_info, yml['function'],
inputs=model_function_inputs,
outputs=model_function_outputs,
iter_function_over=yml.get('iter_function_over', []))
yml['preparsed_function'] = {
'model_file': model_function_info,
'model_function': yml['function'],
'inputs': model_function_inputs,
'outputs': model_function_outputs,
'model_flag': model_flag,
'outputs_in_inputs': model_outputs_in_inputs,
'copies': yml.get('copies', 1),
'iter_function_over': yml.get('iter_function_over', []),
'skip_update_io': True}
return yml['preparsed_function']
@classmethod
def update_io_from_function(cls, model_file, model_function,
inputs=[], outputs=[], contents=None,
outputs_in_inputs=None, iter_function_over=[]):
r"""Update inputs/outputs from the function definition.
Args:
model_file (str): Full path to the file containing the model
function's declaration.
model_function (str): Name of the model function.
inputs (list, optional): List of model inputs including types.
Defaults to [].
outputs (list, optional): List of model outputs including types.
Defaults to [].
contents (str, optional): Contents of file to parse rather than
re-reading the file. Defaults to None and is ignored.
outputs_in_inputs (bool, optional): If True, the outputs are
presented in the function definition as inputs. Defaults
to False.
iter_function_over (array, optional): Variable(s) that should be
received or sent as an array, but iterated over. Defaults to
an empty array and is ignored.
Returns:
dict, None: Flag variable used by the model. If None, the
model does not use a flag variable.
"""
# Read info from the source code
if (((isinstance(model_file, str) and os.path.isfile(model_file))
or (contents is not None))): # pragma: debug
expected_outputs = []
for x in outputs:
expected_outputs += x.get('vars', [])
info = cls.parse_function_definition(model_file, model_function,
contents=contents,
expected_outputs=expected_outputs)
logger.warn("The new execution pattern reuses the parsed "
"source code parameters. Double check results:\n%s."
% pformat(info))
elif isinstance(model_file, dict):
info = model_file
else:
info = {"inputs": [], "outputs": []}
if outputs_in_inputs is None: # pragma: debug
outputs_in_inputs = info.get('outputs_in_inputs',
cls.outputs_in_inputs)
info_map = {io: OrderedDict([(x['name'], x) for x in info.get(io, [])])
for io in ['inputs', 'outputs']}
# Determine flag variable
flag_var = None
if info.get('flag_var', None):
flag_var = dict(info['flag_var'], name='model_flag')
# Check for vars matching names of input/output channels
for io, io_var in zip(['inputs', 'outputs'], [inputs, outputs]):
if (io == 'outputs') and outputs_in_inputs:
io_map = info_map['inputs']
else:
io_map = info_map[io]
for x in io_var:
if x.get('vars', []):
continue
var_name = x['name'].split(':')[-1]
if var_name in io_map:
x['vars'] = [var_name]
for k in ['length', 'shape', 'ndim']:
kvar = '%s_var' % k
if kvar in io_map[var_name]:
x['vars'].append(io_map[var_name][kvar])
# Move variables if outputs in inputs
if outputs_in_inputs:
if ((((len(inputs) + len(outputs)) == len(info.get('inputs', [])))
and (len(info.get('outputs', [])) == 0))):
for i, vdict in enumerate(info['inputs'][:len(inputs)]):
inputs[i].setdefault('vars', [vdict['name']])
assert(inputs[i]['vars'] == [vdict['name']])
for i, vdict in enumerate(info['inputs'][len(inputs):]):
outputs[i].setdefault('vars', [vdict['name']])
assert(outputs[i]['vars'] == [vdict['name']])
for x in outputs:
for i, v in enumerate(x.get('vars', [])):
if v in info_map['inputs']:
info_map['outputs'][v] = cls.input2output(
info_map['inputs'].pop(v))
for io, io_var in zip(['inputs', 'outputs'], [inputs, outputs]):
for x in io_var:
x['channel_name'] = x['name']
x['channel'] = (x['name'].split(':', 1)[-1]
+ '_%s_channel' % io[:-1])
for i, v in enumerate(x.get('vars', [])):
if v in info_map[io]:
x['vars'][i] = info_map[io][v]
if (len(io_var) == 1) and info_map.get(io, False):
io_var[0].setdefault('vars', list(info_map[io].values()))
for x in io_var:
if 'vars' not in x:
x['vars'] = [copy.deepcopy(x)]
x['vars'][0]['name'] = x['name'].split(':', 1)[-1]
for v in x['vars']:
if isinstance(v.get('datatype', None), str):
v['datatype'] = {'type': v['datatype']}
if isinstance(x.get('datatype', None), str):
x['datatype'] = {'type': x['datatype']}
# Check for user defined length variables and add flag to
# length variables
for x in io_var:
for k in ['length', 'shape', 'ndim']:
for v in x['vars']:
if k + '_var' in v:
v[k + '_var'] = info_map[io][v[k + '_var']]
# v[k + '_var']['is_' + k + '_var'] = True
v[k + '_var']['is_length_var'] = True
else:
v[k + '_var'] = False
# Update datatypes
if cls.is_typed:
for x in io_var:
non_length = []
for v in x['vars']:
if not v.get('is_length_var', False):
non_length.append(v)
if ((x.get('datatype', None)
and (not is_default_typedef(x['datatype'])))):
if (len(non_length) == 1):
non_length[0]['datatype'] = x['datatype']
else:
# TODO: Remove types associated with length?
assert(x['datatype']['type'] == 'array')
assert(len(x['datatype']['items'])
== len(non_length))
for v, t in zip(non_length, x['datatype']['items']):
v['datatype'] = t
else:
if (len(non_length) == 1):
x['datatype'] = non_length[0]['datatype']
else:
x['datatype'] = {
'type': 'array',
'items': [v['datatype'] for v in non_length]}
x['datatype']['from_function'] = True
for v in x['vars']:
if 'native_type' not in v:
v['native_type'] = cls.get_native_type(**v)
# Update types based on iteration
for x in io_var:
for v in x.get('vars', [x]):
if v['name'] in iter_function_over:
v['iter_datatype'] = copy.deepcopy(v.get('datatype', {}))
if v.get('datatype', {}):
assert(v['datatype']['type'] == 'scalar')
v['datatype']['type'] = '1darray'
v.pop('native_type', None)
v['native_type'] = cls.get_native_type(**v)
# Finalize io variables
for x in inputs:
cls.finalize_function_io('input', x)
for x in outputs:
cls.finalize_function_io('output', x)
return flag_var
@classmethod
def finalize_function_io(cls, direction, x):
r"""Finalize info for an input/output channel following function
parsing.
Args:
direction (str): Direction of channel ('input' or 'output').
x (dict): Information dictionary for the channel being finalized.
"""
assert(direction in ['input', 'output'])
@classmethod
def write_model_wrapper(cls, model_file, model_function,
inputs=[], outputs=[], model_flag=None,
outputs_in_inputs=None, verbose=False, copies=1,
iter_function_over=[], verbose_model=False,
skip_update_io=False, model_name=None):
r"""Return the lines required to wrap a model function as an integrated
model.
Args:
model_file (str): Full path to the file containing the model
function's declaration.
model_function (str): Name of the model function.
inputs (list, optional): List of model inputs including types.
Defaults to [].
outputs (list, optional): List of model outputs including types.
Defaults to [].
model_flag (dict, optional): Information about the flag that
should be used to track the success of yggdrasil send/recv
calls. This should only be provided if update_io_from_function
has already been called. Defaults to None and is determined
by update_io_from_function.
outputs_in_inputs (bool, optional): If True, the outputs are
presented in the function definition as inputs. Defaults
to the class attribute outputs_in_inputs.
verbose (bool, optional): If True, the contents of the created file
are displayed. Defaults to False.
copies (int, optional): Number of times the model driver is
duplicated. If more than one, no error will be raised in the
event there is never a call to the function. Defaults to 1.
iter_function_over (array, optional): Variable(s) that should be
received or sent as an array, but iterated over. Defaults to
an empty array and is ignored.
skip_update_io (bool, optional): If True, update_io_from_function
will not be called. Defaults to False.
verbose_model (bool, optional): If True, print statements will
be added after every line in the model. Defaults to False.
model_name (str, optional): Name given to the model. Defaults to
None.
Returns:
list: Lines of code wrapping the provided model with the necessary
code to run it as part of an integration.
"""
if outputs_in_inputs is None:
outputs_in_inputs = cls.outputs_in_inputs
# TODO: Determine how to encode dependencies on external variables in models
if cls.function_param is None:
raise NotImplementedError("function_param attribute not set for"
"language '%s'" % cls.language)
# Update types based on the function definition for typed languages
if not skip_update_io:
model_flag = cls.update_io_from_function(
model_file, model_function,
inputs=inputs, outputs=outputs,
outputs_in_inputs=outputs_in_inputs,
iter_function_over=iter_function_over)
if isinstance(model_file, dict):
model_file = model_file['model_file']
# Update types based on iteration
iter_function_idx = None
iter_ivars = []
iter_ovars = []
if iter_function_over:
iter_function_idx = {'name': 'idx_func_iter',
'datatype': {'type': 'int'}}
if cls.zero_based:
iter_function_idx['begin'] = int(0)
else:
iter_function_idx['begin'] = int(1)
for x in inputs:
iter_ivars += [v for v in x.get('vars', [x])
if v['name'] in iter_function_over]
if not iter_ivars: # pragma: debug
raise RuntimeError("The iter_function_over model "
"parameter must include an input to "
"iterate over. To expand output arrays "
"into component elements, use the "
"'iterate' transformation.")
for x in outputs:
iter_ovars += [v for v in x.get('vars', [x])
if v['name'] in iter_function_over]
if iter_ivars[0].get('length_var', False):
iter_function_idx['end'] = iter_ivars[0]['length_var']
for v in iter_ovars:
v['length_var'] = iter_ivars[0]['length_var']['name']
if isinstance(iter_function_idx['end'], dict):
iter_function_idx['end'] = iter_function_idx['end']['name']
else:
iter_function_idx['end'] = cls.format_function_param(
'len', variable=iter_ivars[0]['name'],
extra=iter_ivars[0])
for v in iter_ivars + iter_ovars:
v['iter_var'] = iter_function_idx
# Declare variables and flag, then define flag
lines = []
flag_var = {'name': 'flag', 'datatype': {'type': 'flag'}}
iter_var = {'name': 'first_iter', 'datatype': {'type': 'flag'}}
free_vars = []
definitions = []
if 'declare' in cls.function_param:
for x in inputs + outputs:
lines += cls.write_channel_decl(
x, definitions=definitions,
requires_freeing=free_vars)
lines += cls.write_declaration(flag_var,
definitions=definitions,
requires_freeing=free_vars)
lines += cls.write_declaration(iter_var,
definitions=definitions,
requires_freeing=free_vars)
if model_flag:
lines += cls.write_declaration(
model_flag, definitions=definitions,
requires_freeing=free_vars)
if iter_function_idx:
lines += cls.write_declaration(
iter_function_idx, definitions=definitions,
requires_freeing=free_vars)
for x in inputs + outputs:
for v in x.get('vars', [x]):
lines += cls.write_declaration(
v, definitions=definitions,
requires_freeing=free_vars)
lines += definitions
nline_preamble = len(lines)
lines.append(cls.format_function_param(
'assign', name=flag_var['name'],
value=cls.function_param.get(
'true_flag', cls.function_param['true'])))
lines.append(cls.format_function_param(
'assign', name=iter_var['name'],
value=cls.function_param.get(
'true_flag', cls.function_param['true'])))
# Declare/define input and output channels
for x in inputs:
lines += cls.write_channel_def('input',
requires_freeing=free_vars, **x)
for x in outputs:
lines += cls.write_channel_def('output',
requires_freeing=free_vars, **x)
# Receive inputs before loop
for x in inputs:
if x.get('outside_loop', False):
lines += cls.write_model_recv(x['channel'], x,
flag_var=flag_var)
# Loop
loop_lines = []
# Receive inputs
any_loop_inputs = False
loop_iter_var = iter_var
if copies > 1:
loop_iter_var = None
for x in inputs:
if not x.get('outside_loop', False):
any_loop_inputs = True
loop_lines += cls.write_model_recv(x['channel'], x,
flag_var=flag_var,
iter_var=loop_iter_var,
allow_failure=True)
# Prepare output array
if iter_function_over:
for v in iter_ivars:
if v['name'] in iter_function_over:
loop_lines += cls.write_finalize_iiter(v)
for v in iter_ovars:
if v['name'] in iter_function_over:
loop_lines += cls.write_initialize_oiter(v)
# Call model
loop_lines += cls.write_model_function_call(
model_function, model_flag, inputs, outputs,
outputs_in_inputs=outputs_in_inputs,
iter_function_idx=iter_function_idx)
# Finalize output array
if iter_function_over:
for v in iter_ovars:
if v['name'] in iter_function_over:
loop_lines += cls.write_finalize_oiter(v)
# Send outputs
for x in outputs:
if not x.get('outside_loop', False):
loop_lines += cls.write_model_send(x['channel'], x,
flag_var=flag_var)
loop_lines.append(cls.format_function_param(
'assign', name=iter_var['name'],
value=cls.function_param.get('false_flag',
cls.function_param['false'])))
# Add break if there are not any inputs inside the loop
if not any_loop_inputs:
loop_lines.append(cls.format_function_param(
'assign', name=flag_var['name'],
value=cls.function_param.get(
'false_flag', cls.function_param['false'])))
# Add loop in while block
flag_cond = cls.format_function_param('flag_cond',
default='{flag_var}',
flag_var=flag_var['name'])
lines += cls.write_while_loop(flag_cond, loop_lines)
# Send outputs after loop
for x in outputs:
if x.get('outside_loop', False):
lines += cls.write_model_send(x['channel'], x,
flag_var=flag_var)
# Free variables
for x in free_vars:
lines += cls.write_free(x)
# Add prints
if verbose_model: # pragma: debug
idx = len(lines) - 1
while (idx > nline_preamble):
if 'else' not in lines[idx]:
indent = ' ' * (len(lines[idx])
- len(lines[idx].lstrip()))
lines.insert(idx, indent + cls.format_function_param(
'print', message=("%s: line %d" % (model_file, idx))))
idx -= 1
# Wrap as executable with interface & model import
prefix = None
if 'interface' in cls.function_param:
ygglib = cls.interface_library
if ygglib in cls.internal_libraries:
ygglib = cls.internal_libraries[ygglib]['source']
if cls.interface_inside_exec:
lines.insert(0, cls.format_function_param(
'interface', interface_library=ygglib))
else:
prefix = [cls.format_function_param(
'interface', interface_library=ygglib)]
out = cls.write_executable(lines, prefix=prefix,
model_name=model_name,
imports={'filename': model_file,
'function': model_function})
if verbose: # pragma: debug
logger.info('\n' + '\n'.join(out))
else:
logger.debug('\n' + '\n'.join(out))
return out
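# Comment-only outline of the wrapper that write_model_wrapper assembles,
# independent of the target language (this mirrors the code paths above,
# not any particular generated file):
#     <interface import>                 # unless interface_inside_exec
#     <declarations and definitions>     # channels, flag, first_iter, vars
#     <input/output channel definitions>
#     <recv for inputs marked outside_loop>
#     while <flag condition>:
#         <recv loop inputs>; <call model function>; <send loop outputs>
#     <send for outputs marked outside_loop>
#     <free temporary variables>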
@classmethod
def write_channel_decl(cls, var, **kwargs):
r"""Write a channel declaration.
Args:
var (dict): Information dictionary for the channel being declared.
**kwargs: Additional keyword arguments are passed to class's
write_declaration.
Returns:
list: The lines declaring the variable.
"""
out = []
if not cls.dont_declare_channel:
out = cls.write_declaration(
{'name': var['channel'], 'type': 'comm'}, **kwargs)
if (((var.get('datatype', None) is not None)
and ('{channel_type}' in cls.function_param['input']))):
var['channel_type'] = '%s_type' % var['channel']
out += cls.write_type_decl(
var['channel_type'], var['datatype'],
definitions=kwargs.get('definitions', None),
requires_freeing=kwargs.get('requires_freeing', None))
return out
@classmethod
def write_type_decl(cls, name, datatype, name_base=None,
requires_freeing=None, definitions=None,
no_decl=False):
r"""Get lines declaring the datatype within the language.
Args:
name (str): Name of variable that should be declared.
datatype (dict): Type definition.
name_base (str, optional): Base name used when naming supporting
variables (e.g. item/property declarations). Defaults to name.
requires_freeing (list, optional): List that variables requiring
freeing should be appended to. Defaults to None.
definitions (list, optional): Existing list that variable
definitions should be added to. Defaults to None if not
provided and definitions will be included in the returned
lines.
no_decl (bool, optional): If True, the variable is not
declared, but supporting variables will be. Defaults
to False.
Returns:
list: Lines required to define a type declaration.
"""
out = []
if name_base is None:
name_base = name
if datatype['type'] == 'array':
if 'items' in datatype:
assert(isinstance(datatype['items'], list))
out += cls.write_declaration(
{'name': '%s_items' % name_base,
'datatype': {
'type': '1darray', 'subtype': 'dtype',
'length': len(datatype['items'])}},
definitions=definitions,
requires_freeing=requires_freeing)
for i, x in enumerate(datatype['items']):
# Prevent recursion
x_copy = copy.deepcopy(x)
x_copy.pop('items', None)
x_copy.pop('properties', None)
out += cls.write_type_decl(
None, x_copy,
name_base=('%s_item%d' % (name_base, i)),
definitions=definitions,
requires_freeing=requires_freeing,
no_decl=True)
elif datatype['type'] == 'object':
if 'properties' in datatype:
assert(isinstance(datatype['properties'], dict))
precision = 0
if datatype['properties']:
precision = max([len(k) for k in
datatype['properties'].keys()])
precision = max(80, precision)
out += cls.write_declaration(
{'name': '%s_keys' % name_base,
'datatype': {
'type': '1darray', 'subtype': 'bytes',
'length': len(datatype['properties']),
'precision': precision}},
definitions=definitions,
requires_freeing=requires_freeing)
out += cls.write_declaration(
{'name': '%s_vals' % name_base,
'datatype': {
'type': '1darray', 'subtype': 'dtype',
'length': len(datatype['properties'])}},
definitions=definitions,
requires_freeing=requires_freeing)
for i, (k, v) in enumerate(datatype['properties'].items()):
# Prevent recursion
v_copy = copy.deepcopy(v)
v_copy.pop('items', None)
v_copy.pop('properties', None)
out += cls.write_type_decl(
None, v_copy,
name_base=('%s_prop%d' % (name_base, i)),
requires_freeing=requires_freeing,
definitions=definitions,
no_decl=True)
elif datatype['type'] == 'ndarray':
if 'shape' in datatype:
out += cls.write_declaration(
{'name': '%s_shape' % name_base,
'datatype': {
'type': '1darray', 'subtype': 'int',
'precision': 64, 'length': len(datatype['shape'])}},
definitions=definitions,
requires_freeing=requires_freeing)
elif datatype['type'] in (['ply', 'obj', '1darray', 'scalar',
'boolean', 'null', 'number', 'integer',
'string', 'class', 'function', 'instance',
'schema', 'any']
+ list(constants.VALID_TYPES.keys())):
pass
else: # pragma: debug
raise ValueError(("Cannot create %s version of type "
"'%s'") % (cls.language, datatype['type']))
if not no_decl:
out += cls.write_declaration(
{'name': name, 'type': 'dtype'})
return out
@classmethod
def write_type_def(cls, name, datatype, name_base=None,
use_generic=False):
r"""Get lines declaring the data type within the language.
Args:
name (str): Name of variable that definition should be stored in.
datatype (dict): Type definition.
name_base (str, optional): Base name used when naming supporting
variables (e.g. item/property definitions). Defaults to name.
use_generic (bool, optional): If True variables serialized
and/or deserialized by the type will be assumed to be
generic objects. Defaults to False.
Returns:
list: Lines required to define a type definition.
"""
out = []
fmt = None
keys = {}
if use_generic:
keys['use_generic'] = cls.function_param['true']
else:
keys['use_generic'] = cls.function_param['false']
typename = datatype['type']
if name_base is None:
name_base = name
if datatype['type'] == 'array':
if 'items' in datatype:
assert(isinstance(datatype['items'], list))
keys['nitems'] = len(datatype['items'])
keys['items'] = '%s_items' % name_base
if cls.zero_based:
idx_offset = 0
else:
idx_offset = 1
for i, x in enumerate(datatype['items']):
# Prevent recursion
x_copy = copy.deepcopy(x)
x_copy.pop('items', None)
x_copy.pop('properties', None)
out += cls.write_type_def(
cls.format_function_param(
'index', variable=keys['items'],
index=(i + idx_offset)), x_copy,
name_base=('%s_item%d' % (name_base, i)),
use_generic=use_generic)
else:
keys['nitems'] = 0
keys['items'] = cls.function_param['null']
keys['use_generic'] = cls.function_param['true']
elif datatype['type'] == 'object':
keys['use_generic'] = cls.function_param['true']
if 'properties' in datatype:
assert(isinstance(datatype['properties'], dict))
keys['nitems'] = len(datatype['properties'])
keys['keys'] = '%s_keys' % name_base
keys['values'] = '%s_vals' % name_base
if cls.zero_based:
idx_offset = 0
else:
idx_offset = 1
for i, (k, v) in enumerate(datatype['properties'].items()):
# Prevent recursion
v_copy = copy.deepcopy(v)
v_copy.pop('items', None)
v_copy.pop('properties', None)
out.append(cls.format_function_param(
'assign', value='\"%s\"' % k,
name=cls.format_function_param(
'index', variable=keys['keys'],
index=(i + idx_offset))))
out += cls.write_type_def(
cls.format_function_param(
'index', variable=keys['values'],
index=(i + idx_offset)), v_copy,
name_base=('%s_prop%d' % (name_base, i)),
use_generic=use_generic)
else:
keys['nitems'] = 0
keys['keys'] = cls.function_param['null']
keys['values'] = cls.function_param['null']
elif datatype['type'] in ['ply', 'obj']:
pass
elif datatype['type'] == '1darray':
for k in ['subtype', 'precision']:
keys[k] = datatype[k]
keys['precision'] = int(keys['precision'])
keys['length'] = datatype.get('length', '0')
keys['units'] = datatype.get('units', '')
elif datatype['type'] == 'ndarray':
for k in ['subtype', 'precision']:
keys[k] = datatype[k]
keys['precision'] = int(keys['precision'])
if 'shape' in datatype:
shape_var = '%s_shape' % name_base
if cls.zero_based:
idx_offset = 0
else:
idx_offset = 1
for i, x in enumerate(datatype['shape']):
out.append(cls.format_function_param(
'assign', value=x,
name=cls.format_function_param(
'index', variable=shape_var,
index=(i + idx_offset))))
keys['ndim'] = len(datatype['shape'])
keys['shape'] = shape_var
typename = 'ndarray_arr'
else:
keys['ndim'] = 0
keys['shape'] = cls.function_param['null']
keys['units'] = datatype.get('units', '')
elif (typename == 'scalar') or (typename in constants.VALID_TYPES):
keys['subtype'] = datatype.get('subtype', datatype['type'])
keys['units'] = datatype.get('units', '')
if keys['subtype'] in ['bytes', 'string', 'unicode']:
keys['precision'] = int(datatype.get('precision', 0))
else:
keys['precision'] = int(datatype['precision'])
typename = 'scalar'
elif datatype['type'] in ['boolean', 'null', 'number',
'integer', 'string']:
keys['type'] = datatype['type']
typename = 'default'
elif (typename in ['class', 'function']):
keys['type'] = typename
typename = 'pyobj'
elif typename in ['instance', 'any']:
keys['use_generic'] = cls.function_param['true']
typename = 'empty'
elif typename in ['schema']:
keys['use_generic'] = cls.function_param['true']
else: # pragma: debug
raise ValueError("Cannot create %s version of type '%s'"
% (cls.language, typename))
fmt = cls.format_function_param('init_type_%s' % typename, **keys)
out.append(cls.format_function_param('assign', name=name,
value=fmt))
return out
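# Illustrative sketch (not executed): assuming a concrete driver whose
# function_param defines 'init_type_scalar' and 'assign' templates,
#     cls.write_type_def(
#         'x_dtype', {'type': 'scalar', 'subtype': 'float',
#                     'precision': 64, 'units': 'cm'})
# reduces to a single 'assign' line storing the filled 'init_type_scalar'
# template (subtype='float', precision=64, units='cm', use_generic false)
# in 'x_dtype'; the exact text of that line depends on the language's
# templates.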
@classmethod
def write_channel_def(cls, key, datatype=None, **kwargs):
r"""Write an channel definition.
Args:
key (str): Entry in cls.function_param that should be used.
datatype (dict, optional): Data type associated with the channel.
Defaults to None and is ignored.
**kwargs: Additional keyword arguments are passed as parameters
to format_function_param.
Returns:
list: Lines required to declare and define an output channel.
"""
out = []
if (datatype is not None) and ('{channel_type}' in cls.function_param[key]):
kwargs['channel_type'] = '%s_type' % kwargs['channel']
out += cls.write_type_def(
kwargs['channel_type'], datatype,
use_generic=kwargs.get('use_generic', False))
dir_map = {'input': 'recv', 'output': 'send'}
try_keys = [dir_map[key] + '_converter', 'transform']
try_vals = []
if all([bool(kwargs.get(k, False)) for k in try_keys]): # pragma: debug
# TODO: Handling merger of the transforms in yaml or
# remove the *_converter options entirely
raise RuntimeError(("Transforms are specified in multiple "
"locations for this input: %s")
% str(try_keys))
for k in try_keys:
if k in kwargs:
v = kwargs[k]
if not isinstance(v, list):
v = [v]
try_vals += v
# This last transform is used because the others are assumed
# to be applied by the connection driver
if try_vals and isinstance(try_vals[-1], str):
try_key = '%s_%s' % (try_vals[-1], key)
if ((('python_interface' in cls.function_param)
and (try_key in cls.python_interface))):
kwargs['python_interface'] = cls.python_interface[try_key]
if ((('format_str' in kwargs)
and ('python_interface_format' in cls.function_param))):
key = 'python_interface_format'
kwargs['format_str'] = kwargs['format_str'].encode(
"unicode_escape").decode('utf-8')
else:
key = 'python_interface'
out += [cls.format_function_param(key, **kwargs)]
return out
@classmethod
def write_model_function_call(cls, model_function, flag_var, inputs, outputs,
outputs_in_inputs=None, on_failure=None,
format_not_flag_cond=None, format_flag_cond=None,
iter_function_idx=None):
r"""Write lines necessary to call the model function.
Args:
model_function (str): Handle of the model function that should be
called.
flag_var (str): Name of variable that should be used as a flag.
inputs (list): List of dictionaries describing inputs to the model.
outputs (list): List of dictionaries describing outputs from the model.
outputs_in_inputs (bool, optional): If True, the outputs are
presented in the function definition as inputs. Defaults
to the class attribute outputs_in_inputs.
on_failure (list, optional): Lines to be executed if the model
call fails. Defaults to an error message. This variable
is only used if flag_var is not None and outputs_in_inputs
is True.
format_not_flag_cond (str, optional): Format string that produces
a conditional expression that evaluates to False when the
model flag indicates a failure. Defaults to None and the
class's value for 'not_flag_cond' in function_param is used
if it exists. If it does not exist, format_flag_cond is used.
format_flag_cond (str, optional): Format string that produces
a conditional expression that evaluates to True when the
model flag indicates a success. Defaults to None and the
defaults class's value for 'flag_cond' in function_param is
used if it exists. If it does not exist, the flag is
directly evaluated as if it were a boolean.
iter_function_idx (dict, optional): Variable that serves as an
index to iterate over variables. Defaults to None.
Returns:
list: Lines required to carry out a call to a model function in
this language.
"""
if outputs_in_inputs is None: # pragma: debug
outputs_in_inputs = cls.outputs_in_inputs
func_inputs = cls.channels2vars(inputs)
func_outputs = cls.channels2vars(outputs)
if iter_function_idx:
for src in [func_inputs, func_outputs]:
for i, x in enumerate(src):
if 'iter_datatype' in x:
src[i] = dict(
x, datatype=x['iter_datatype'],
name=cls.format_function_param(
'index', variable=x['name'],
index=iter_function_idx['name'],
extra=x),
length_var=False)
if isinstance(flag_var, dict):
flag_var = flag_var['name']
out = cls.write_function_call(
model_function, inputs=func_inputs, outputs=func_outputs,
flag_var=flag_var, outputs_in_inputs=outputs_in_inputs)
if flag_var and outputs_in_inputs:
if (not format_flag_cond) and ('not_flag_cond' in cls.function_param):
flag_cond = cls.format_function_param(
'not_flag_cond', flag_var=flag_var,
replacement=format_not_flag_cond)
else: # pragma: debug
# flag_cond = '%s (%s)' % (
# cls.function_param['not'],
# cls.format_function_param(
# 'flag_cond', default='{flag_var}', flag_var=flag_var,
# replacement=format_flag_cond))
raise RuntimeError("Untested code below. Uncomment "
"at your own risk if you find "
"use case for it.")
if on_failure is None:
on_failure = [cls.format_function_param(
'error', error_msg="Model call failed.")]
out += cls.write_if_block(flag_cond, on_failure)
if iter_function_idx:
out = cls.write_for_loop(iter_function_idx['name'],
iter_function_idx['begin'],
iter_function_idx['end'],
out)
return out
@classmethod
def write_model_recv(cls, channel, recv_var, flag_var='flag',
iter_var=None, allow_failure=False,
alt_recv_function=None):
r"""Write a model receive call include checking the return flag.
Args:
channel (str): Name of variable that the channel being received from
was stored in.
recv_var (dict, list): Information of one or more variables that
received information should be stored in.
flag_var (str, optional): Name of flag variable that the flag should
be stored in. Defaults to 'flag'.
iter_var (str, optional): Name of flag signifying when the
model is in its first iteration. If allow_failure is
True and iter_var is provided, an error will be raised
if iter_var is True. Defaults to None.
allow_failure (bool, optional): If True, the returned lines will
call a break if the flag is False. Otherwise, the returned
lines will issue an error. Defaults to False.
alt_recv_function (str, optional): Alternate receive function
format string. Defaults to None and is ignored.
Returns:
list: Lines required to carry out a receive call in this language.
"""
if cls.function_param is None:
raise NotImplementedError("function_param attribute not set for"
"language '%s'" % cls.language)
recv_var_str = recv_var
if not isinstance(recv_var, str):
recv_var_par = cls.channels2vars(recv_var)
recv_var_str = cls.prepare_output_variables(
recv_var_par, in_inputs=cls.outputs_in_inputs,
for_yggdrasil=True)
else:
recv_var_par = cls.split_variables(recv_var_str)
expanded_recv_var = None
if (len(recv_var_par) > 1) and ('multiple_outputs' in cls.function_param):
expanded_recv_var = recv_var_str
recv_var_str = 'temp_%s' % recv_var_par[0]['name']
if isinstance(flag_var, dict):
flag_var = flag_var['name']
if isinstance(iter_var, dict):
iter_var = iter_var['name']
if cls.outputs_in_inputs:
inputs = [recv_var_str]
outputs = [flag_var]
else:
inputs = []
outputs = [flag_var, recv_var_str]
if cls.include_channel_obj:
inputs.insert(0, channel)
lines = cls.write_function_call(
cls.format_function_param('recv_function', channel=channel,
replacement=alt_recv_function),
inputs=inputs, outputs=outputs, include_arg_count=cls.include_arg_count)
if 'not_flag_cond' in cls.function_param:
flag_cond = cls.format_function_param('not_flag_cond',
flag_var=flag_var)
else:
flag_cond = '%s (%s)' % (
cls.function_param['not'],
cls.format_function_param('flag_cond', default='{flag_var}',
flag_var=flag_var))
fail_message = cls.escape_quotes(
"Could not receive %s." % recv_var_str)
if allow_failure:
fail_message = cls.escape_quotes(
'End of input from %s.' % recv_var_str)
if_block = [cls.format_function_param('print', message=fail_message),
cls.function_param.get('break', 'break')]
if iter_var is not None:
if_block = cls.write_if_block(
iter_var,
[cls.format_function_param(
'error', error_msg=cls.escape_quotes(
'No input from %s.' % recv_var_str))],
if_block)
else:
if_block = [cls.format_function_param('error', error_msg=fail_message)]
lines += cls.write_if_block(flag_cond, if_block)
# Check if single element should be expanded
if expanded_recv_var:
# lines.append(cls.format_function_param(
# 'print_generic', object=recv_var_str))
if 'expand_mult' in cls.function_param: # pragma: matlab
lines.append(cls.format_function_param(
'expand_mult', name=expanded_recv_var, value=recv_var_str))
elif 'assign_mult' in cls.function_param:
lines.append(cls.format_function_param(
'assign_mult', name=expanded_recv_var, value=recv_var_str))
else:
lines.append(cls.format_function_param(
'assign', name=expanded_recv_var, value=recv_var_str))
elif len(recv_var_par) == 1:
lines += cls.write_expand_single_element(recv_var_str)
return lines
@classmethod
def write_model_send(cls, channel, send_var, flag_var='flag',
allow_failure=False):
r"""Write a model send call include checking the return flag.
Args:
channel (str): Name of variable that the channel being sent to
was stored in.
send_var (dict, list): Information on one or more variables
containing information that will be sent.
flag_var (str, optional): Name of flag variable that the flag should
be stored in. Defaults to 'flag'.
allow_failure (bool, optional): If True, the returned lines will
call a break if the flag is False. Otherwise, the returned
lines will issue an error. Defaults to False.
Returns:
list: Lines required to carry out a send call in this language.
"""
if cls.function_param is None:
raise NotImplementedError("function_param attribute not set for"
"language '%s'" % cls.language)
send_var_str = send_var
if not isinstance(send_var_str, str):
send_var_par = cls.channels2vars(send_var)
send_var_str = cls.prepare_input_variables(
send_var_par, for_yggdrasil=True)
if isinstance(flag_var, dict):
flag_var = flag_var['name']
if cls.include_channel_obj:
send_var_str = [channel, send_var_str]
lines = cls.write_function_call(
cls.format_function_param('send_function', channel=channel),
inputs=send_var_str,
outputs=flag_var, include_arg_count=cls.include_arg_count)
flag_cond = '%s (%s)' % (
cls.function_param['not'],
cls.format_function_param('flag_cond', default='{flag_var}',
flag_var=flag_var))
fail_message = cls.escape_quotes(
"Could not send %s." % send_var_str)
if allow_failure: # pragma: no cover
# This is not particularly useful, but is included for completion
if_block = [cls.format_function_param('print', message=fail_message),
cls.function_param.get('break', 'break')]
else:
if_block = [cls.format_function_param('error', error_msg=fail_message)]
lines += cls.write_if_block(flag_cond, if_block)
return lines
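# Illustrative sketch (not executed): assuming channels2vars and
# prepare_input_variables reduce the variable list to the bare name 'x',
#     cls.write_model_send('out_channel', [{'name': 'x'}], flag_var='flag')
# emits (1) a call built from the driver's 'send_function' template with
# 'x' as input and 'flag' as output, followed by (2) an if-block that
# raises the driver's 'error' template with "Could not send x." when the
# negated 'flag_cond' template evaluates true; with allow_failure=True the
# error is replaced by a print plus a break.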
@classmethod
def write_print_var(cls, var, prefix_msg=None):
r"""Get the lines necessary to print a variable in this language.
Args:
var (dict): Variable information.
prefix_msg (str, optional): Message that should be printed
before the variable. Defaults to None and is ignored.
Returns:
list: Lines printing the specified variable.
"""
out = []
print_key = None
varname = var
if isinstance(var, dict):
varname = var['name']
typename = var.get(
'datatype',
{'type': var.get('type', None)}).get('type', None)
if ('print_%s' % typename) in cls.function_param:
print_key = ('print_%s' % typename)
elif 'print_generic' in cls.function_param:
print_key = 'print_generic'
if print_key:
if prefix_msg is not None:
out.append(cls.format_function_param(
'print', message=prefix_msg))
out += [cls.format_function_param(
print_key, object=varname)]
return out
@classmethod
def write_print_input_var(cls, var, **kwargs):
r"""Get the lines necessary to print an input variable in this
language.
Args:
var (dict): Variable information.
**kwargs: Additional keyword arguments are passed to write_print_var.
Returns:
list: Lines printing the specified variable.
"""
return cls.write_print_var(var, **kwargs)
@classmethod
def write_print_output_var(cls, var, in_inputs=False, **kwargs):
r"""Get the lines necessary to print an output variable in this
language.
Args:
var (dict): Variable information.
in_inputs (bool, optional): If True, the output variable
is passed in as an input variable to be populated.
Defaults to False.
**kwargs: Additional keyword arguments are passed to write_print_var.
Returns:
list: Lines printing the specified variable.
"""
return cls.write_print_var(var, **kwargs)
@classmethod
def write_function_def(cls, function_name, inputs=[], outputs=[],
input_var=None, output_var=None,
function_contents=[],
outputs_in_inputs=False,
opening_msg=None, closing_msg=None,
print_inputs=False, print_outputs=False,
skip_interface=False, function_keys=None,
verbose=False, **kwargs):
r"""Write a function definition.
Args:
function_name (str): Name of the function being defined.
inputs (list, optional): List of inputs to the function.
Defaults to []. Ignored if input_var provided.
outputs (list, optional): List of outputs from the function.
Defaults to []. If not provided, no return call is
added to the function body. Ignored if output_var
provided.
input_var (str, optional): Full string specifying input in
the function definition. If not provided, this will be
created based on the contents of the inputs variable.
output_var (str, optional): Full string specifying output in
the function definition. If not provided, this will be
created based on the contents of the outputs variable.
function_contents (list, optional): List of lines comprising
the body of the function. Defaults to [].
outputs_in_inputs (bool, optional): If True, the outputs are
presented in the function definition as inputs. Defaults
to False.
opening_msg (str, optional): String that should be printed
before the function contents (and inputs if print_inputs
is True). Defaults to None and is ignored.
closing_msg (str, optional): String that should be printed
after the function contents (and outputs if print_outputs
is True). Defaults to None and is ignored.
print_inputs (bool, optional): If True, the input variables
will be printed before the function contents. Defaults
to False.
print_outputs (bool, optional): If True, the output variables
will be printed after the function contents. Defaults to
False.
skip_interface (bool, optional): If True, the line including
the interface will be skipped. Defaults to False.
function_keys (tuple, optional): 2 element tuple that
specifies the keys for the function_param entries that
should be used to begin & end a function definition.
Defaults to ('function_def_begin', 'function_def_end').
verbose (bool, optional): If True, the contents of the created file
are displayed. Defaults to False.
**kwargs: Additional keyword arguments are passed to
cls.format_function_param.
Returns:
list: Lines completing the function call.
Raises:
NotImplementedError: If the function_param attribute for the
class is not defined.
"""
if cls.function_param is None:
raise NotImplementedError("function_param attribute not set for"
"language '%s'" % cls.language)
if function_keys is None:
function_keys = ('function_def_begin', 'function_def_end')
out = []
interface_lines = []
if ('interface' in cls.function_param) and (not skip_interface):
ygglib = cls.interface_library
if ygglib in cls.internal_libraries:
ygglib = cls.internal_libraries[ygglib]['source']
interface_lines.append(cls.format_function_param(
'interface', interface_library=ygglib))
if not cls.interface_inside_exec:
out += interface_lines
flag_var = {}
if input_var is None:
input_var = cls.prepare_input_variables(
inputs, in_definition=True)
if output_var is None:
output_var = cls.prepare_output_variables(
outputs, in_inputs=outputs_in_inputs, in_definition=True)
print_input_lines = []
if print_inputs and inputs:
for x in inputs:
print_input_lines += cls.write_print_input_var(
x, prefix_msg=('INPUT[%s]:' % x['name']))
print_output_lines = []
if print_outputs and outputs:
for x in outputs:
print_output_lines += cls.write_print_output_var(
x, prefix_msg=('OUTPUT[%s]:' % x['name']),
in_inputs=outputs_in_inputs)
old_outputs = []
if outputs_in_inputs:
if output_var:
input_var = cls.prepare_input_variables(
[input_var, output_var])
flag_var = kwargs.get('flag_var', 'flag')
if isinstance(flag_var, str):
flag_var = {'name': flag_var}
flag_var.setdefault('datatype', 'flag')
flag_var.setdefault('value', cls.function_param.get(
'true_flag', cls.function_param['true']))
old_outputs = outputs
outputs = [flag_var]
output_var = cls.prepare_output_variables(outputs)
out.append(cls.format_function_param(
function_keys[0], function_name=function_name,
input_var=input_var, output_var=output_var, **kwargs))
if cls.interface_inside_exec:
out += [cls.function_param['indent'] + x
for x in interface_lines]
free_vars = []
if 'declare' in cls.function_param:
definitions = []
if not cls.types_in_funcdef:
for o in (inputs + old_outputs):
out += [cls.function_param['indent'] + x for
x in cls.write_declaration(
o, definitions=definitions,
requires_freeing=free_vars,
is_argument=True)]
for o in outputs:
out += [cls.function_param['indent'] + x for
x in cls.write_declaration(
o, definitions=definitions,
requires_freeing=free_vars)]
out += [cls.function_param['indent'] + x
for x in definitions]
if outputs_in_inputs:
out.append(cls.function_param['indent']
+ cls.format_function_param(
'assign', **flag_var))
if opening_msg:
out.append(cls.function_param['indent']
+ cls.format_function_param(
'print', message=opening_msg))
if print_inputs:
for x in print_input_lines:
out.append(cls.function_param['indent'] + x)
for x in function_contents:
out.append(cls.function_param['indent'] + x)
if print_outputs:
for x in print_output_lines:
out.append(cls.function_param['indent'] + x)
if closing_msg:
out.append(cls.function_param['indent']
+ cls.format_function_param(
'print', message=closing_msg))
# This is not currently used by the tests, but may be
# needed in the future
assert(not free_vars)
# for x in free_vars:
# out += [cls.function_param['indent'] + line
# for line in cls.write_free(x)]
if output_var and ('return' in cls.function_param):
out.append(cls.function_param['indent']
+ cls.format_function_param(
'return', output_var=output_var))
if function_keys[1] in cls.function_param:
out.append(cls.format_function_param(
function_keys[1], function_name=function_name))
else:
out.append(cls.function_param.get('block_end', ''))
if verbose: # pragma: debug
logger.info('\n' + '\n'.join(out))
else:
logger.debug('\n' + '\n'.join(out))
return out
@classmethod
def write_function_call(cls, function_name, inputs=[], outputs=[],
include_arg_count=False,
outputs_in_inputs=False, **kwargs):
r"""Write a function call.
Args:
function_name (str): Name of the function being called.
inputs (list, optional): List of inputs to the function.
Defaults to [].
outputs (list, optional): List of outputs from the function.
Defaults to [].
include_arg_count (bool, optional): If True, the count of input
arguments is included as the first argument. Defaults to
False.
outputs_in_inputs (bool, optional): If True, the outputs are
presented in the function definition as inputs. Defaults
to False.
**kwargs: Additional keyword arguments are passed to
cls.format_function_param.
Returns:
list: Lines completing the function call.
"""
if outputs_in_inputs:
inputs = inputs + [cls.prepare_output_variables(
outputs, in_inputs=outputs_in_inputs)]
flag_var = kwargs.get('flag_var', None)
if (flag_var is None) and ('function_call_noout' not in cls.function_param):
flag_var = 'flag'
outputs = []
if flag_var:
outputs.append(flag_var)
kwargs.setdefault('input_var', cls.prepare_input_variables(inputs))
kwargs.setdefault('output_var', cls.prepare_output_variables(outputs))
nout = len(cls.split_variables(kwargs['output_var']))
if include_arg_count:
narg = len(cls.split_variables(kwargs['input_var']))
kwargs['input_var'] = cls.prepare_input_variables(
[str(narg), kwargs['input_var']])
if (nout == 0) and ('function_call_noout' in cls.function_param):
call_str = cls.format_function_param(
'function_call_noout', function_name=function_name, **kwargs)
else:
call_str = cls.format_function_param(
'function_call', default='{function_name}({input_var})',
function_name=function_name, **kwargs)
if nout == 0:
out = [call_str + cls.function_param.get('line_end', '')]
elif (nout > 1) and ('assign_mult' in cls.function_param):
out = [cls.format_function_param(
'assign_mult', name=kwargs['output_var'], value=call_str)]
else:
out = [cls.format_function_param(
'assign', name=kwargs['output_var'], value=call_str)]
return out
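# Illustrative sketch (not executed): for a driver whose 'assign' template
# renders as "{name} = {value}" and that falls back on the default
# '{function_name}({input_var})' call format,
#     cls.write_function_call('compute', inputs=['a', 'b'], outputs=['c'])
# would yield something like ['c = compute(a, b)']; with
# outputs_in_inputs=True the outputs are instead appended to the input
# list and a 'flag' variable captures the call's return value.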
@classmethod
def write_executable_import(cls, model_name=None, **kwargs):
r"""Add import statements to executable lines.
Args:
model_name (str, optional): Name given to the model. Defaults to
None and is not used by this base implementation.
**kwargs: Keyword arguments for import statement.
Returns:
list: Lines required to complete the import.
"""
# This code is currently unused, but may be needed in the
# future to import a dependency directly
# if ('filename' not in kwargs) and ('import_nofile' in cls.function_param):
# key = 'import_nofile'
# else:
# key = 'import'
# return [cls.format_function_param(key, **kwargs)]
out = []
if 'import' in cls.function_param:
out = [cls.format_function_param('import', **kwargs)]
return out
@classmethod
def write_executable(cls, lines, prefix=None, suffix=None,
function_definitions=None, imports=None,
model_name=None):
r"""Return the lines required to complete a program that will run
the provided lines.
Args:
lines (list): Lines of code to be wrapped as an executable.
prefix (list, optional): Lines of code that should precede the
wrapped code. Defaults to None and is ignored. (e.g. C/C++
include statements).
suffix (list, optional): Lines of code that should follow the
wrapped code. Defaults to None and is ignored.
function_definitions (list, optional): Lines of code defining
functions that will be used by the code contained in lines.
Defaults to None and is ignored.
imports (list, optional): Kwargs for packages that should
be imported for use by the executable. Defaults to
None and is ignored.
model_name (str, optional): Name given to the model. Defaults to
None.
Returns:
lines: Lines of code wrapping the provided lines with the
necessary code to run it as an executable (e.g. C/C++'s main).
"""
if cls.function_param is None:
raise NotImplementedError("function_param attribute not set for"
"language '%s'" % cls.language)
out = []
# Add imports
if imports is not None:
if not isinstance(imports, list):
imports = [imports]
import_lines = []
for kws in imports:
import_lines += cls.write_executable_import(**kws)
if prefix is None:
prefix = []
prefix += import_lines
# Add standard & user defined prefixes
if ((('exec_prefix' in cls.function_param)
and (cls.function_param['exec_prefix'] not in lines))):
out.append(cls.function_param['exec_prefix'])
out.append('')
if prefix is not None:
if not isinstance(prefix, (list, tuple)):
prefix = [prefix]
out += prefix
out.append('')
if (((not cls.function_param.get('functions_defined_last', False))
and (function_definitions is not None))):
out += function_definitions
out.append('')
# Add code with begin/end book ends
if ((('exec_begin' in cls.function_param)
and (cls.function_param['exec_begin'] not in '\n'.join(lines)))):
out.append(cls.function_param['exec_begin'])
if not isinstance(lines, (list, tuple)):
lines = [lines]
for x in lines:
out.append(cls.function_param['indent'] + x)
out.append(cls.function_param.get('exec_end',
cls.function_param.get(
'block_end', '')))
else:
out += lines
if out[-1]:
out.append('')
# Add standard & user defined suffixes
if suffix is not None:
if not isinstance(suffix, (list, tuple)):
suffix = [suffix]
out += suffix
out.append('')
if ((('exec_suffix' in cls.function_param)
and (cls.function_param['exec_suffix'] not in lines))):
out.append(cls.function_param['exec_suffix'])
out.append('')
if (((cls.function_param.get('functions_defined_last', False))
and (function_definitions is not None))): # pragma: matlab
out += function_definitions
out.append('')
if cls.max_line_width:
new_out = []
for iout in out:
new_out += cls.split_line(iout)
out = new_out
return out
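# The assembled program follows a fixed order: optional 'exec_prefix',
# user prefix plus any import lines, function definitions (unless the
# driver sets 'functions_defined_last'), the wrapped lines inside the
# optional 'exec_begin'/'exec_end' book ends, the user suffix, the
# optional 'exec_suffix' and, for drivers that require definitions last
# (e.g. MATLAB), the trailing function definitions; long lines are then
# re-split against max_line_width.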
@classmethod
def escape_quotes(cls, x):
r"""Escape quotes in a string.
Args:
x (str): String to escape quotes in.
Returns:
str: x with escaped quotes.
"""
out = x.replace('"', '\\\"')
out = out.replace("'", "\\\'")
return out
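# For example, cls.escape_quotes('say "hi" to \'them\'') returns the text
#     say \"hi\" to \'them\'
# i.e. a backslash is inserted before every double and single quote.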
@classmethod
def split_line(cls, line, length=None, force_split=False):
r"""Split a line as close to (or before) a given character as
possible.
Args:
line (str): Line to split.
length (int, optional): Maximum length of split lines. Defaults
to cls.max_line_width if not provided.
force_split (bool, optional): If True, force a split to
occur at the specified length. Defaults to False.
Returns:
list: List of lines resulting from splitting the provided line.
"""
out = []
if not line.lstrip():
return [line]
nindent = line.index(line.lstrip()[0])
block_end = cls.function_param['block_end'].lower()
if '\n' in line:
out = line.split('\n')
for i in range(1, len(out)):
if out[i].lstrip().lower().startswith(block_end):
nindent -= len(cls.function_param['indent'])
out[i] = (nindent * ' ') + out[i]
new_out = []
for x in out:
new_out += cls.split_line(x, length=length,
force_split=force_split)
return new_out
if length is None:
length = cls.max_line_width
if (length is None) or (len(line) < length):
return [line]
length_allow = (length - len(cls.function_param.get(
'continuation_before', '')))
if force_split:
isplit = length_allow
else:
isplit = line[:length_allow].rindex(' ') + 1
if (isplit < nindent + 1) or (isplit >= len(line)):
out = [line]
else:
out.append(line[:isplit] + cls.function_param.get(
'continuation_before', ''))
out += cls.split_line(
((nindent * ' ') + cls.function_param.get(
'continuation_after', '') + line[isplit:]),
length=length, force_split=force_split)
return out
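# Example behaviour (mirroring the split_lines entries in
# get_testing_options below): cls.split_line('abcdef', length=3,
# force_split=True) is expected to yield ['abc', 'def'], while
# cls.split_line(' abc', length=3, force_split=True) is expected to come
# back unchanged as [' abc'].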
@classmethod
def input2output(cls, var):
r"""Perform conversion necessary to turn a variable extracted from a
function definition from an input to an output.
Args:
var (dict): Variable definition.
Returns:
dict: Updated variable definition.
"""
return var
@classmethod
def output2input(cls, var, in_definition=True):
r"""Perform conversion necessary to turn an output variable
into a corresponding input that can be used to format a
function definition.
Args:
var (dict): Variable definition.
in_definition (bool, optional): If True, the returned
dictionary corresponds to an input variable in a
function definition. If False, the returned value
will correspond to an input to a function. Defaults to
True.
Returns:
dict: Updated variable definition.
"""
return var
@classmethod
def get_native_type(cls, **kwargs):
r"""Get the native type.
Args:
type (str, optional): Name of |yggdrasil| extended JSON
type or JSONSchema dictionary defining a datatype.
**kwargs: Additional keyword arguments may be used in determining
the precise declaration that should be used.
Returns:
str: The native type.
"""
if 'native_type' in kwargs:
return kwargs['native_type']
assert('json_type' not in kwargs)
json_type = kwargs.get('datatype', kwargs)
if isinstance(json_type, dict):
type_name = json_type.get('type', 'bytes')
else:
type_name = json_type
json_type = kwargs
if type_name == 'scalar':
type_name = json_type['subtype']
if (type_name == 'flag') and (type_name not in cls.type_map):
type_name = 'boolean'
return cls.type_map[type_name]
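# Illustrative sketch: cls.get_native_type(
#     datatype={'type': 'scalar', 'subtype': 'float', 'precision': 64})
# resolves the lookup key to 'float' and returns cls.type_map['float'],
# whatever native name the concrete driver maps that to; an explicit
# 'native_type' keyword short-circuits the lookup entirely.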
@classmethod
def get_json_type(cls, native_type):
r"""Get the JSON type from the native language type.
Args:
native_type (str): The native language type.
Returns:
str, dict: The JSON type.
"""
return cls.get_inverse_type_map()[native_type]
@classmethod
def write_finalize_iiter(cls, var):
r"""Get the lines necessary to finalize an input array for iteration.
Args:
var (dict, str): Name or information dictionary for the variable
being finalized.
Returns:
list: The lines finalizing the variable.
"""
return []
@classmethod
def write_initialize_oiter(cls, var, value=None, requires_freeing=None):
r"""Get the lines necessary to initialize an array for iteration
output.
Args:
var (dict, str): Name or information dictionary for the variable
being initialized.
value (str, optional): Value that should be assigned to the
variable.
requires_freeing (list, optional): Existing list that variables
requiring freeing should be appended to. Defaults to None
and is ignored.
Returns:
list: The lines initializing the variable.
"""
return cls.write_initialize(var, value=value,
requires_freeing=requires_freeing)
@classmethod
def write_finalize_oiter(cls, var, value=None, requires_freeing=None):
r"""Get the lines necessary to finalize an array after iteration.
Args:
var (dict, str): Name or information dictionary for the variable
being finalized.
value (str, optional): Value that should be assigned to the
variable.
requires_freeing (list, optional): Existing list of variables
requiring freeing. Defaults to None and is ignored.
Returns:
list: The lines finalizing the variable.
"""
return []
@classmethod
def write_initialize(cls, var, value=None, requires_freeing=None):
r"""Get the code necessary to initialize a variable.
Args:
var (dict, str): Name or information dictionary for the variable
being declared.
value (str, optional): Value that should be assigned to the
variable after it is declared.
requires_freeing (list, optional): Existing list that variables
requiring freeing should be appended to. Defaults to None
and is ignored.
Returns:
list: The lines initializing the variable.
"""
out = []
if isinstance(var, str): # pragma: no cover
var = {'name': var}
if (value is None) and isinstance(var.get('datatype', False), dict):
init_type = 'init_%s' % var['datatype']['type']
free_type = 'free_%s' % var['datatype']['type']
if init_type in cls.function_param:
assert(free_type in cls.function_param)
# value = cls.format_function_param(init_type, **var['datatype'])
value = cls.function_param[init_type]
if requires_freeing is not None:
requires_freeing.append(var)
if value is not None:
out.append(cls.format_function_param(
'assign', name=var['name'], value=value))
return out
@classmethod
def write_declaration(cls, var, value=None, requires_freeing=None,
definitions=None, is_argument=False):
r"""Return the lines required to declare a variable with a certain
type.
Args:
var (dict, str): Name or information dictionary for the variable
being declared.
value (str, optional): Value that should be assigned to the
variable after it is declared.
requires_freeing (list, optional): Existing list that variables
requiring freeing should be appended to. Defaults to None
and is ignored.
definitions (list, optional): Existing list that variable
definitions should be added to. If not provided (the default is
None), the definitions are included in the returned lines
instead.
is_argument (bool, optional): If True, the variable being
declared is an input argument. Defaults to False.
Returns:
list: The lines declaring the variable.
"""
if isinstance(var, str): # pragma: no cover
var = {'name': var}
type_name = cls.get_native_type(**var)
out = [cls.format_function_param('declare',
type_name=type_name,
variable=cls.get_name_declare(var))]
if is_argument:
return out
if definitions is None:
definitions = out
definitions += cls.write_initialize(var, value=value,
requires_freeing=requires_freeing)
return out
@classmethod
def get_name_declare(cls, var):
r"""Determine the name that should be used for declaration.
Args:
var (str, dict): Name of variable or dictionary of information.
Returns:
str: Modified name for declaration.
"""
if isinstance(var, str): # pragma: no cover
return var
assert(isinstance(var, dict))
out = var['name']
return out
@classmethod
def write_free(cls, var, **kwargs):
r"""Return the lines required to free a variable with a certain type.
Args:
var (dict, str): Name or information dictionary for the variable
being declared.
**kwargs: Additional keyword arguments are passed to format_function_param.
Returns:
list: The lines freeing the variable.
"""
if isinstance(var, str): # pragma: no cover
var = {'name': var}
out = []
if not var.get('dont_free', False):
if ((isinstance(var.get('datatype', False), dict)
and (('free_%s' % var['datatype']['type'])
in cls.function_param))):
out = [cls.format_function_param(
'free_%s' % var['datatype']['type'],
variable=var['name'], **kwargs)]
else:
out = [cls.format_function_param(
'free', variable=var['name'], **kwargs)]
return out
@classmethod
def write_assign_to_output(cls, dst_var, src_var, copy=False,
outputs_in_inputs=False, **kwargs):
r"""Write lines assigning a value to an output variable.
Args:
dst_var (str, dict): Name or information dictionary for
variable being assigned to.
src_var (str, dict): Name or information dictionary for
value being assigned to dst_var.
copy (bool, optional): If True, the assigned value is copied
during assignment. Defaults to False.
outputs_in_inputs (bool, optional): If True, outputs are passed
as input parameters. In some languages, this means that a
pointer or reference is passed (e.g. C) and so the assignment
should be to the memory indicated rather than the variable.
Defaults to False.
Returns:
list: Lines achieving assignment.
"""
datatype = None
if isinstance(dst_var, dict):
kwargs['name'] = dst_var['name']
datatype = dst_var['datatype']
else:
kwargs['name'] = dst_var
if isinstance(src_var, dict):
kwargs['value'] = src_var['name']
datatype = src_var['datatype']
else:
kwargs['value'] = src_var
if ((outputs_in_inputs and isinstance(dst_var, dict)
and isinstance(dst_var['datatype'], dict)
and ('copy_' + dst_var['datatype']['type']
in cls.function_param))):
copy = True
if copy:
if ((isinstance(datatype, dict)
and ('copy_' + datatype['type'] in cls.function_param))):
return [cls.format_function_param(
'copy_' + datatype['type'], **kwargs)]
else:
return [cls.format_function_param('assign_copy', **kwargs)]
else:
return [cls.format_function_param('assign', **kwargs)]
@classmethod
def write_expand_single_element(cls, output_var, add_cond=False):
r"""Write lines allowing extraction of the only element from a single
element array as a stand-alone variable if the variable is an array
and only has one element.
Args:
output_var (str): Name of the variable that should be conditionally
expanded.
add_cond (list, optional): Additional conditions that must be
satisfied for the array element to be extracted. Defaults to
False and is ignored.
Returns:
list: Lines added the conditional expansion of single element
arrays.
"""
if 'istype' not in cls.function_param:
return []
cond = ('(%s) %s (%s %s 1)' % (
cls.format_function_param('istype',
variable=output_var,
type=cls.type_map['array']),
cls.function_param.get('and', '&&'),
cls.format_function_param('len',
variable=output_var),
cls.function_param.get('equ', '==')))
if add_cond:
for x in add_cond:
cond += f" {cls.function_param.get("and", "&&")} {x}"
out = cls.write_if_block(
cond,
cls.format_function_param(
'assign', name=output_var,
value=cls.format_function_param(
'index', variable=output_var,
index=int(cls.function_param.get('first_index', 0)))))
return out
@classmethod
def split_variables(cls, var_str):
r"""Split variable string include individual variables.
Args:
var_str (str): String containing multiple variables.
Returns:
list: Split variables.
"""
out = []
if var_str:
pairs = [(r'\[', r'\]'),
(r'\(', r'\)'),
(r'\{', r'\}'),
(r"'", r"'"),
(r'"', r'"')]
regex_ele = r''
present = False
for p in pairs:
if not any([(str(ip)[-1] in var_str) for ip in p]):
continue
present = True
regex_ele += (r'(?:%s[.\n]*?%s)|' % p)
if present:
regex_ele += '(?:.+?)'
regex_ele = r'\s*(%s)\s*(?:,|$)' % regex_ele
out = [x.group(1) for x in re.finditer(regex_ele, var_str)]
else:
out = [x.strip() for x in var_str.split(',')]
return out
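# For example, cls.split_variables('a, b, c') simply comma-splits to
# ['a', 'b', 'c']; when bracket, parenthesis, brace or quote characters
# are present, the regex branch above is used instead of the plain split.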
@classmethod
def prepare_variables(cls, vars_list, in_definition=False,
for_yggdrasil=False):
r"""Concatenate a set of input variables such that it can be passed as a
single string to the function_call parameter.
Args:
vars_list (list): List of variable dictionaries containing info
(e.g. names) that should be used to prepare a string representing
input/output to/from a function call.
in_definition (bool, optional): If True, the returned sequence
will be of the format required for specifying variables
in a function definition. Defaults to False.
for_yggdrasil (bool, optional): If True, the variables will be
prepared in the format expected by calls to yggdrasil
send/recv methods. Defaults to False.
Returns:
str: Concatenated variables list.
"""
name_list = []
if not isinstance(vars_list, list):
vars_list = [vars_list]
for x in vars_list:
if isinstance(x, str):
name_list.append(x)
else:
assert(isinstance(x, dict))
name_list.append(x['name'])
return ', '.join(name_list)
@classmethod
def prepare_input_variables(cls, vars_list, in_definition=False,
for_yggdrasil=False):
r"""Concatenate a set of input variables such that it can be passed as a
single string to the function_call parameter.
Args:
vars_list (list): List of variable dictionaries containing info
(e.g. names) that should be used to prepare a string representing
input to a function call.
in_definition (bool, optional): If True, the returned sequence
will be of the format required for specifying input
variables in a function definition. Defaults to False.
for_yggdrasil (bool, optional): If True, the variables will be
prepared in the format expected by calls to yggdrasil
send/recv methods. Defaults to False.
Returns:
str: Concatenated variables list.
"""
return cls.prepare_variables(vars_list, in_definition=in_definition,
for_yggdrasil=for_yggdrasil)
@classmethod
def prepare_output_variables(cls, vars_list, in_definition=False,
in_inputs=False, for_yggdrasil=False):
r"""Concatenate a set of output variables such that it can be passed as
a single string to the function_call parameter.
Args:
vars_list (list): List of variable dictionaries containing info
(e.g. names) that should be used to prepare a string representing
output from a function call.
in_definition (bool, optional): If True, the returned sequence
will be of the format required for specifying output
variables in a function definition. Defaults to False.
in_inputs (bool, optional): If True, the output variables should
be formatted to be included as input variables. Defaults to
False.
for_yggdrasil (bool, optional): If True, the variables will be
prepared in the format expected by calls to yggdrasil
send/recv methods. Defaults to False.
Returns:
str: Concatenated variables list.
"""
if in_inputs:
vars_list = [cls.output2input(x, in_definition=in_definition)
for x in vars_list]
out = cls.prepare_variables(vars_list, in_definition=in_definition,
for_yggdrasil=for_yggdrasil)
if isinstance(vars_list, list) and (len(vars_list) > 1):
if in_definition and ('multiple_outputs_def' in cls.function_param):
out = cls.format_function_param('multiple_outputs_def', outputs=out)
elif 'multiple_outputs' in cls.function_param:
out = cls.format_function_param('multiple_outputs', outputs=out)
return out
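# Illustrative sketch (not executed): for a driver whose
# 'multiple_outputs' template wraps its argument in brackets,
#     cls.prepare_output_variables([{'name': 'a'}, {'name': 'b'}])
# would give '[a, b]', while a single output comes back bare as 'a';
# with in_inputs=True each variable is first passed through output2input.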
@classmethod
def write_if_block(cls, cond, block_contents, else_block_contents=False):
r"""Return the lines required to complete a conditional block.
Args:
cond (str): Conditional that should determine block execution.
block_contents (list): Lines of code that should be executed inside
the block.
else_block_contents (list, optional): Lines of code that should be
executed inside the else clause of the block. Defaults to False
if not provided and an else clause is omitted.
Returns:
list: Lines of code performing conditional execution of a block.
"""
if cls.function_param is None:
raise NotImplementedError("function_param attribute not set for"
"language '%s'" % cls.language)
out = []
if not isinstance(cond, list):
cond = [cond]
block_contents = [block_contents]
assert(len(cond) == len(block_contents))
for i, (icond, iblock_contents) in enumerate(zip(cond, block_contents)):
if i == 0:
out.append(cls.format_function_param('if_begin', cond=icond))
else:
out.append(cls.format_function_param('if_elif', cond=icond))
if not isinstance(iblock_contents, (list, tuple)):
iblock_contents = [iblock_contents]
for x in iblock_contents:
out.append(cls.function_param['indent'] + x)
if else_block_contents:
out.append(cls.format_function_param('if_else'))
if not isinstance(else_block_contents, (list, tuple)):
else_block_contents = [else_block_contents]
for x in else_block_contents:
out.append(cls.function_param['indent'] + x)
# Close block
out.append(cls.function_param.get('if_end',
cls.function_param.get(
'block_end', '')))
return out
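# Illustrative sketch (not executed): assuming a driver whose 'if_begin'
# template renders as 'if ({cond}) {', whose 'indent' is two spaces and
# whose 'block_end' is '}',
#     cls.write_if_block('x > 0', ['y = 1;'], ['y = 0;'])
# would come out roughly as
#     ['if (x > 0) {', '  y = 1;', <else line>, '  y = 0;', '}']
# with the else line produced by the driver's 'if_else' template.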
@classmethod
def write_for_loop(cls, iter_var, iter_begin, iter_end, loop_contents):
r"""Return the lines required to complete a for loop.
Args:
iter_var (str): Name of variable that iterator should use.
iter_begin (int): Beginning of iteration.
iter_end (int): End of iteration.
loop_contents (list): Lines of code that should be executed inside
the loop.
Returns:
list: Lines of code performing a loop.
"""
if cls.function_param is None:
raise NotImplementedError("function_param attribute not set for"
"language '%s'" % cls.language)
out = []
# Opening for statement line
out.append(cls.format_function_param('for_begin', iter_var=iter_var,
iter_begin=iter_begin,
iter_end=iter_end))
# Indent loop contents
if not isinstance(loop_contents, (list, tuple)):
loop_contents = [loop_contents]
for x in loop_contents:
out.append(cls.function_param['indent'] + x)
# Close block
out.append(cls.function_param.get('for_end',
cls.function_param.get(
'block_end', '')))
return out
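# Illustrative sketch (not executed): with a hypothetical 'for_begin'
# template of 'for {iter_var} in {iter_begin}..{iter_end}:' and an
# 'indent' of four spaces,
#     cls.write_for_loop('i', 1, n_steps, ['step(i)'])
# (where 'n_steps' and 'step(i)' are placeholders) produces the opening
# line, the indented body line and the driver's 'for_end'/'block_end'
# closer, in that order.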
@classmethod
def write_while_loop(cls, cond, loop_contents):
r"""Return the lines required to complete a for loop.
Args:
cond (str): Conditional that should determine loop execution.
loop_contents (list): Lines of code that should be executed inside
the loop.
Returns:
list: Lines of code performing a loop.
"""
if cls.function_param is None:
raise NotImplementedError("function_param attribute not set for"
"language '%s'" % cls.language)
out = []
# Opening for statement line
out.append(cls.format_function_param('while_begin', cond=cond))
# Indent loop contents
if not isinstance(loop_contents, (list, tuple)):
loop_contents = [loop_contents]
for x in loop_contents:
out.append(cls.function_param['indent'] + x)
# Close block
out.append(cls.function_param.get('while_end',
cls.function_param.get(
'block_end', '')))
return out
@classmethod
def write_try_except(cls, try_contents, except_contents, error_var='e',
error_type=None):
r"""Return the lines required to complete a try/except block.
Args:
try_contents (list): Lines of code that should be executed inside
the try block.
except_contents (list): Lines of code that should be executed inside
the except block.
error_var (str, optional): Name of variable where the caught error
should be stored. Defaults to 'e'.
error_type (str, optional): Name of error type that should be caught.
If not provided, defaults to None and will be set based on the
class function_param entry for 'try_error_type'.
Returns:
list: Lines of code performing a try/except block.
"""
if (cls.function_param is None) or ('try_begin' not in cls.function_param):
raise NotImplementedError("function_param attribute not set for"
"language '%s'" % cls.language)
if error_type is None:
error_type = cls.function_param.get('try_error_type', None)
out = []
# Try block contents
if not isinstance(try_contents, (list, tuple)):
try_contents = [try_contents]
out.append(cls.function_param['try_begin'])
for x in try_contents:
out.append(cls.function_param['indent'] + x)
# Except block contents
if not isinstance(except_contents, (list, tuple)):
except_contents = [except_contents]
out.append(cls.format_function_param('try_except', error_var=error_var,
error_type=error_type))
for x in except_contents:
out.append(cls.function_param['indent'] + x)
# Close block
out.append(cls.function_param.get('try_end',
cls.function_param.get(
'block_end', '')))
return out
@classmethod
def get_testing_options(cls):
r"""Method to return a dictionary of testing options for this class.
Returns:
dict: Dictionary of variables to use for testing. Key/value pairs:
kwargs (dict): Keyword arguments for driver instance.
deps (list): Dependencies to install.
"""
out = dict(
kwargs={}, deps=[],
write_function_def_params=[
{'inputs': [{'name': 'x', 'value': 1.0,
'datatype': {'type': 'float',
'precision': 32,
'units': 'cm'}}],
'outputs': [{'name': 'y',
'datatype': {'type': 'float',
'precision': 32,
'units': 'cm'}}]}],
split_lines=[('abcdef', {'length': 3, 'force_split': True},
['abc', 'def']),
(' abc', {'length': 3, 'force_split': True},
[' abc'])])
return out
|
import os
import re
import sys
import copy
import logging
import warnings
import subprocess
import shutil
import uuid
import tempfile
import asyncio
from collections import OrderedDict
from pprint import pformat
from yggdrasil import platform, tools, languages, multitasking, constants
from yggdrasil.components import import_component
from yggdrasil.drivers.Driver import Driver
from yggdrasil.metaschema.datatypes import is_default_typedef
from queue import Empty
logger = logging.getLogger(__name__)
_map_language_ext = OrderedDict()
def remove_product(product, check_for_source=False, **kwargs):
r"""Delete a single product after checking that the product is not (or
does not contain, in the case of directories), source files.
Args:
product (str): Full path to a file or directory that should be
removed.
check_for_source (bool, optional): If True, the specified product
will be checked to ensure that no source files are present. If
a source file is present, a RuntimeError will be raised.
Defaults to False.
**kwargs: Additional keyword arguments are passed to tools.remove_path.
Raises:
RuntimeError: If the specified product is a source file and
check_for_source is True.
RuntimeError: If the specified product is a directory that contains
a source file and check_for_source is True.
RuntimeError: If the product cannot be removed.
"""
tools.import_all_modules('yggdrasil.drivers')
source_keys = list(_map_language_ext.keys())
if '.exe' in source_keys: # pragma: windows
source_keys.remove('.exe')
if check_for_source:
if os.path.isdir(product):
ext_tuple = tuple(source_keys)
for root, dirs, files in os.walk(product):
for f in files:
if f.endswith(ext_tuple):
raise RuntimeError(("%s contains a source file "
"(%s)") % (product, f))
elif os.path.isfile(product):
ext = os.path.splitext(product)[-1]
if ext in source_keys:
raise RuntimeError("%s is a source file." % product)
tools.remove_path(product, **kwargs)
def remove_products(products, source_products):
r"""Delete products produced during the process of running the model.
Args:
products (list): List of products that should be removed after
checking that they are not source files.
source_products (list): List of products that should be removed
without checking that they are not source files.
"""
for p in source_products:
remove_product(p)
for p in products:
remove_product(p, check_for_source=True)
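# Example usage (illustrative; the file names are placeholders): after a
# model run one might call
#     remove_products(['a_out.txt'], ['a_wrapper.py'])
# which removes 'a_wrapper.py' unconditionally and 'a_out.txt' only after
# remove_product verifies that it is not (and does not contain) a source
# file.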
class ModelDriver(Driver):
r"""Base class for Model drivers and for running executable based models.
Args:
name (str): Unique name used to identify the model. This will
be used to report errors associated with the model.
args (str or list): The path to the file containing the model
program that will be run by the driver for the model's language
and/or a list of arguments that should be passed as input to the
model program or language executable (e.g. source code or
configuration file for a domain specific language).
products (list, optional): Paths to files created by the model that
should be cleaned up when the model exits. Entries can be absolute
paths or paths relative to the working directory. Defaults to [].
function (str, optional): If provided, an integrated model is
created by wrapping the function named here. The function must be
located within the file specified by the source file listed in the
first argument. If not provided, the model must contain its own
calls to the |yggdrasil| interface.
iter_function_over (array, optional): Variable(s) that should be
received or sent as an array, but iterated over. Defaults to an
empty array and is ignored.
source_products (list, optional): Files created by running the model
that are source files. These files will be removed without checking
their extension so users should avoid adding files to this list
unless they are sure they should be deleted. Defaults to [].
is_server (bool, dict, optional): If `True`, the model is assumed to be a
server for one or more client models and an instance of
:class:`yggdrasil.drivers.ServerDriver` is started. The
corresponding channel that should be passed to the yggdrasil API
will be the name of the model. If is_server is a dictionary, it
should contain an 'input' key and an 'output' key. These are
required to be the names of existing input and output channels in
the model that will be co-opted by the server. (Note: This requires
that the co-opted output channel's send method is called once for
each time the co-opted input channel's recv method is called.) If
used with the `function` parameter, `is_server` must be a dictionary.
Defaults to False.
client_of (str, list, optional): The names of one or more models that
this model will call as a server. If there are more than one, this
should be specified as a sequence collection (list). The
corresponding channel(s) that should be passed to the yggdrasil API
will be the name of the server model joined with the name of the
client model with an underscore `<server_model>_<client_model>`.
There will be one channel created for each server the model is a
client of. Defaults to empty list. Use of `client_of` with `function`
is not currently supported.
timesync (bool, str, optional): If set, the model is assumed to
call a send then receive of the state at each timestep
for synchronization with other models that are also
integrating in time. If a string is provided, it is assumed
to be the name of the server that will handle timestep
synchronization. If a boolean is provided, the name of the
server will be assumed to be 'timestep'. Defaults to False.
overwrite (bool, optional): If True, any existing model products
(compilation products, wrapper scripts, etc.) are removed prior to
the run. If False, the products are not removed. Defaults to True.
Setting this to False can improve the performance, particularly for
models that take a long time to compile, but this should only be
done once the model has been fully debugged to ensure that each run
is tested on a clean copy of the model. The value of this keyword
also determines whether or not products are removed after a run.
preserve_cache (bool, optional): If True model products will be kept
following the run, otherwise all products will be cleaned up.
Defaults to False. This keyword is superseded by overwrite.
with_strace (bool, optional): If True, the command is run with strace (on
Linux) or dtrace (on MacOS). Defaults to False.
strace_flags (list, optional): Flags to pass to strace (or dtrace).
Defaults to [].
with_valgrind (bool, optional): If True, the command is run with valgrind.
Defaults to False.
valgrind_flags (list, optional): Flags to pass to valgrind. Defaults to [].
model_index (int, optional): Index of model in list of models being run.
Defaults to 0.
copy_index (int, optional): Index of model in set of copies. Defaults
to -1 indicating there is only one copy of the model.
outputs_in_inputs (bool, optional): If True, outputs from wrapped model
functions are passed by pointer as inputs for modification and the
return value will be a flag. If False, outputs are limited to
return values. Defaults to the value of the class attribute
outputs_in_inputs.
logging_level (str, optional): The level of logging messages that should
be displayed by the model. Defaults to the logging level as
determined by the configuration file and environment variables.
allow_threading (bool, optional): If True, comm connections will be set up
so that the model-side comms can be used by more than one thread.
Defaults to False.
copies (int, optional): The number of copies of the model that should be
created. Defaults to 1.
repository_url (str, optional): URL for the git repository containing
the model source code. If provided, relative paths in the model
YAML definition will be considered relative to the repository root
directory.
repository_commit (str, optional): Commit that should be checked out
in the model repository specified by repository_url. If not
provided, the most recent commit on the default branch will be used.
description (str, optional): Description of the model. This parameter
is only used in the model repository or when providing the model
as a service.
contact_email (str, optional): Email address that should be used to
contact the maintainer of the model. This parameter is only used
in the model repository.
validation_command (str, optional): Path to a validation command that
can be used to verify that the model ran as expected. A non-zero
return code is taken to indicate failure.
dependencies (list, optional): A list of packages required by the
model that are written in the same language as the model. If the
package requires dependencies outside the language of the model,
use the additional_dependencies parameter to provide them. If you
need a version of the package from a specific package manager,
a mapping with 'package' and 'package_manager' fields can be
provided instead of just the name of the package.
additional_dependencies (dict, optional): A mapping between languages
and lists of packages in those languages that are required by the
model.
**kwargs: Additional keyword arguments are passed to parent class.
Class Attributes:
language (str): Primary name for the programming language that this
compiler should be used for. [REQUIRED]
language_aliases (list): Additional/alternative names that the language
may be known by.
language_ext (list): Extensions for programs written in the target
language. [REQUIRED]
base_languages (list): Other programming languages that this driver
and the interpreter for the target language are dependent on (e.g.
Matlab models require Python).
executable_type (str): 'compiler' or 'interpreter' to indicate the type
of the executable for the language. [AUTOMATED]
interface_library (list): Name of the library containing the yggdrasil
interface for the target language. [REQUIRED]
interface_directories (list): Directories containing code in the
interface library for the target language.
interface_dependencies (list): List of names of libraries that are
required to use the interface on the current platform. This doesn't
include libraries required by specific communication types, which are
described by supported_comm_options.
supported_comms (list): Name of comms supported in the target language.
[REQUIRED]
supported_comm_options (dict): Options for the supported comms like the
platforms they are available on and the external libraries required
to use them. [REQUIRED]
external_libraries (dict): Information on external libraries required
for running models in the target language using yggdrasil.
internal_libraries (dict): Information on internal libraries required
for running models in the target language using yggdrasil.
type_map (dict): Mapping of |yggdrasil| extended JSON types to
datatypes in the target programming language. [REQUIRED]
function_param (dict): Options specifying how different operations
would be encoded in the target language (e.g. if statements, for
loops, while loops). [REQUIRED]
version_flags (list): Flags that should be called with the language
executable to determine the version of the compiler/interpreter.
Defaults to ['--version'].
outputs_in_inputs (bool): If True, outputs are passed by pointer as
inputs for modification and the return value should be a flag.
Defaults to False.
include_arg_count (bool): If True, the number of arguments passed
to send/recv calls is prepended to the arguments to the function.
Defaults to False.
include_channel_obj (bool): If True, the channel object is passed as
input to the send/recv calls (after the argument count if it is
also present due to include_arg_count being True). Defaults to
False.
is_typed (bool): True if the language is typed, False otherwise.
brackets (tuple): A pair of opening and closing characters that
are used by the language to mark blocks. Set to None and
ignored by default.
no_executable (bool): True if there is not an executable associated
with the language driver. Defaults to False.
comms_implicit (bool): True if the comms installed for this driver
are not explicitly defined (depend on input parameters). Defaults
to False.
Attributes:
args (list): Argument(s) for running the model on the command line.
model_file (str): Full path to the model executable or interpretable
script.
model_args (list): Runtime arguments for running the model on the
command line.
model_src (str): Full path to the model source code. For interpreted
languages, this will be the same as model_file.
model_function_info (dict): Parameters recovered by parsing the
provided model function definition.
overwrite (bool): If True, any existing compilation products will be
overwritten by compilation and cleaned up following the run.
Otherwise, existing products will be used and will remain after
the run.
products (list): Files created by running the model. This includes
compilation products such as executables and/or object files.
source_products (list): Files created by running the model that
are source files. These files will be removed without checking
their extension so users should avoid adding files to this list
unless they are sure they should be deleted.
wrapper_products (list): Files created in order to wrap the model.
process (:class:`yggdrasil.tools.YggPopen`): Process used to run
the model.
function (str): The name of the model function that should be wrapped.
iter_function_over (array): Variable(s) that should be received or
sent as an array, but iterated over.
is_server (bool, dict): If True, the model is assumed to be a server
and an instance of :class:`yggdrasil.drivers.ServerDriver` is
started. If a dict, the input/output channels with the specified
names in the dict will be replaced with a server.
client_of (list): The names of server models that this model is a
client of.
timesync (str): If set, the name of the server performing
timestep synchronization for the model.
with_strace (bool): If True, the command is run with strace or dtrace.
strace_flags (list): Flags to pass to strace/dtrace.
with_valgrind (bool): If True, the command is run with valgrind.
valgrind_flags (list): Flags to pass to valgrind.
model_index (int): Index of model in list of models being run.
copy_index (int): Index of model in set of copies.
modified_files (list): List of pairs of originals and copies of files
that should be restored during cleanup.
allow_threading (bool): If True, comm connections will be set up so that
the model-side comms can be used by more than one thread.
copies (int): The number of copies of the model that should be created.
repository_url (str): URL for the git repository containing the model
source code. If provided, relative paths in the model YAML
definition will be considered relative to the repository root
directory.
repository_commit (str): Commit that should be checked out in the
model repository specified by repository_url.
description (str): Description of the model. This parameter is only
used in the model repository or when providing the model as a
service.
contact_email (str): Email address that should be used to contact the
maintainer of the model. This parameter is only used in the model
repository.
validation_command (str): Path to a validation command that can be
used to verify that the model ran as expected. A non-zero return
code is taken to indicate failure.
dependencies (list): A list of packages required by the model that are
written in the same language as the model. If the package requires
dependencies outside the language of the model, use the
additional_dependencies parameter to provide them. If you need a
version of the package from a specific package manager, a mapping
with 'package' and 'package_manager' fields can be provided
instead of just the name of the package.
additional_dependencies (dict): A mapping between languages and lists
of packages in those languages that are required by the model.
Raises:
RuntimeError: If both with_strace and with_valgrind are True.
"""
_schema_type = 'model'
_schema_subtype_key = 'language'
_schema_required = ['name', 'language', 'args', 'working_dir']
_schema_properties = {
'name': {'type': 'string'},
'language': {'type': 'string', 'default': 'executable',
'description': (
'The programming language that the model '
'is written in. A list of available '
'languages can be found :ref:`here <'
'schema_table_model_subtype_rst>`.')},
'args': {'type': 'array',
'items': {'type': 'string', 'minLength': 1}},
'inputs': {'type': 'array', 'default': [],
'items': {'$ref': '#/definitions/comm'},
'description': (
'Zero or more channels carrying input to the model. '
'A full description of channel entries and the '
'options available for channels can be found '
':ref:`here<yaml_comm_options>`.')},
'outputs': {'type': 'array', 'default': [],
'items': {'$ref': '#/definitions/comm'},
'description': (
'Zero or more channels carrying output from the '
'model. A full description of channel entries and '
'the options available for channels can be found '
':ref:`here<yaml_comm_options>`.')},
'env': {'type': 'object', 'default': {},
'additional_properties': {'type': 'string'}},
'products': {'type': 'array', 'default': [],
'items': {'type': 'string'}},
'source_products': {'type': 'array', 'default': [],
'items': {'type': 'string'}},
'working_dir': {'type': 'string'},
'overwrite': {'type': 'boolean'},
'preserve_cache': {'type': 'boolean', 'default': False},
'function': {'type': 'string'},
'iter_function_over': {'type': 'array', 'default': [],
'items': {'type': 'string'}},
'is_server': {'anyOf': [{'type': 'boolean'},
{'type': 'object',
'properties': {'input': {'type': 'string'},
'output': {'type': 'string'}},
'additionalProperties': False}],
'default': False},
'client_of': {'type': 'array', 'items': {'type': 'string'},
'default': []},
'timesync': {
'anyOf': [
{'type': 'boolean'}, {'type': 'string'},
{'type': 'object',
'required': ['name'],
'properties': {
'name': {'type': 'string', 'default': 'timesync'},
'inputs': {'anyOf': [
{'type': 'string'},
{'type': 'array',
'items': {'type': 'string'}}]},
'outputs': {'anyOf': [
{'type': 'string'},
{'type': 'array',
'items': {'type': 'string'}}]}}},
{'type': 'array',
'items': {
'anyOf': [
{'type': 'string'},
{'type': 'object',
'required': ['name'],
'properties': {
'name': {'type': 'string',
'default': 'timesync'},
'inputs': {'anyOf': [
{'type': 'string'},
{'type': 'array',
'items': {'type': 'string'}}]},
'outputs': {'anyOf': [
{'type': 'string'},
{'type': 'array',
'items': {'type': 'string'}}]}}}]}}],
'default': False},
'with_strace': {'type': 'boolean', 'default': False},
'strace_flags': {'type': 'array',
'default': ['-e', 'trace=memory'],
'items': {'type': 'string'}},
'with_valgrind': {'type': 'boolean', 'default': False},
'valgrind_flags': {'type': 'array',
'default': ['--leak-check=full',
'--show-leak-kinds=all'], # '-v'
'items': {'type': 'string'}},
'outputs_in_inputs': {'type': 'boolean'},
'logging_level': {'type': 'string', 'default': ''},
'allow_threading': {'type': 'boolean'},
'copies': {'type': 'integer', 'default': 1, 'minimum': 1},
'repository_url': {'type': 'string'},
'repository_commit': {'type': 'string'},
'description': {'type': 'string'},
'contact_email': {'type': 'string'},
'validation_command': {'type': 'string'},
'dependencies': {
'type': 'array',
'items': {'oneOf': [
{'type': 'string'},
{'type': 'object',
'required': ['package'],
'properties': {
'package': {'type': 'string'},
'package_manager': {'type': 'string'},
'arguments': {'type': 'string'}},
'additionalProperties': False}]}},
'additional_dependencies': {
'type': 'object',
'additionalProperties': {
'type': 'array',
'items': {'oneOf': [
{'type': 'string'},
{'type': 'object',
'required': ['package'],
'properties': {
'package': {'type': 'string'},
'package_manager': {'type': 'string'},
'arguments': {'type': 'string'}},
'additionalProperties': False}]}}}}
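# Illustrative sketch only (model name, file, and channel names are
# hypothetical): a model entry in a yggdrasil YAML file that the schema
# defined above would accept.
#
#   model:
#     name: photosynthesis
#     language: python
#     args: [photosynthesis.py]
#     inputs:
#       - name: light_intensity
#     outputs:
#       - name: co2_flux
#     timesync: true
#     copies: 1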
_schema_excluded_from_class = ['name', 'language', 'args', 'working_dir']
_schema_excluded_from_class_validation = ['inputs', 'outputs']
language = None
language_ext = None
language_aliases = []
base_languages = []
executable_type = None
interface_library = None
interface_directories = []
interface_dependencies = []
supported_comms = []
supported_comm_options = {}
external_libraries = {}
internal_libraries = {}
type_map = None
inverse_type_map = None
function_param = None
version_flags = ['--version']
full_language = True
outputs_in_inputs = False
include_arg_count = False
include_channel_obj = False
is_typed = False
types_in_funcdef = True
interface_inside_exec = False
dont_declare_channel = False
is_dsl = False
brackets = None
zero_based = True
max_line_width = None
no_executable = False
comms_implicit = False
python_interface = {'table_input': 'YggAsciiTableInput',
'table_output': 'YggAsciiTableOutput',
'array_input': 'YggArrayInput',
'array_output': 'YggArrayOutput',
'pandas_input': 'YggPandasInput',
'pandas_output': 'YggPandasOutput'}
_library_cache = {}
_config_keys = []
_config_attr_map = []
_executable_search_dirs = None
_disconnect_attr = (Driver._disconnect_attr
+ ['queue', 'queue_thread',
'event_process_kill_called',
'event_process_kill_complete',
'model_process'])
_mpi_tags = {'ENV': 1,
'START': 2,
'STOP_RANK0': 3, # Stopped by partner
'STOP_RANKX': 4, # Stopped by root
'BUILDFILE': 5,
'LOCK_BUILDFILE': 6,
'UNLOCK_BUILDFILE': 7}
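# Informal note on the tag scheme above (not normative): each model is given
# a contiguous block of len(_mpi_tags) tags offset by its model_index (see
# the _mpi_tag offset computed in __init__ below). For example, a
# hypothetical model with model_index=2 would use tag 7 * 2 + _mpi_tags[name]
# for its MPI exchanges.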
def __init__(self, name, args, model_index=0, copy_index=-1, clients=[],
preparsed_function=None, outputs_in_inputs=None,
mpi_rank=0, mpi_tag_start=None, **kwargs):
self._inv_mpi_tags = {v: k for k, v in self._mpi_tags.items()}
self.model_outputs_in_inputs = outputs_in_inputs
self.preparsed_function = preparsed_function
super(ModelDriver, self).__init__(name, **kwargs)
if self.overwrite is None:
self.overwrite = (not self.preserve_cache)
# Setup process things
self.model_process = None
self.queue = multitasking.Queue()
self.queue_thread = None
self.event_process_kill_called = multitasking.Event()
self.event_process_kill_complete = multitasking.Event()
# Strace/valgrind
if self.with_strace and self.with_valgrind:
raise RuntimeError("Trying to run with strace and valgrind.")
if (((self.with_strace or self.with_valgrind)
and platform._is_win)): # pragma: windows
raise RuntimeError("strace/valgrind options invalid on windows.")
self.model_index = model_index
self.copy_index = copy_index
self.clients = clients
self.env_copy = ['LANG', 'PATH', 'USER']
self._exit_line = b'EXIT'
for k in self.env_copy:
if k in os.environ:
self.env[k] = os.environ[k]
if not self.is_installed():
raise RuntimeError("%s is not installed" % self.language)
self.raw_model_file = None
self.model_function_file = None
self.model_function_info = None
self.model_function_inputs = None
self.model_function_outputs = None
self.model_file = None
self.model_args = []
self.model_dir = None
self.model_src = None
self.args = args
self.modified_files = []
self.wrapper_products = []
self._mpi_comm = False
self._mpi_rank = 0
self._mpi_size = 1
self._mpi_requests = {}
self._mpi_tag = (len(self._mpi_tags) * self.model_index)
if mpi_tag_start is not None:
self._mpi_tag += mpi_tag_start
if multitasking._on_mpi:
self._mpi_comm = multitasking.MPI.COMM_WORLD
self._mpi_rank = self._mpi_comm.Get_rank()
self._mpi_size = self._mpi_comm.Get_size()
self._mpi_partner_rank = mpi_rank
# Update for function
if self.function:
args = [self.init_from_function(args)]
# Parse arguments
self.debug(str(args))
self.parse_arguments(args)
assert(self.model_file is not None)
# Remove products
if self.overwrite:
self.remove_products()
# Write wrapper
if self.function:
self.wrapper_products.append(args[0])
self.wrapper_products += self.write_wrappers()
# Install dependencies
if self.dependencies:
self.install_model_dependencies(self.dependencies)
if self.additional_dependencies:
for language, v in self.additional_dependencies.items():
drv = import_component('model', language)
drv.install_model_dependencies(v)
@staticmethod
def before_registration(cls):
r"""Operations that should be performed to modify class attributes prior
to registration, including things like platform-dependent properties and
checking environment variables for default settings.
"""
Driver.before_registration(cls)
cls.inverse_type_map = None
cls._language = cls.language
cls._language_aliases = cls.language_aliases
if (((cls.language_ext is not None)
and (not isinstance(cls.language_ext, (list, tuple))))):
cls.language_ext = [cls.language_ext]
@staticmethod
def after_registration(cls, cfg=None, second_pass=False):
r"""Operations that should be performed to modify class attributes after
registration. For compiled languages this includes selecting the
default compiler. The order of precedence is the config file 'compiler'
option for the language, followed by the environment variable set by
_compiler_env, followed by the existing class attribute.
Args:
cfg (YggConfigParser, optional): Config class that should
be used to set options for the driver. Defaults to
None and yggdrasil.config.ygg_cfg is used.
second_pass (bool, optional): If True, the class has already
been registered. Defaults to False.
"""
if cfg is None:
from yggdrasil.config import ygg_cfg
cfg = ygg_cfg
cfg.reload()
Driver.after_registration(cls)
cls.cfg = cfg
for x in cls._config_attr_map:
ka = x['attr']
k0 = x.get('key', ka)
setattr(cls, ka, cls.cfg.get(cls.language, k0,
getattr(cls, ka)))
@staticmethod
def finalize_registration(cls):
r"""Operations that should be performed after a class has been fully
initialized and registered."""
global _map_language_ext
for x in cls.get_language_ext():
if x not in _map_language_ext:
_map_language_ext[x] = []
_map_language_ext[x].append(cls.language)
@classmethod
def mpi_partner_init(cls, self):
r"""Actions initializing an MPIPartnerModel."""
pass
@classmethod
def mpi_partner_cleanup(cls, self):
r"""Actions cleaning up an MPIPartnerModel."""
pass
@classmethod
def get_inverse_type_map(cls):
r"""Get the inverse type map.
Returns:
dict: Mapping from native type to JSON type.
"""
if cls.inverse_type_map is None:
cls.inverse_type_map = {}
for k, v in cls.type_map.items():
if k != 'flag':
cls.inverse_type_map[v] = k
return cls.inverse_type_map
@classmethod
def get_language_for_source(cls, fname, languages=None, early_exit=False,
**kwargs):
r"""Determine the language that can be used with the provided source
file(s). If more than one language applies to a set of multiple files,
the language that applies to the most files is returned.
Args:
fname (str, list): The full path to one or more files. If more than
one file is provided, the language that applies to the most
files is returned.
languages (list, optional): The list of languages that are acceptable.
Defaults to None and any language will be acceptable.
early_exit (bool, optional): If True, the first language identified
will be returned if fname is a list of files. Defaults to False.
**kwargs: Additional keyword arguments are passed to recursive calls.
Returns:
str: The language that can operate on the specified file.
"""
if isinstance(fname, list):
lang_dict = {}
for f in fname:
try:
ilang = cls.get_language_for_source(f, languages=languages,
**kwargs)
if early_exit:
return ilang
except ValueError:
continue
lang_dict.setdefault(ilang, 0)
lang_dict[ilang] += 1
if lang_dict:
return max(lang_dict, key=lang_dict.get)
else:
ext = os.path.splitext(fname)[-1]
for ilang in cls.get_map_language_ext().get(ext, []):
if (languages is None) or (ilang in languages):
return ilang
raise ValueError("Cannot determine language for file(s): '%s'" % fname)
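# Hedged usage sketch (file names hypothetical): when several files are
# provided, the language matching the most files wins unless early_exit is
# True, in which case the first identified language is returned.
#
#   ModelDriver.get_language_for_source(['model.c', 'utils.c', 'calc.py'])
#   # -> 'c' (two '.c' files vs. one '.py' file)
#   ModelDriver.get_language_for_source('calc.py')
#   # -> 'python'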
@classmethod
def get_map_language_ext(cls):
r"""Return the mapping of all language extensions."""
return _map_language_ext
@classmethod
def get_all_language_ext(cls):
r"""Return the list of all language extensions."""
return list(_map_language_ext.keys())
@classmethod
def get_language_dir(cls):
r"""Return the langauge directory."""
return languages.get_language_dir(cls.language)
@classmethod
def get_language_ext(cls):
r"""Return the language extension, including from the base classes."""
out = cls.language_ext
if out is None:
out = []
for x in cls.base_languages:
out += import_component('model', x).get_language_ext()
return out
def parse_arguments(self, args, default_model_dir=None):
r"""Sort model arguments to determine which one is the executable
and which ones are arguments.
Args:
args (list): List of arguments provided.
default_model_dir (str, optional): Path to directory that should be
used to normalize the model file path if it is not absolute.
Defaults to None and is set to the working_dir.
"""
if isinstance(args, (str, bytes)):
args = args.split()
for i in range(len(args)):
args[i] = str(args[i])
assert(isinstance(args, list))
if default_model_dir is None:
default_model_dir = self.working_dir
self.raw_model_file = args[0]
self.model_file = self.raw_model_file
self.model_args = args[1:]
if (self.language != 'executable') and (not os.path.isabs(self.model_file)):
model_file = os.path.normpath(os.path.join(default_model_dir,
self.model_file))
self.model_file = model_file
self.model_dir = os.path.dirname(self.model_file)
self.debug("model_file = '%s', model_dir = '%s', model_args = '%s'",
self.model_file, self.model_dir, self.model_args)
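# Rough sketch of the attributes set above (paths hypothetical): with
# working_dir='/tmp/run' and args=['model.py', '--flag', '3'], a Python
# model driver would end up with
#   raw_model_file = 'model.py'
#   model_file     = '/tmp/run/model.py'  (left relative for 'executable')
#   model_dir      = '/tmp/run'
#   model_args     = ['--flag', '3']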
def init_from_function(self, args):
r"""Initialize model parameters based on the wrapped function."""
if not self.preparsed_function:
yml_mock = dict(self.yml,
name=self.name,
args=self.args,
function=self.function,
is_server=self.is_server,
client_of=self.client_of,
inputs=self.inputs,
outputs=self.outputs,
iter_function_over=self.iter_function_over,
copies=self.copies)
self.preparsed_function = self.preparse_function(yml_mock)
self.model_function_info = self.preparsed_function['model_file']
self.model_function_file = self.model_function_info['model_file']
self.model_function_inputs = self.preparsed_function['inputs']
self.model_function_outputs = self.preparsed_function['outputs']
self.model_outputs_in_inputs = self.preparsed_function['outputs_in_inputs']
model_dir, model_base = os.path.split(self.model_function_file)
model_base = os.path.splitext(model_base)[0]
wrapper_fname = os.path.join(model_dir,
'ygg_%s_%s%s' % (model_base, self.name,
self.language_ext[0]))
lines = self.write_model_wrapper(model_name=self.name,
**self.preparsed_function)
# Write file
if (not os.path.isfile(wrapper_fname)) or self.overwrite:
with open(wrapper_fname, 'w') as fd:
fd.write('\n'.join(lines))
return wrapper_fname
@property
def numeric_logging_level(self):
r"""int: Logging level for the model."""
out = self.logger.getEffectiveLevel()
if self.logging_level:
out = logging.getLevelName(self.logging_level)
return out
@property
def n_sent_messages(self):
r"""dict: Number of messages sent by the model via each connection."""
if (self._mpi_rank > 0) and self.check_mpi_request('stopped'):
out = self._mpi_requests['stopped'].result
return out
out = {}
for x in self.yml.get('output_drivers', []):
x_inst = x.get('instance', None)
if x_inst:
out[x_inst.name] = x_inst.models_recvd.get(self.name, 0)
if self.is_server:
for x in self.yml.get('input_drivers', []):
x_inst = x.get('instance', None)
if x_inst and (x_inst._connection_type == 'rpc_request'):
out[x_inst.name] = x_inst.servers_recvd.get(self.name, 0)
return out
@property
def has_sent_messages(self):
r"""bool: True if output has been received from the model."""
n_msg = self.n_sent_messages
if not n_msg:
return True
return bool(sum(n_msg.values()))
def write_wrappers(self, **kwargs):
r"""Write any wrappers needed to compile and/or run a model.
Args:
**kwargs: Keyword arguments are ignored (only included to
allow cascade from child classes).
Returns:
list: Full paths to any created wrappers.
"""
return []
@classmethod
def install_model_dependencies(cls, dependencies, always_yes=False):
r"""Install any dependencies required by the model.
Args:
dependencies (list): Dependencies that should be installed.
always_yes (bool, optional): If True, the package manager will
not ask users for input during installation. Defaults to
False.
"""
packages = {}
for x in dependencies:
if isinstance(x, str):
x = {'package': x}
if x.get('arguments', None):
cls.install_dependency(always_yes=always_yes, **x)
else:
packages.setdefault(x.get('package_manager', None), [])
packages[x.get('package_manager', None)].append(
x['package'])
for k, v in packages.items():
cls.install_dependency(v, package_manager=k,
always_yes=always_yes)
@classmethod
def install_dependency(cls, package=None, package_manager=None,
arguments=None, command=None, always_yes=False):
r"""Install a dependency.
Args:
package (str): Name of the package that should be installed. If
the package manager supports it, this can include version
requirements.
package_manager (str, optional): Package manager that should be
used to install the package.
arguments (str, optional): Additional arguments that should be
passed to the package manager.
command (list, optional): Command that should be used to
install the package.
always_yes (bool, optional): If True, the package manager will
not ask users for input during installation. Defaults to
False.
"""
assert(package)
if isinstance(package, str):
package = package.split()
if package_manager is None:
if tools.get_conda_prefix():
package_manager = 'conda'
elif platform._is_mac:
package_manager = 'brew'
elif platform._is_linux:
package_manager = 'apt'
elif platform._is_win:
package_manager = 'choco'
yes_cmd = []
cmd_kwargs = {}
if command:
cmd = copy.copy(command)
elif package_manager == 'conda':
cmd = ['conda', 'install'] + package
if platform._is_win: # pragma: windows
# Conda commands must be run on the shell on windows as it
# is implemented as a batch script
cmd.insert(0, 'call')
cmd_kwargs['shell'] = True
yes_cmd = ['-y']
elif package_manager == 'brew':
cmd = ['brew', 'install'] + package
elif package_manager == 'apt':
cmd = ['apt-get', 'install'] + package
if bool(os.environ.get('GITHUB_ACTIONS', False)):
# Only enable sudo for testing, otherwise allow the user to
# decide if they want to run yggdrasil with sudo, or just
# install the dependencies themselves
cmd.insert(0, 'sudo')
yes_cmd = ['-y']
elif package_manager == 'choco':
cmd = ['choco', 'install'] + package
elif package_manager == 'vcpkg':
cmd = ['vcpkg.exe', 'install', '--triplet', 'x64-windows']
cmd += package
else:
package_managers = {'pip': 'python',
'cran': 'r'}
if package_manager in package_managers:
drv = import_component(
'model', package_managers[package_manager])
return drv.install_dependency(
package=package, package_manager=package_manager,
arguments=arguments, always_yes=always_yes)
raise NotImplementedError(f"Unsupported package manager: "
f"{package_manager}")
if arguments:
cmd += arguments.split()
if always_yes:
cmd += yes_cmd
if cmd_kwargs.get('shell', False):
cmd = ' '.join(cmd)
subprocess.check_call(cmd, **cmd_kwargs)
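# Informal sketch of the command composed above (package names are
# hypothetical): on a Linux machine without conda,
# install_dependency('libzmq3-dev', always_yes=True) would roughly run
#   apt-get install libzmq3-dev -y
# (prefixed with 'sudo' only when GITHUB_ACTIONS is set), while
# install_dependency('czmq', package_manager='vcpkg') would run
#   vcpkg.exe install --triplet x64-windows czmq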
def model_command(self):
r"""Return the command that should be used to run the model.
Returns:
list: Any commands/arguments needed to run the model from the
command line.
"""
return [self.model_file] + self.model_args
@classmethod
def language_executable(cls, **kwargs):
r"""Command required to compile/run a model written in this language
from the command line.
Returns:
str: Name of (or path to) compiler/interpreter executable required
to run the compiler/interpreter from the command line.
"""
if cls.no_executable:
return ''
raise NotImplementedError("language_executable not implemented for '%s'"
% cls.language)
@classmethod
def executable_command(cls, args, unused_kwargs=None, **kwargs):
r"""Compose a command for running a program using the exectuable for
this language (compiler/interpreter) with the provided arguments.
Args:
args (list): The program that returned command should run and any
arguments that should be provided to it.
unused_kwargs (dict, optional): Existing dictionary that unused
keyword arguments should be added to. Defaults to None and is
ignored.
**kwargs: Additional keyword arguments are ignored.
Returns:
list: Arguments composing the command required to run the program
from the command line using the executable for this language.
"""
raise NotImplementedError("executable_command not implemented for '%s'"
% cls.language)
@classmethod
def run_executable(cls, args, return_process=False, debug_flags=None,
**kwargs):
r"""Run a program using the executable for this language and the
provided arguments.
Args:
args (list): The program that should be run and any arguments
that should be provided to it.
return_process (bool, optional): If True, the process class is
returned without checking the process output. If False,
communicate is called on the process and the output is parsed
for errors. Defaults to False.
debug_flags (list, optional): Debug executable and flags that should
be prepended to the executable command. Defaults to None and
is ignored.
**kwargs: Additional keyword arguments are passed to
cls.executable_command and tools.popen_nobuffer.
Returns:
str: Output to stdout from the run command if return_process is
False, the process if return_process is True.
Raises:
RuntimeError: If the language is not installed.
RuntimeError: If there is an error when running the command.
"""
unused_kwargs = {}
cmd = cls.executable_command(args, unused_kwargs=unused_kwargs, **kwargs)
if isinstance(debug_flags, list):
cmd = debug_flags + cmd
try:
# Add default keyword arguments
if 'working_dir' in unused_kwargs:
unused_kwargs.setdefault('cwd', unused_kwargs.pop('working_dir'))
unused_kwargs.setdefault('shell', platform._is_win)
# Call command
logger.debug("Running '%s' from %s"
% (' '.join(cmd), unused_kwargs.get('cwd', os.getcwd())))
logger.debug("Process keyword arguments:\n%s\n",
' ' + pformat(unused_kwargs).replace('\n', '\n '))
print(' '.join(cmd))
proc = tools.popen_nobuffer(cmd, **unused_kwargs)
if return_process:
return proc
out, err = proc.communicate()
if proc.returncode != 0:
if out:
logger.info('\n%s' % out.decode('utf-8'))
if err: # pragma: debug
logger.info('\n%s' % err.decode('utf-8'))
raise RuntimeError("Command '%s' failed with code %d."
% (' '.join(cmd), proc.returncode))
out = out.decode("utf-8")
logger.debug('%s\n%s' % (' '.join(cmd), out))
return out
except (subprocess.CalledProcessError, OSError) as e: # pragma: debug
raise RuntimeError("Could not call command '%s': %s"
% (' '.join(cmd), e))
def run_validation(self):
r"""Run the validation script for the model."""
if not self.validation_command:
return
subprocess.check_call(self.validation_command.split(),
cwd=self.working_dir)
def run_model(self, return_process=True, **kwargs):
r"""Run the model. Unless overridden, the model will be run using
run_executable.
Args:
return_process (bool, optional): If True, the process running
the model is returned. If False, the process will block until
the model finishes running. Defaults to True.
**kwargs: Keyword arguments are passed to run_executable.
"""
env = self.set_env()
command = self.model_command()
if self.with_strace or self.with_valgrind:
kwargs.setdefault('debug_flags', self.debug_flags)
self.debug('Working directory: %s', self.working_dir)
self.debug('Command: %s', ' '.join(command))
self.debug('Environment Variables:\n%s', self.pprint(env, block_indent=1))
# Update keywords
# NOTE: Setting forward_signals to False allows faster debugging
# but should not be used in deployment for cases where models are not
# running locally.
default_kwargs = dict(env=env, working_dir=self.working_dir,
forward_signals=False,
shell=platform._is_win)
for k, v in default_kwargs.items():
kwargs.setdefault(k, v)
return self.run_executable(command, return_process=return_process, **kwargs)
@property
def debug_flags(self):
r"""list: Flags that should be prepended to an executable command to
enable debugging."""
pre_args = []
if self.with_strace:
if platform._is_linux:
pre_args += ['strace'] + self.strace_flags
else: # pragma: debug
raise RuntimeError("strace not supported on this OS.")
# TODO: dtruss cannot be run without sudo, sudo cannot be
# added to the model process command if it is not in the original
# yggdrasil CLI call, and must be tested with an executable that
# is not "signed with restricted entitlements" (which most built-in
# utilities (e.g. sleep) are).
# elif platform._is_mac:
# if 'sudo' in sys.argv:
# pre_args += ['sudo']
# pre_args += ['dtruss']
elif self.with_valgrind:
pre_args += ['valgrind'] + self.valgrind_flags
return pre_args
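# Minimal sketch: with the schema defaults above, enabling the debugging
# tools prepends the following to the model command (strace on Linux only).
#   with_strace=True   -> ['strace', '-e', 'trace=memory'] + model_command()
#   with_valgrind=True -> ['valgrind', '--leak-check=full',
#                          '--show-leak-kinds=all'] + model_command()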
@classmethod
def language_version(cls, version_flags=None, **kwargs):
r"""Determine the version of this language.
Args:
**kwargs: Keyword arguments are passed to cls.run_executable.
Returns:
str: Version of compiler/interpreter for this language.
"""
if version_flags is None:
version_flags = cls.version_flags
return cls.run_executable(version_flags, **kwargs).splitlines()[0].strip()
@classmethod
def is_installed(cls):
r"""Determine if this model driver is installed on the current
machine.
Returns:
bool: True if this model driver can be run on the current
machine, False otherwise.
"""
return (cls.is_language_installed()
and cls.are_base_languages_installed()
and cls.are_dependencies_installed()
and cls.is_interface_installed() and cls.is_comm_installed()
and cls.is_configured() and (not cls.is_disabled()))
@classmethod
def are_base_languages_installed(cls, missing=None):
r"""Determine if the base languages are installed.
Args:
missing (list, optional): A pre-existing list that
missing base languages should be appended to.
Returns:
bool: True if the base languages are installed. False otherwise.
"""
out = True
for x in cls.base_languages:
if (not out) and (not isinstance(missing, list)): # pragma: no cover
break
out = import_component('model', x).is_installed()
if isinstance(missing, list) and (not out):
missing.append(x)
if missing:
out = False
return out
@classmethod
def are_dependencies_installed(cls):
r"""Determine if the dependencies are installed for the interface (not
including dependencies needed by a particular communication type).
Returns:
bool: True if the dependencies are installed. False otherwise.
"""
out = (cls.language is not None)
for x in cls.interface_dependencies:
if not out: # pragma: no cover
break
out = cls.is_library_installed(x)
return out
@classmethod
def is_interface_installed(cls):
r"""Determine if the interface library for the associated programming
language is installed.
Returns:
bool: True if the interface library is installed.
"""
out = (cls.language is not None)
if out and (cls.interface_library is not None):
out = cls.is_library_installed(cls.interface_library)
return out
@classmethod
def is_language_installed(cls):
r"""Determine if the interpreter/compiler for the associated programming
language is installed.
Returns:
bool: True if the language interpreter/compiler is installed.
"""
out = False
if cls.language is not None:
try:
out = (shutil.which(cls.language_executable()) is not None)
except NotImplementedError: # pragma: debug
out = False
return out
@classmethod
def identify_source_files(cls, args=None, working_dir=None, **kwargs):
r"""Determine the source file based on model arguments.
Args:
args (list, optional): Arguments provided.
working_dir (str, optional): Working directory.
**kwargs: Additional keyword arguments are ignored.
Returns:
list: Source files.
"""
out = []
if args:
src = args[0]
if (((not cls.is_source_file(src))
and (cls.language_ext is not None)
and (os.path.splitext(src)[-1]
not in cls.get_all_language_ext()))):
src = os.path.splitext(src)[0] + cls.language_ext[0]
if working_dir and (not os.path.isabs(src)):
src = os.path.normpath(os.path.join(working_dir, src))
if os.path.isfile(src):
out.append(src)
return out
@classmethod
def is_source_file(cls, fname):
r"""Determine if the provided file name points to a source files for
the associated programming language by checking the extension.
Args:
fname (str): Path to file.
Returns:
bool: True if the provided file is a source file, False otherwise.
"""
out = False
model_ext = os.path.splitext(fname)[-1]
if len(model_ext) > 0:
out = (model_ext in cls.get_language_ext())
return out
@classmethod
def is_library_installed(cls, lib, **kwargs):
r"""Determine if a dependency is installed.
Args:
lib (str): Name of the library that should be checked.
**kwargs: Additional keyword arguments are ignored.
Returns:
bool: True if the library is installed, False otherwise.
"""
raise NotImplementedError("Method is_library_installed missing for '%s'"
% cls.language)
@classmethod
def is_disabled(cls):
return (cls.cfg.get(cls.language, 'disable', 'false').lower() == 'true')
@classmethod
def is_configured(cls):
r"""Determine if the appropriate configuration has been performed (e.g.
installation of supporting libraries etc.)
Returns:
bool: True if the language has been configured.
"""
# Check for section & disable flag
disable_flag = cls.is_disabled()
out = (cls.cfg.has_section(cls.language) and (not disable_flag))
# Check for commtypes
if out and (len(cls.base_languages) == 0):
out = (cls.cfg.get(cls.language, 'commtypes', None) is not None)
# Check for config keys
for k in cls._config_keys:
if not out: # pragma: no cover
break
out = (cls.cfg.get(cls.language, k, None) is not None)
return out
@classmethod
def is_comm_installed(cls, commtype=None, skip_config=False, **kwargs):
r"""Determine if a comm is installed for the associated programming
language.
Args:
commtype (str, optional): If provided, this method will only test
for installation of the specified communication type. Defaults
to None and will check for any installed comm.
skip_config (bool, optional): If True, the config list of comms
installed for this language will not be used to determine if
the comm is installed and the class attribute
supported_comm_options will be processed. Defaults to False and
config options are used in order to improve performance after
initial configuration.
platforms (list, optional): Platforms on which the comm can be
installed. Defaults to None and is ignored unless there is a
value for the commtype in supported_comm_options. This
keyword argument is ignored if skip_config is False.
libraries (list, optional): External libraries that are required
by the specified commtype. Defaults to None and is ignored
unless there is a value for the commtype in supported_comm_options.
This keyword argument is ignored if skip_config is False.
**kwargs: Additional keyword arguments are passed to either
is_comm_installed for the base languages, supported languages,
or is_library_installed as appropriate.
Returns:
bool: True if a comm is installed for this language.
"""
# If there are base_languages for this language, use that language's
# driver to check for comm installation.
if len(cls.base_languages) > 0:
out = True
for x in cls.base_languages:
if not out: # pragma: no cover
break
out = import_component('model', x).is_comm_installed(
commtype=commtype, skip_config=skip_config, **kwargs)
return out
if cls.comms_implicit:
if commtype is None:
return True
return (commtype in tools.get_supported_comm())
# Check for installation based on config option
if not skip_config:
installed_comms = cls.cfg.get(cls.language, 'commtypes', [])
if commtype is None:
return (len(installed_comms) > 0)
else:
return (commtype in installed_comms)
# Check for any comm
if commtype is None:
for c in cls.supported_comms:
if cls.is_comm_installed(commtype=c, skip_config=skip_config,
**kwargs):
return True
# Check that comm is explicitly supported
if commtype not in cls.supported_comms:
return False
# Set & pop keywords
for k, v in cls.supported_comm_options.get(commtype, {}).items():
if kwargs.get(k, None) is None:
kwargs[k] = v
platforms = kwargs.pop('platforms', None)
libraries = kwargs.pop('libraries', [])
# Check platforms
if (platforms is not None) and (platform._platform not in platforms):
return False # pragma: windows
# Check libraries
if (libraries is not None):
for lib in libraries:
if not cls.is_library_installed(lib, **kwargs):
return False
# Check for server on RabbitMQ
if commtype in ['rmq', 'rmq_async']:
from yggdrasil.communication.RMQComm import check_rmq_server
if not check_rmq_server():
return False
return True
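# Hedged sketch of how supported_comm_options drives the skip_config branch
# above; the entry below is illustrative, not taken from any real driver.
#
#   supported_comm_options = {
#       'zmq': {'libraries': ['zmq', 'czmq'], 'platforms': ['MacOS', 'Linux']}}
#
# With skip_config=True, is_comm_installed(commtype='zmq') would then check
# the current platform against 'platforms' and call is_library_installed for
# each entry in 'libraries'.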
@classmethod
def configure(cls, cfg):
r"""Add configuration options for this language.
Args:
cfg (YggConfigParser): Config class that options should be set for.
Returns:
list: Section, option, description tuples for options that could not
be set.
"""
out = []
# Section and executable
if (cls.language is not None) and (not cfg.has_section(cls.language)):
cfg.add_section(cls.language)
# Executable type configuration
out += cls.configure_executable_type(cfg)
# Locate executable
if (((not cls.is_language_installed())
and (cls.executable_type is not None))): # pragma: debug
try:
exec_file = cls.language_executable()
if exec_file is not None:
fpath = tools.locate_file(
exec_file, directory_list=cls._executable_search_dirs)
if fpath:
cfg.set(cls.language, cls.executable_type, fpath)
except NotImplementedError:
pass
# Configure libraries
out += cls.configure_libraries(cfg)
# Only do additional configuration if no base languages
if not cls.base_languages:
# Installed comms
comms = []
for c in cls.supported_comms:
if cls.is_comm_installed(commtype=c, cfg=cfg, skip_config=True):
comms.append(c)
cfg.set(cls.language, 'commtypes', comms)
cls.after_registration(cls, cfg=cfg, second_pass=True)
return out
@classmethod
def configure_executable_type(cls, cfg):
r"""Add configuration options specific in the executable type
before the libraries are configured.
Args:
cfg (YggConfigParser): Config class that options should be set for.
Returns:
list: Section, option, description tuples for options that could not
be set.
"""
return []
@classmethod
def configure_libraries(cls, cfg):
r"""Add configuration options for external libraries in this language.
Args:
cfg (YggConfigParser): Config class that options should be set for.
Returns:
list: Section, option, description tuples for options that could not
be set.
"""
return []
def get_io_env(self, input_drivers=None, output_drivers=None):
r"""Get environment variables set by the input/output drivers.
Args:
input_drivers (list, optional): Input drivers. Defaults to the
yaml entry if not provided.
output_drivers (list, optional): Output drivers. Defaults to the
yaml entry if not provided.
Returns:
dict: Environment variables.
"""
if input_drivers is None:
input_drivers = self.yml.get('input_drivers', [])
if output_drivers is None:
output_drivers = self.yml.get('output_drivers', [])
out = {}
if self.copies > 1:
from yggdrasil.drivers.DuplicatedModelDriver import (
DuplicatedModelDriver)
base_name = DuplicatedModelDriver.get_base_name(self.name)
else:
base_name = self.name
for x in input_drivers + output_drivers:
if 'instance' in x:
model_env = x['instance'].model_env
if self.name in model_env:
out.update(model_env[self.name])
elif base_name in model_env:
out.update(model_env[base_name])
return out
@classmethod
def set_env_class(cls, existing=None, **kwargs):
r"""Set environment variables that are instance independent.
Args:
existing (dict, optional): Existing dictionary of environment
variables that new variables should be added to. Defaults
to a copy of os.environ.
**kwargs: Additional keyword arguments are ignored.
Returns:
dict: Environment variables for the model process.
"""
if existing is None: # pragma: no cover
existing = {}
existing.update(os.environ)
return existing
def set_env(self, existing=None, **kwargs):
r"""Get environment variables that should be set for the model process.
Args:
existing (dict, optional): Existing dictionary of environment
variables that new variables should be added to. Defaults
to a copy of os.environ.
**kwargs: Additional keyword arguments are passed to set_env_class.
Returns:
dict: Environment variables for the model process.
"""
from yggdrasil.config import ygg_cfg
if existing is None:
existing = {}
existing.update(copy.deepcopy(self.env))
existing.update(self.get_io_env())
env = self.set_env_class(existing=existing, **kwargs)
env.update(YGG_SUBPROCESS="True",
YGG_MODEL_INDEX=str(self.model_index),
YGG_MODEL_LANGUAGE=self.language,
YGG_MODEL_COPIES=str(self.copies),
# YGG_PYTHON_EXEC=sys.executable,
YGG_DEFAULT_COMM=tools.get_default_comm(),
YGG_NCLIENTS=str(len(self.clients)))
if multitasking._on_mpi:
env['YGG_MPI_RANK'] = str(multitasking._mpi_rank)
if self.copies > 1:
from yggdrasil.drivers.DuplicatedModelDriver import (
DuplicatedModelDriver)
env['YGG_MODEL_COPY'] = str(self.copy_index)
env['YGG_MODEL_NAME'] = DuplicatedModelDriver.get_base_name(
self.name)
else:
env['YGG_MODEL_NAME'] = self.name
if self.allow_threading or (self.copies > 1):
env['YGG_THREADING'] = '1'
if isinstance(self.is_server, dict):
env['YGG_SERVER_INPUT'] = self.is_server['input']
env['YGG_SERVER_OUTPUT'] = self.is_server['output']
if self.logging_level:
env['YGG_MODEL_DEBUG'] = self.logging_level
replace = [k for k in env.keys() if ':' in k]
for k in replace:
env[k.replace(':', '__COLON__')] = env.pop(k)
if ygg_cfg.get('general', 'allow_multiple_omp', False):
env['KMP_DUPLICATE_LIB_OK'] = 'True'
return env
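# Illustrative sketch (values hypothetical): a model process launched by
# this driver would see, in addition to the connection variables from
# get_io_env(), environment variables along the lines of
#   YGG_SUBPROCESS=True
#   YGG_MODEL_INDEX=0
#   YGG_MODEL_NAME=photosynthesis
#   YGG_MODEL_LANGUAGE=python
#   YGG_DEFAULT_COMM=zmq   # whatever tools.get_default_comm() returns
#   YGG_NCLIENTS=0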
def before_start(self, no_queue_thread=False, **kwargs):
r"""Actions to perform before the run starts.
Args:
no_queue_thread (bool, optional): If True, the queue_thread is not
created/started. Defaults to False.
**kwargs: Keyword arguments are passed to run_model.
"""
# if multitasking._on_mpi:
# self.init_mpi_env()
self.model_process = self.run_model(**kwargs)
# Start thread to queue output
if not no_queue_thread:
self.queue_thread = multitasking.YggTaskLoop(
target=self.enqueue_output_loop,
name=self.name + '.EnqueueLoop')
self.queue_thread.start()
if multitasking._on_mpi:
self.init_mpi()
def queue_close(self):
r"""Close the queue for messages from the model process."""
self.model_process.stdout.close()
def queue_recv(self):
r"""Receive a message from the model process."""
return self.model_process.stdout.readline()
def enqueue_output_loop(self):
r"""Keep passing lines to queue."""
try:
line = self.queue_recv()
except BaseException as e: # pragma: debug
print(e)
line = ""
if (len(line) == 0):
self.queue_thread.set_break_flag()
try:
self.queue.put(self._exit_line)
except multitasking.AliasDisconnectError: # pragma: debug
self.error("Queue disconnected")
self.debug("End of model output")
try:
self.queue_close()
except BaseException: # pragma: debug
pass
else:
try:
self.queue.put(line.decode('utf-8'))
except BaseException as e: # pragma: debug
warnings.warn("Error in printing output: %s" % e)
def before_loop(self):
r"""Actions before loop."""
self.debug('Running %s from %s with cwd %s and env %s',
self.model_command(), os.getcwd(), self.working_dir,
pformat(self.env))
# def init_mpi_env(self):
# r"""Receive env information to the partner model."""
# self.env = self.recv_mpi(tag=self._mpi_tags['ENV'])
def init_mpi(self):
r"""Initialize MPI communicator."""
if self._mpi_rank == 0:
self._mpi_comm = None
else:
self.recv_mpi(tag=self._mpi_tags['START'])
self._mpi_requests['stopped'] = multitasking.MPIRequestWrapper(
self.recv_mpi(tag=self._mpi_tags['STOP_RANKX'],
dont_block=True))
def send_mpi(self, msg, tag=0, dont_block=False):
r"""Send an MPI message."""
self.debug("send %d (%d) [%s]: %s (blocking=%s)", tag,
self._mpi_tag + tag, self._inv_mpi_tags[tag],
msg, not dont_block)
kws = {'dest': self._mpi_partner_rank, 'tag': (self._mpi_tag + tag)}
if dont_block: # pragma: debug
# return self._mpi_comm.isend(msg, **kws)
raise NotImplementedError("Non-blocking MPI send not tested.")
else:
return self._mpi_comm.send(msg, **kws)
def recv_mpi(self, tag=0, dont_block=False):
r"""Receive an MPI message."""
self.debug('recv %d (%d) [%s] (blocking=%s)', tag,
self._mpi_tag + tag, self._inv_mpi_tags[tag],
not dont_block)
kws = {'source': self._mpi_partner_rank, 'tag': (self._mpi_tag + tag)}
if dont_block:
return self._mpi_comm.irecv(**kws)
else:
return self._mpi_comm.recv(**kws)
def stop_mpi_partner(self, msg=None, dest=0, tag=None):
r"""Send a message to stop the MPI partner model on the main process."""
if self._mpi_comm and (not self.check_mpi_request('stopping')):
if tag is None:
tag = self._mpi_tags['STOP_RANK0']
if msg is None:
if self.errors or self.model_process_returncode:
msg = 'ERROR'
else:
msg = 'STOPPING'
self.debug("stop_mpi_partner: %d, %s", tag, msg)
# Don't call test()
self._mpi_requests['stopping'] = multitasking.MPIRequestWrapper(
self.send_mpi(msg, tag=tag), completed=True)
def wait_on_mpi_request(self, name, timeout=False):
r"""Wait for a request to be completed.
Args:
name (str): Name that request was registered under.
Returns:
bool, str: Received message or False if the request does not
exist or is not complete.
"""
self.debug("Waiting on '%s' (timeout=%s)", name, timeout)
try:
out = self._mpi_requests[name].wait(timeout=timeout)
if out == 'ERROR': # pragma: debug
self.errors.append(out)
return out
except asyncio.TimeoutError: # pragma: debug
self.info("Timeout for MPI '%s' request", name)
def check_mpi_request(self, name):
r"""Check if a request has been completed.
Args:
name (str): Name that request was registered under.
Returns:
bool, str: Received message or False if the request does not
exist or is not complete.
"""
if self._mpi_comm and (name in self._mpi_requests):
out, msg = self._mpi_requests[name].test()
if out and (msg == 'ERROR'): # pragma: debug
self.errors.append(msg)
return out
return False
def set_break_flag(self, *args, **kwargs):
r"""Stop the model loop."""
self.stop_mpi_partner()
super(ModelDriver, self).set_break_flag(*args, **kwargs)
def run_loop(self):
r"""Loop to check if model is still running and forward output."""
# Continue reading until there is no more output
if self.model_process_returncode:
self.errors.append(self.model_process_returncode)
if self.check_mpi_request('stopped'):
self.debug("Stop requested by MPI partner.")
self.set_break_flag()
try:
line = self.queue.get_nowait()
except Empty:
# This sleep is necessary to allow changes in queue without lock
self.sleep()
return
except multitasking.AliasDisconnectError: # pragma: debug
self.error("Queue disconnected")
self.set_break_flag()
else:
if (line == self._exit_line) or self.check_mpi_request('stopped'):
self.debug("No more output")
self.set_break_flag()
else:
self.print_encoded(line, end="")
sys.stdout.flush()
def run_finally(self):
r"""Actions to perform in finally clause of try/except wrapping
run."""
# Ensure the MPI partner gets cleaned up following an error
self.stop_mpi_partner()
super(ModelDriver, self).run_finally()
def after_loop(self):
r"""Actions to perform after run_loop has finished. Mainly checking
if there was an error and then handling it."""
self.debug('')
self.stop_mpi_partner()
if self.queue_thread is not None:
self.queue_thread.join(self.sleeptime)
if self.queue_thread.is_alive():
self.debug("Queue thread still alive")
# Loop was broken from outside, kill the queueing thread
self.kill_process()
return
self.wait_process(self.timeout, key_suffix='.after_loop')
self.kill_process()
self.debug(("Closing input/output drivers:\n"
"\tinput: %s\n\toutput: %s")
% ([drv['name'] for drv in
self.yml.get('input_drivers', [])],
[drv['name'] for drv in
self.yml.get('output_drivers', [])]))
for drv in self.yml.get('input_drivers', []):
if 'instance' in drv:
if self.language == 'mpi':
drv['instance'].wait(self.timeout)
drv['instance'].on_model_exit('output', self.name,
errors=self.errors)
for drv in self.yml.get('output_drivers', []):
if 'instance' in drv:
if self.language == 'mpi':
drv['instance'].wait(self.timeout)
drv['instance'].on_model_exit('input', self.name,
errors=self.errors)
@property
def io_errors(self):
r"""list: Errors produced by input/output drivers to this model."""
errors = []
for drv in self.yml.get('input_drivers', []):
if 'instance' in drv:
errors += drv['instance'].errors
for drv in self.yml.get('output_drivers', []):
if 'instance' in drv:
errors += drv['instance'].errors
return errors
@property
def model_process_complete(self):
r"""bool: Has the process finished or not. Returns True if the process
has not started."""
if self.model_process is None: # pragma: debug
return True
return (self.model_process.poll() is not None)
@property
def model_process_returncode(self):
r"""int: Return code for the model process where non-zero values
indicate that there was an error."""
if self.model_process_complete and (self.model_process is not None):
return self.model_process.returncode
return 0
def wait_process(self, timeout=None, key=None, key_suffix=None):
r"""Wait for some amount of time for the process to finish.
Args:
timeout (float, optional): Time (in seconds) that should be waited.
Defaults to None and is infinite.
key (str, optional): Key that should be used to register the timeout.
Defaults to None and is set based on the stack trace.
Returns:
bool: True if the process completed. False otherwise.
"""
if not self.was_started: # pragma: debug
return True
return self.wait_on_function(lambda: self.model_process_complete,
timeout=timeout, key_level=1, key=key,
key_suffix=key_suffix)
def kill_process(self):
r"""Kill the process running the model, checking return code."""
if not self.was_started: # pragma: debug
self.debug('Process was never started.')
self.set_break_flag()
self.event_process_kill_called.set()
self.event_process_kill_complete.set()
if self.event_process_kill_called.is_set(): # pragma: debug
self.debug('Process has already been killed.')
return
self.event_process_kill_called.set()
with self.lock:
self.debug('')
ignore_error_code = False
if not self.model_process_complete: # pragma: debug
self.debug("Process is still running. Killing it.")
try:
self.model_process.kill()
self.debug("Waiting %f s for process to be killed",
self.timeout)
self.wait_process(self.timeout, key_suffix='.kill_process')
except BaseException: # pragma: debug
self.exception("Error killing model process")
if not self.has_sent_messages:
ignore_error_code = True
assert(self.model_process_complete)
if (((self.model_process_returncode != 0)
and (not ignore_error_code))):
self.error(("return code of %s indicates model error. "
"(sent messages: %s)"),
str(self.model_process_returncode),
self.n_sent_messages)
self.event_process_kill_complete.set()
if self.queue_thread is not None:
if not self.was_break: # pragma: debug
# Wait for messages to be printed
self.debug("Waiting for queue_thread to finish up.")
self.queue_thread.wait(self.timeout)
if self.queue_thread.is_alive(): # pragma: debug
self.debug("Setting break flag for queue_thread to finish up.")
self.queue_thread.set_break_flag()
self.queue_thread.wait(self.timeout)
try:
self.queue_close()
self.queue_thread.wait(self.timeout)
except BaseException: # pragma: debug
self.exception("Closed during concurrent action")
if self.queue_thread.is_alive(): # pragma: debug
self.error("Queue thread was not terminated.")
def graceful_stop(self):
r"""Gracefully stop the driver."""
self.debug('')
if self.has_sent_messages:
self.wait_process(self.timeout, key_suffix='.graceful_stop')
super(ModelDriver, self).graceful_stop()
def cleanup_products(self):
r"""Remove products created in order to run the model."""
if self.overwrite and (not self.preserve_cache):
self.remove_products()
self.restore_files()
def cleanup(self):
r"""Remove compile executable."""
self.cleanup_products()
super(ModelDriver, self).cleanup()
def restore_files(self):
r"""Restore modified files to their original form."""
for (original, modified) in self.modified_files:
if os.path.isfile(original):
os.remove(modified)
shutil.move(original, modified)
def remove_products(self):
r"""Delete products produced during the process of running the model."""
products = self.products
source_products = self.source_products + self.wrapper_products
remove_products(products, source_products)
@classmethod
def cleanup_dependencies(cls, products=[], verbose=False):
r"""Cleanup dependencies."""
for x in products:
if os.path.isfile(x):
if verbose: # pragma: debug
print("Removing %s" % x)
os.remove(x)
# Methods for automated model wrapping
@classmethod
def run_code(cls, lines, process_kwargs={}, **kwargs):
r"""Run code by first writing it as an executable and then calling
the driver.
Args:
lines (list): Lines of code to be wrapped as an executable.
process_kwargs (dict, optional): Keyword arguments that should
be passed to run_model. Defaults to {}.
**kwargs: Additional keyword arguments are passed to the
write_executable method.
"""
name = 'test_code_%s' % str(uuid.uuid4())[:13].replace('-', '_')
working_dir = os.getcwd()
code_dir = tempfile.gettempdir()
# code_dir = working_dir
fname = os.path.join(code_dir, name + cls.get_language_ext()[0])
lines = cls.write_executable(lines, **kwargs)
with open(fname, 'w') as fd:
fd.write('\n'.join(lines))
inst = None
try:
assert(os.path.isfile(fname))
inst = cls(name, [fname], working_dir=working_dir)
inst.run_model(return_process=False, **process_kwargs)
except BaseException: # pragma: debug
logger.error('Failed generated code:\n%s' % '\n'.join(lines))
raise
finally:
if os.path.isfile(fname):
os.remove(fname)
if inst is not None:
inst.cleanup()
@classmethod
def format_function_param(cls, key, default=None, replacement=None,
ignore_method=False, **kwargs):
r"""Return the formatted version of the specified key.
Args:
key (str): Key in cls.function_param mapping that should be
formatted.
default (str, optional): Format that should be returned if key
is not in cls.function_param. Defaults to None.
replacement (str, optional): Format that should be used instead
of the one in cls.function_param. Defaults to None.
**kwargs: Additional keyword arguments are used in formatting the
requested function parameter.
Returns:
str: Formatted string.
Raises:
NotImplementedError: If key is not in cls.function_param and default
is not set.
"""
if replacement is not None:
fmt = replacement
elif (not ignore_method) and hasattr(cls, 'format_function_param_%s' % key):
return getattr(cls, 'format_function_param_%s' % key)(**kwargs)
else:
if (key not in cls.function_param) and (default is None):
raise NotImplementedError(("Language %s dosn't have an entry in "
"function_param for key '%s'")
% (cls.language, key))
fmt = cls.function_param.get(key, default)
return fmt.format(**kwargs)
@classmethod
def parse_var_definition(cls, io, value, outputs_in_inputs=None):
r"""Extract information about input/output variables from a
string definition.
Args:
io (str): Description of variables contained in the provided
string. Must be 'inputs' or 'outputs'.
value (str): String containing one or more variable definitions.
outputs_in_inputs (bool, optional): If True, the outputs are
presented in the function definition as inputs. Defaults
to False.
Returns:
list: List of information about the variables contained in
the provided string.
Raises:
AssertionError: If io is not 'inputs' or 'outputs'.
NotImplementedError: If the def_regex for the specified
io is not defined.
"""
if outputs_in_inputs is None:
outputs_in_inputs = cls.outputs_in_inputs
assert(io in ['inputs', 'outputs'])
if ('%s_def_regex' % io) not in cls.function_param: # pragma: debug
raise NotImplementedError(
("'%s_def_regex' not defined for "
"language %s.") % (io, cls.language))
if 'multiple_outputs' in cls.function_param:
multi_re = cls.function_param['multiple_outputs']
for x in '[]()':
multi_re = multi_re.replace(x, '\\' + x)
multi_re = multi_re.format(outputs='(.*?)')
match = re.search(multi_re, value)
if match is not None:
value = match.group(1)
new_val = []
io_re = cls.format_function_param('%s_def_regex' % io)
for i, ivar in enumerate(cls.split_variables(value)):
igrp = {'name': ivar}
x = re.search(io_re, ivar)
if x is not None:
igrp = x.groupdict()
for k in list(igrp.keys()):
if igrp[k] is None:
del igrp[k]
if 'native_type' in igrp:
igrp['native_type'] = igrp['native_type'].replace(' ', '')
igrp['datatype'] = cls.get_json_type(igrp['native_type'])
igrp['position'] = i
if (io == 'outputs') and outputs_in_inputs:
igrp = cls.input2output(igrp)
new_val.append(igrp)
return new_val
@classmethod
def parse_function_definition(cls, model_file, model_function,
contents=None, match=None,
expected_outputs=[], outputs_in_inputs=None):
r"""Get information about the inputs & outputs to a model from its
definition if possible.
Args:
model_file (str): Full path to the file containing the model
function's declaration.
model_function (str): Name of the model function.
contents (str, optional): String containing the function definition.
If not provided, the function definition is read from model_file.
match (re.Match, optional): Match object for the function regex. If
not provided, a search is performed using function_def_regex.
expected_outputs (list, optional): List of names or variable
information dictionaries for outputs that are expected
to be extracted from the function's definition. This
variable is only used if outputs_in_inputs is True and
outputs are not extracted from the function's definition
using the regex for this language. Defaults to [].
outputs_in_inputs (bool, optional): If True, the outputs are
presented in the function definition as inputs. Defaults
to False.
Returns:
dict: Parameters extracted from the function definitions.
"""
if outputs_in_inputs is None:
outputs_in_inputs = cls.outputs_in_inputs
out = {}
if match or ('function_def_regex' in cls.function_param):
if not match:
function_regex = cls.format_function_param(
'function_def_regex', function_name=model_function)
if contents is None:
with open(model_file, 'r') as fd:
contents = fd.read()
match = re.search(function_regex, contents)
if not match: # pragma: debug
raise RuntimeError(("Could not find function match in file:\n"
"%s\nfor regex:\nr'%s'")
% (pformat(contents), function_regex))
# Match brackets to determine where the function definition is
if isinstance(cls.brackets, tuple):
assert(len(cls.brackets) == 2)
contents = match.group(0)
counts = {k: 0 for k in cls.brackets}
first_zero = 0
re_brackets = r'[\%s\%s]' % cls.brackets
for x in re.finditer(re_brackets, contents):
counts[x.group(0)] += 1
if (((counts[cls.brackets[0]] > 0)
and (counts[cls.brackets[0]]
== counts[cls.brackets[1]]))):
first_zero = x.span(0)[1]
break
assert((first_zero == 0) or (first_zero == len(contents)))
# This is currently commented as regex's are
# sufficient so far, but this may be needed in the
# future to isolate single definitions.
# if (first_zero != 0) and first_zero != len(contents):
# contents = contents[:first_zero]
# match = re.search(function_regex, contents)
# assert(match)
out = match.groupdict()
for k in list(out.keys()):
if out[k] is None:
del out[k]
for io in ['inputs', 'outputs']:
if io in out:
out[io] = cls.parse_var_definition(
io, out[io], outputs_in_inputs=outputs_in_inputs)
out['model_file'] = model_file
if outputs_in_inputs and expected_outputs and (not out.get('outputs', False)):
missing_expected_outputs = []
for o in expected_outputs:
if isinstance(o, dict):
o = o['name']
missing_expected_outputs.append(o)
out['outputs'] = []
for x in out['inputs']:
if x['name'] not in missing_expected_outputs:
continue
missing_expected_outputs.remove(x['name'])
out['outputs'].append(cls.input2output(x))
if missing_expected_outputs: # pragma: debug
raise ValueError(("Could not locate %d output "
"variable(s) in input: %s")
% (len(missing_expected_outputs),
missing_expected_outputs))
for x in out['outputs']:
out['inputs'].remove(x)
if out.get('flag_var', None):
flag_var = {'name': out.pop('flag_var'),
'datatype': {'type': 'flag'}}
if out.get('flag_type', None):
flag_var['native_type'] = out.pop('flag_type').replace(' ', '')
flag_var['datatype'] = cls.get_json_type(flag_var['native_type'])
out['flag_var'] = flag_var
cls.check_flag_var(out, outputs_in_inputs=outputs_in_inputs)
return out
@classmethod
def check_flag_var(cls, info, outputs_in_inputs=None):
r"""Check if the flag variable should be treated as an output.
Args:
info (dict): Information about the function.
outputs_in_inputs (bool, optional): If True, the outputs are
presented in the function definition as inputs. Defaults
to False.
"""
if outputs_in_inputs is None: # pragma: debug
outputs_in_inputs = cls.outputs_in_inputs
flag_t = cls.type_map['flag']
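# If the flag's native type differs from the language's flag type, or outputs
# are returned rather than passed as input parameters, treat the return value
# as a model output unless outputs were already parsed from the definition.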
if (((info.get('flag_var', {}).get('native_type', flag_t) != flag_t)
or (not outputs_in_inputs))):
if info.get('outputs', []): # pragma: debug
logger.warn("Support for returning outputs via parameter(s) "
"and return value is not yet support. The return "
"value will be assumed to be a flag indicating "
"the success of the model.")
info['outputs_in_inputs'] = True
else:
info['outputs'] = [info.pop('flag_var')]
info['outputs_in_inputs'] = False
@classmethod
def channels2vars(cls, channels):
r"""Convert a list of channels to a list of variables.
Args:
channels (list): List of channel dictionaries.
Returns:
list: List of variables.
"""
if not isinstance(channels, list):
channels = [channels]
variables = []
for x in channels:
variables += x['vars']
def get_pos(x):
return x.get('position', 0)
variables = sorted(variables, key=get_pos)
return variables
@classmethod
def expand_server_io(cls, inputs, outputs, client_comms=[]):
r"""Update inputs/outputs w/ information about server that will be
using them.
Args:
inputs (list): List of model inputs including types.
outputs (list): List of model outputs including types.
client_comms (list, optional): List of the names of client comms
that should be removed from the list of outputs. Defaults to [].
"""
if client_comms:
warnings.warn("When wrapping a model function, client comms "
"must either be initialized outside the function, "
"pass a 'global_scope' parameter to the "
"comm initialization (e.g. Python, R, Matlab), "
"or use a 'WITH_GLOBAL_SCOPE' macro "
"(e.g. C, C++, Fortran) around the initialization "
"so that they are persistent "
"across calls and the call or recv/send methods "
"must be called explicitly (as opposed to the "
"function inputs/outputs which will be handled "
"by the wrapper). This model's client comms are:\n"
"\t%s" % client_comms)
# Replace server input w/ split input/output and remove client
# connections from inputs
for i, x in enumerate(inputs):
if x.get('server_replaces', False):
inputs[x['server_replaces']['input_index']] = (
x['server_replaces']['input'])
outputs.insert(x['server_replaces']['output_index'],
x['server_replaces']['output'])
rm_outputs = [i for i, x in enumerate(outputs)
if x['name'] in client_comms]
for i in rm_outputs[::-1]:
outputs.pop(i)
@classmethod
def preparse_function(cls, yml):
r"""Extract information about inputs and outputs based on the
function being wrapped.
Args:
yml (dict): Options that will be used to initialize the model.
Returns:
dict: Information about the parsed function.
"""
if 'function' not in yml:
return
if yml.get('is_server', False):
assert(isinstance(yml['is_server'], dict))
if cls.function_param is None:
raise ValueError(("Language %s is not parameterized "
"and so functions cannot be automatically "
"wrapped as a model.") % cls.language)
source_files = cls.identify_source_files(**yml)
if not source_files: # pragma: debug
raise ValueError("Could not identify any source files.")
model_function_file = source_files[0]
if not os.path.isfile(model_function_file): # pragma: debug
raise ValueError("Source file does not exist: '%s'"
% model_function_file)
# Update input/outputs based on parsed source code
client_comms = ['%s:%s_%s' % (yml['name'], x, yml['name'])
for x in yml.get('client_of', [])]
model_function_inputs = copy.copy(yml.get('inputs', []))
model_function_outputs = copy.copy(yml.get('outputs', []))
cls.expand_server_io(
model_function_inputs, model_function_outputs,
client_comms=client_comms)
expected_outputs = []
for x in model_function_outputs:
expected_outputs += x.get('vars', [])
model_outputs_in_inputs = yml.get('outputs_in_inputs', None)
model_function_info = cls.parse_function_definition(
model_function_file, yml['function'],
expected_outputs=expected_outputs,
outputs_in_inputs=model_outputs_in_inputs)
if model_outputs_in_inputs is None:
model_outputs_in_inputs = model_function_info.get(
'outputs_in_inputs', None)
model_flag = cls.update_io_from_function(
model_function_info, yml['function'],
inputs=model_function_inputs,
outputs=model_function_outputs,
iter_function_over=yml.get('iter_function_over', []))
yml['preparsed_function'] = {
'model_file': model_function_info,
'model_function': yml['function'],
'inputs': model_function_inputs,
'outputs': model_function_outputs,
'model_flag': model_flag,
'outputs_in_inputs': model_outputs_in_inputs,
'copies': yml.get('copies', 1),
'iter_function_over': yml.get('iter_function_over', []),
'skip_update_io': True}
return yml['preparsed_function']
@classmethod
def update_io_from_function(cls, model_file, model_function,
inputs=[], outputs=[], contents=None,
outputs_in_inputs=None, iter_function_over=[]):
r"""Update inputs/outputs from the function definition.
Args:
model_file (str): Full path to the file containing the model
function's declaration.
model_function (str): Name of the model function.
inputs (list, optional): List of model inputs including types.
Defaults to [].
outputs (list, optional): List of model outputs including types.
Defaults to [].
contents (str, optional): Contents of file to parse rather than
re-reading the file. Defaults to None and is ignored.
outputs_in_inputs (bool, optional): If True, the outputs are
presented in the function definition as inputs. Defaults
to False.
iter_function_over (array, optional): Variable(s) that should be
received or sent as an array, but iterated over. Defaults to
an empty array and is ignored.
Returns:
dict, None: Flag variable used by the model. If None, the
model does not use a flag variable.
"""
# Read info from the source code
if (((isinstance(model_file, str) and os.path.isfile(model_file))
or (contents is not None))): # pragma: debug
expected_outputs = []
for x in outputs:
expected_outputs += x.get('vars', [])
info = cls.parse_function_definition(model_file, model_function,
contents=contents,
expected_outputs=expected_outputs)
logger.warn("The new execution pattern reuses the parsed "
"source code parameters. Double check results:\n%s."
% pformat(info))
elif isinstance(model_file, dict):
info = model_file
else:
info = {"inputs": [], "outputs": []}
if outputs_in_inputs is None: # pragma: debug
outputs_in_inputs = info.get('outputs_in_inputs',
cls.outputs_in_inputs)
info_map = {io: OrderedDict([(x['name'], x) for x in info.get(io, [])])
for io in ['inputs', 'outputs']}
# Determine flag variable
flag_var = None
if info.get('flag_var', None):
flag_var = dict(info['flag_var'], name='model_flag')
# Check for vars matching names of input/output channels
for io, io_var in zip(['inputs', 'outputs'], [inputs, outputs]):
if (io == 'outputs') and outputs_in_inputs:
io_map = info_map['inputs']
else:
io_map = info_map[io]
for x in io_var:
if x.get('vars', []):
continue
var_name = x['name'].split(':')[-1]
if var_name in io_map:
x['vars'] = [var_name]
for k in ['length', 'shape', 'ndim']:
kvar = '%s_var' % k
if kvar in io_map[var_name]:
x['vars'].append(io_map[var_name][kvar])
# Move variables if outputs in inputs
if outputs_in_inputs:
if ((((len(inputs) + len(outputs)) == len(info.get('inputs', [])))
and (len(info.get('outputs', [])) == 0))):
for i, vdict in enumerate(info['inputs'][:len(inputs)]):
inputs[i].setdefault('vars', [vdict['name']])
assert(inputs[i]['vars'] == [vdict['name']])
for i, vdict in enumerate(info['inputs'][len(inputs):]):
outputs[i].setdefault('vars', [vdict['name']])
assert(outputs[i]['vars'] == [vdict['name']])
for x in outputs:
for i, v in enumerate(x.get('vars', [])):
if v in info_map['inputs']:
info_map['outputs'][v] = cls.input2output(
info_map['inputs'].pop(v))
for io, io_var in zip(['inputs', 'outputs'], [inputs, outputs]):
for x in io_var:
x['channel_name'] = x['name']
x['channel'] = (x['name'].split(':', 1)[-1]
+ '_%s_channel' % io[:-1])
for i, v in enumerate(x.get('vars', [])):
if v in info_map[io]:
x['vars'][i] = info_map[io][v]
if (len(io_var) == 1) and info_map.get(io, False):
io_var[0].setdefault('vars', list(info_map[io].values()))
for x in io_var:
if 'vars' not in x:
x['vars'] = [copy.deepcopy(x)]
x['vars'][0]['name'] = x['name'].split(':', 1)[-1]
for v in x['vars']:
if isinstance(v.get('datatype', None), str):
v['datatype'] = {'type': v['datatype']}
if isinstance(x.get('datatype', None), str):
x['datatype'] = {'type': x['datatype']}
# Check for user defined length variables and add flag to
# length variables
for x in io_var:
for k in ['length', 'shape', 'ndim']:
for v in x['vars']:
if k + '_var' in v:
v[k + '_var'] = info_map[io][v[k + '_var']]
# v[k + '_var']['is_' + k + '_var'] = True
v[k + '_var']['is_length_var'] = True
else:
v[k + '_var'] = False
# Update datatypes
if cls.is_typed:
for x in io_var:
non_length = []
for v in x['vars']:
if not v.get('is_length_var', False):
non_length.append(v)
if ((x.get('datatype', None)
and (not is_default_typedef(x['datatype'])))):
if (len(non_length) == 1):
non_length[0]['datatype'] = x['datatype']
else:
# TODO: Remove types associated with length?
assert(x['datatype']['type'] == 'array')
assert(len(x['datatype']['items'])
== len(non_length))
for v, t in zip(non_length, x['datatype']['items']):
v['datatype'] = t
else:
if (len(non_length) == 1):
x['datatype'] = non_length[0]['datatype']
else:
x['datatype'] = {
'type': 'array',
'items': [v['datatype'] for v in non_length]}
x['datatype']['from_function'] = True
for v in x['vars']:
if 'native_type' not in v:
v['native_type'] = cls.get_native_type(**v)
# Update types based on iteration
for x in io_var:
for v in x.get('vars', [x]):
if v['name'] in iter_function_over:
v['iter_datatype'] = copy.deepcopy(v.get('datatype', {}))
if v.get('datatype', {}):
assert(v['datatype']['type'] == 'scalar')
v['datatype']['type'] = '1darray'
v.pop('native_type', None)
v['native_type'] = cls.get_native_type(**v)
# Finalize io variables
for x in inputs:
cls.finalize_function_io('input', x)
for x in outputs:
cls.finalize_function_io('output', x)
return flag_var
@classmethod
def finalize_function_io(cls, direction, x):
r"""Finalize info for an input/output channel following function
parsing.
Args:
direction (str): Direction of channel ('input' or 'output').
x (dict): Channel information dictionary to finalize.
"""
assert(direction in ['input', 'output'])
@classmethod
def write_model_wrapper(cls, model_file, model_function,
inputs=[], outputs=[], model_flag=None,
outputs_in_inputs=None, verbose=False, copies=1,
iter_function_over=[], verbose_model=False,
skip_update_io=False, model_name=None):
r"""Return the lines required to wrap a model function as an integrated
model.
Args:
model_file (str): Full path to the file containing the model
function's declaration.
model_function (str): Name of the model function.
inputs (list, optional): List of model inputs including types.
Defaults to [].
outputs (list, optional): List of model outputs including types.
Defaults to [].
model_flag (dict, optional): Information about the flag that
should be used to track the success of yggdrasil send/recv
calls. This should only be provided if update_io_from_function
has already been called. Defaults to None and is determined
by update_io_from_function.
outputs_in_inputs (bool, optional): If True, the outputs are
presented in the function definition as inputs. Defaults
to the class attribute outputs_in_inputs.
verbose (bool, optional): If True, the contents of the created file
are displayed. Defaults to False.
copies (int, optional): Number of times the model driver is
duplicated. If more than one, no error will be raised in the
event there is never a call to the function. Defaults to 1.
iter_function_over (array, optional): Variable(s) that should be
received or sent as an array, but iterated over. Defaults to
an empty array and is ignored.
skip_update_io (bool, optional): If True, update_io_from_function
will not be called. Defaults to False.
verbose_model (bool, optional): If True, print statements will
be added after every line in the model. Defaults to False.
model_name (str, optional): Name given to the model. Defaults to
None.
Returns:
list: Lines of code wrapping the provided model with the necessary
code to run it as part of an integration.
"""
if outputs_in_inputs is None:
outputs_in_inputs = cls.outputs_in_inputs
# TODO: Determine how to encode dependencies on external variables in models
if cls.function_param is None:
raise NotImplementedError("function_param attribute not set for"
"language '%s'" % cls.language)
# Update types based on the function definition for typed languages
if not skip_update_io:
model_flag = cls.update_io_from_function(
model_file, model_function,
inputs=inputs, outputs=outputs,
outputs_in_inputs=outputs_in_inputs,
iter_function_over=iter_function_over)
if isinstance(model_file, dict):
model_file = model_file['model_file']
# Update types based on iteration
iter_function_idx = None
iter_ivars = []
iter_ovars = []
if iter_function_over:
iter_function_idx = {'name': 'idx_func_iter',
'datatype': {'type': 'int'}}
if cls.zero_based:
iter_function_idx['begin'] = int(0)
else:
iter_function_idx['begin'] = int(1)
for x in inputs:
iter_ivars += [v for v in x.get('vars', [x])
if v['name'] in iter_function_over]
if not iter_ivars: # pragma: debug
raise RuntimeError("The iter_function_over model "
"parameter must include an input to "
"iterate over. To expand output arrays "
"into component elements, use the "
"'iterate' transformation.")
for x in outputs:
iter_ovars += [v for v in x.get('vars', [x])
if v['name'] in iter_function_over]
if iter_ivars[0].get('length_var', False):
iter_function_idx['end'] = iter_ivars[0]['length_var']
for v in iter_ovars:
v['length_var'] = iter_ivars[0]['length_var']['name']
if isinstance(iter_function_idx['end'], dict):
iter_function_idx['end'] = iter_function_idx['end']['name']
else:
iter_function_idx['end'] = cls.format_function_param(
'len', variable=iter_ivars[0]['name'],
extra=iter_ivars[0])
for v in iter_ivars + iter_ovars:
v['iter_var'] = iter_function_idx
# Declare variables and flag, then define flag
lines = []
flag_var = {'name': 'flag', 'datatype': {'type': 'flag'}}
iter_var = {'name': 'first_iter', 'datatype': {'type': 'flag'}}
free_vars = []
definitions = []
if 'declare' in cls.function_param:
for x in inputs + outputs:
lines += cls.write_channel_decl(
x, definitions=definitions,
requires_freeing=free_vars)
lines += cls.write_declaration(flag_var,
definitions=definitions,
requires_freeing=free_vars)
lines += cls.write_declaration(iter_var,
definitions=definitions,
requires_freeing=free_vars)
if model_flag:
lines += cls.write_declaration(
model_flag, definitions=definitions,
requires_freeing=free_vars)
if iter_function_idx:
lines += cls.write_declaration(
iter_function_idx, definitions=definitions,
requires_freeing=free_vars)
for x in inputs + outputs:
for v in x.get('vars', [x]):
lines += cls.write_declaration(
v, definitions=definitions,
requires_freeing=free_vars)
lines += definitions
nline_preamble = len(lines)
lines.append(cls.format_function_param(
'assign', name=flag_var['name'],
value=cls.function_param.get(
'true_flag', cls.function_param['true'])))
lines.append(cls.format_function_param(
'assign', name=iter_var['name'],
value=cls.function_param.get(
'true_flag', cls.function_param['true'])))
# Declare/define input and output channels
for x in inputs:
lines += cls.write_channel_def('input',
requires_freeing=free_vars, **x)
for x in outputs:
lines += cls.write_channel_def('output',
requires_freeing=free_vars, **x)
# Receive inputs before loop
for x in inputs:
if x.get('outside_loop', False):
lines += cls.write_model_recv(x['channel'], x,
flag_var=flag_var)
# Loop
loop_lines = []
# Receive inputs
any_loop_inputs = False
loop_iter_var = iter_var
if copies > 1:
loop_iter_var = None
for x in inputs:
if not x.get('outside_loop', False):
any_loop_inputs = True
loop_lines += cls.write_model_recv(x['channel'], x,
flag_var=flag_var,
iter_var=loop_iter_var,
allow_failure=True)
# Prepare output array
if iter_function_over:
for v in iter_ivars:
if v['name'] in iter_function_over:
loop_lines += cls.write_finalize_iiter(v)
for v in iter_ovars:
if v['name'] in iter_function_over:
loop_lines += cls.write_initialize_oiter(v)
# Call model
loop_lines += cls.write_model_function_call(
model_function, model_flag, inputs, outputs,
outputs_in_inputs=outputs_in_inputs,
iter_function_idx=iter_function_idx)
# Finalize output array
if iter_function_over:
for v in iter_ovars:
if v['name'] in iter_function_over:
loop_lines += cls.write_finalize_oiter(v)
# Send outputs
for x in outputs:
if not x.get('outside_loop', False):
loop_lines += cls.write_model_send(x['channel'], x,
flag_var=flag_var)
loop_lines.append(cls.format_function_param(
'assign', name=iter_var['name'],
value=cls.function_param.get('false_flag',
cls.function_param['false'])))
# Add break if there are not any inputs inside the loop
if not any_loop_inputs:
loop_lines.append(cls.format_function_param(
'assign', name=flag_var['name'],
value=cls.function_param.get(
'false_flag', cls.function_param['false'])))
# Add loop in while block
flag_cond = cls.format_function_param('flag_cond',
default='{flag_var}',
flag_var=flag_var['name'])
lines += cls.write_while_loop(flag_cond, loop_lines)
# Send outputs after loop
for x in outputs:
if x.get('outside_loop', False):
lines += cls.write_model_send(x['channel'], x,
flag_var=flag_var)
# Free variables
for x in free_vars:
lines += cls.write_free(x)
# Add prints
if verbose_model: # pragma: debug
idx = len(lines) - 1
while (idx > nline_preamble):
if 'else' not in lines[idx]:
indent = ' ' * (len(lines[idx])
- len(lines[idx].lstrip()))
lines.insert(idx, indent + cls.format_function_param(
'print', message=("%s: line %d" % (model_file, idx))))
idx -= 1
# Wrap as executable with interface & model import
prefix = None
if 'interface' in cls.function_param:
ygglib = cls.interface_library
if ygglib in cls.internal_libraries:
ygglib = cls.internal_libraries[ygglib]['source']
if cls.interface_inside_exec:
lines.insert(0, cls.format_function_param(
'interface', interface_library=ygglib))
else:
prefix = [cls.format_function_param(
'interface', interface_library=ygglib)]
out = cls.write_executable(lines, prefix=prefix,
model_name=model_name,
imports={'filename': model_file,
'function': model_function})
if verbose: # pragma: debug
logger.info('\n' + '\n'.join(out))
else:
logger.debug('\n' + '\n'.join(out))
return out
@classmethod
def write_channel_decl(cls, var, **kwargs):
r"""Write a channel declaration.
Args:
var (dict): Information dictionary for the channel being declared.
**kwargs: Additional keyword arguments are passed to class's
write_declaration.
Returns:
list: The lines declaring the variable.
"""
out = []
if not cls.dont_declare_channel:
out = cls.write_declaration(
{'name': var['channel'], 'type': 'comm'}, **kwargs)
if (((var.get('datatype', None) is not None)
and ('{channel_type}' in cls.function_param['input']))):
var['channel_type'] = '%s_type' % var['channel']
out += cls.write_type_decl(
var['channel_type'], var['datatype'],
definitions=kwargs.get('definitions', None),
requires_freeing=kwargs.get('requires_freeing', None))
return out
@classmethod
def write_type_decl(cls, name, datatype, name_base=None,
requires_freeing=None, definitions=None,
no_decl=False):
r"""Get lines declaring the datatype within the language.
Args:
name (str): Name of variable that should be declared.
datatype (dict): Type definition.
name_base (str, optional): Base name used when naming supporting
variables (e.g. item/property declarations). Defaults to name.
requires_freeing (list, optional): List that variables requiring
freeing should be appended to. Defaults to None.
definitions (list, optional): Existing list that variable
definitions should be added to. Defaults to None if not
provided and definitions will be included in the returned
lines.
no_decl (bool, optional): If True, the variable is not
declared, but supporting variables will be. Defaults
to False.
Returns:
list: Lines required to define a type declaration.
"""
out = []
if name_base is None:
name_base = name
if datatype['type'] == 'array':
if 'items' in datatype:
assert(isinstance(datatype['items'], list))
out += cls.write_declaration(
{'name': '%s_items' % name_base,
'datatype': {
'type': '1darray', 'subtype': 'dtype',
'length': len(datatype['items'])}},
definitions=definitions,
requires_freeing=requires_freeing)
for i, x in enumerate(datatype['items']):
# Prevent recursion
x_copy = copy.deepcopy(x)
x_copy.pop('items', None)
x_copy.pop('properties', None)
out += cls.write_type_decl(
None, x_copy,
name_base=('%s_item%d' % (name_base, i)),
definitions=definitions,
requires_freeing=requires_freeing,
no_decl=True)
elif datatype['type'] == 'object':
if 'properties' in datatype:
assert(isinstance(datatype['properties'], dict))
precision = 0
if datatype['properties']:
precision = max([len(k) for k in
datatype['properties'].keys()])
precision = max(80, precision)
out += cls.write_declaration(
{'name': '%s_keys' % name_base,
'datatype': {
'type': '1darray', 'subtype': 'bytes',
'length': len(datatype['properties']),
'precision': precision}},
definitions=definitions,
requires_freeing=requires_freeing)
out += cls.write_declaration(
{'name': '%s_vals' % name_base,
'datatype': {
'type': '1darray', 'subtype': 'dtype',
'length': len(datatype['properties'])}},
definitions=definitions,
requires_freeing=requires_freeing)
for i, (k, v) in enumerate(datatype['properties'].items()):
# Prevent recursion
v_copy = copy.deepcopy(v)
v_copy.pop('items', None)
v_copy.pop('properties', None)
out += cls.write_type_decl(
None, v_copy,
name_base=('%s_prop%d' % (name_base, i)),
requires_freeing=requires_freeing,
definitions=definitions,
no_decl=True)
elif datatype['type'] == 'ndarray':
if 'shape' in datatype:
out += cls.write_declaration(
{'name': '%s_shape' % name_base,
'datatype': {
'type': '1darray', 'subtype': 'int',
'precision': 64, 'length': len(datatype['shape'])}},
definitions=definitions,
requires_freeing=requires_freeing)
elif datatype['type'] in (['ply', 'obj', '1darray', 'scalar',
'boolean', 'null', 'number', 'integer',
'string', 'class', 'function', 'instance',
'schema', 'any']
+ list(constants.VALID_TYPES.keys())):
pass
else: # pragma: debug
raise ValueError(("Cannot create %s version of type "
"'%s'") % (cls.language, datatype['type']))
if not no_decl:
out += cls.write_declaration(
{'name': name, 'type': 'dtype'})
return out
@classmethod
def write_type_def(cls, name, datatype, name_base=None,
use_generic=False):
r"""Get lines declaring the data type within the language.
Args:
name (str): Name of variable that definition should be stored in.
datatype (dict): Type definition.
name_base (str, optional): Base name used when naming supporting
variables for nested items/properties. Defaults to name.
use_generic (bool, optional): If True variables serialized
and/or deserialized by the type will be assumed to be
generic objects. Defaults to False.
Returns:
list: Lines required to define a type definition.
"""
out = []
fmt = None
keys = {}
if use_generic:
keys['use_generic'] = cls.function_param['true']
else:
keys['use_generic'] = cls.function_param['false']
typename = datatype['type']
if name_base is None:
name_base = name
if datatype['type'] == 'array':
if 'items' in datatype:
assert(isinstance(datatype['items'], list))
keys['nitems'] = len(datatype['items'])
keys['items'] = '%s_items' % name_base
if cls.zero_based:
idx_offset = 0
else:
idx_offset = 1
for i, x in enumerate(datatype['items']):
# Prevent recursion
x_copy = copy.deepcopy(x)
x_copy.pop('items', None)
x_copy.pop('properties', None)
out += cls.write_type_def(
cls.format_function_param(
'index', variable=keys['items'],
index=(i + idx_offset)), x_copy,
name_base=('%s_item%d' % (name_base, i)),
use_generic=use_generic)
else:
keys['nitems'] = 0
keys['items'] = cls.function_param['null']
keys['use_generic'] = cls.function_param['true']
elif datatype['type'] == 'object':
keys['use_generic'] = cls.function_param['true']
if 'properties' in datatype:
assert(isinstance(datatype['properties'], dict))
keys['nitems'] = len(datatype['properties'])
keys['keys'] = '%s_keys' % name_base
keys['values'] = '%s_vals' % name_base
if cls.zero_based:
idx_offset = 0
else:
idx_offset = 1
for i, (k, v) in enumerate(datatype['properties'].items()):
# Prevent recursion
v_copy = copy.deepcopy(v)
v_copy.pop('items', None)
v_copy.pop('properties', None)
out.append(cls.format_function_param(
'assign', value='\"%s\"' % k,
name=cls.format_function_param(
'index', variable=keys['keys'],
index=(i + idx_offset))))
out += cls.write_type_def(
cls.format_function_param(
'index', variable=keys['values'],
index=(i + idx_offset)), v_copy,
name_base=('%s_prop%d' % (name_base, i)),
use_generic=use_generic)
else:
keys['nitems'] = 0
keys['keys'] = cls.function_param['null']
keys['values'] = cls.function_param['null']
elif datatype['type'] in ['ply', 'obj']:
pass
elif datatype['type'] == '1darray':
for k in ['subtype', 'precision']:
keys[k] = datatype[k]
keys['precision'] = int(keys['precision'])
keys['length'] = datatype.get('length', '0')
keys['units'] = datatype.get('units', '')
elif datatype['type'] == 'ndarray':
for k in ['subtype', 'precision']:
keys[k] = datatype[k]
keys['precision'] = int(keys['precision'])
if 'shape' in datatype:
shape_var = '%s_shape' % name_base
if cls.zero_based:
idx_offset = 0
else:
idx_offset = 1
for i, x in enumerate(datatype['shape']):
out.append(cls.format_function_param(
'assign', value=x,
name=cls.format_function_param(
'index', variable=shape_var,
index=(i + idx_offset))))
keys['ndim'] = len(datatype['shape'])
keys['shape'] = shape_var
typename = 'ndarray_arr'
else:
keys['ndim'] = 0
keys['shape'] = cls.function_param['null']
keys['units'] = datatype.get('units', '')
elif (typename == 'scalar') or (typename in constants.VALID_TYPES):
keys['subtype'] = datatype.get('subtype', datatype['type'])
keys['units'] = datatype.get('units', '')
if keys['subtype'] in ['bytes', 'string', 'unicode']:
keys['precision'] = int(datatype.get('precision', 0))
else:
keys['precision'] = int(datatype['precision'])
typename = 'scalar'
elif datatype['type'] in ['boolean', 'null', 'number',
'integer', 'string']:
keys['type'] = datatype['type']
typename = 'default'
elif (typename in ['class', 'function']):
keys['type'] = typename
typename = 'pyobj'
elif typename in ['instance', 'any']:
keys['use_generic'] = cls.function_param['true']
typename = 'empty'
elif typename in ['schema']:
keys['use_generic'] = cls.function_param['true']
else: # pragma: debug
raise ValueError("Cannot create %s version of type '%s'"
% (cls.language, typename))
fmt = cls.format_function_param('init_type_%s' % typename, **keys)
out.append(cls.format_function_param('assign', name=name,
value=fmt))
return out
@classmethod
def write_channel_def(cls, key, datatype=None, **kwargs):
r"""Write an channel definition.
Args:
key (str): Entry in cls.function_param that should be used.
datatype (dict, optional): Data type associated with the channel.
Defaults to None and is ignored.
**kwargs: Additional keyword arguments are passed as parameters
to format_function_param.
Returns:
list: Lines required to declare and define an output channel.
"""
out = []
if (datatype is not None) and ('{channel_type}' in cls.function_param[key]):
kwargs['channel_type'] = '%s_type' % kwargs['channel']
out += cls.write_type_def(
kwargs['channel_type'], datatype,
use_generic=kwargs.get('use_generic', False))
dir_map = {'input': 'recv', 'output': 'send'}
try_keys = [dir_map[key] + '_converter', 'transform']
try_vals = []
if all([bool(kwargs.get(k, False)) for k in try_keys]): # pragma: debug
# TODO: Handling merger of the transforms in yaml or
# remove the *_converter options entirely
raise RuntimeError(("Transforms are specified in multiple "
"locations for this input: %s")
% str(try_keys))
for k in try_keys:
if k in kwargs:
v = kwargs[k]
if not isinstance(v, list):
v = [v]
try_vals += v
# This last transform is used because the others are assumed
# to be applied by the connection driver
if try_vals and isinstance(try_vals[-1], str):
try_key = '%s_%s' % (try_vals[-1], key)
if ((('python_interface' in cls.function_param)
and (try_key in cls.python_interface))):
kwargs['python_interface'] = cls.python_interface[try_key]
if ((('format_str' in kwargs)
and ('python_interface_format' in cls.function_param))):
key = 'python_interface_format'
kwargs['format_str'] = kwargs['format_str'].encode(
"unicode_escape").decode('utf-8')
else:
key = 'python_interface'
out += [cls.format_function_param(key, **kwargs)]
return out
@classmethod
def write_model_function_call(cls, model_function, flag_var, inputs, outputs,
outputs_in_inputs=None, on_failure=None,
format_not_flag_cond=None, format_flag_cond=None,
iter_function_idx=None):
r"""Write lines necessary to call the model function.
Args:
model_function (str): Handle of the model function that should be
called.
flag_var (str): Name of variable that should be used as a flag.
inputs (list): List of dictionaries describing inputs to the model.
outputs (list): List of dictionaries describing outputs from the model.
outputs_in_inputs (bool, optional): If True, the outputs are
presented in the function definition as inputs. Defaults
to the class attribute outputs_in_inputs.
on_failure (list, optional): Lines to be executed if the model
call fails. Defaults to an error message. This variable
is only used if flag_var is not None and outputs_in_inputs
is True.
format_not_flag_cond (str, optional): Format string that produces
a conditional expression that evaluates to False when the
model flag indicates a failure. Defaults to None and the
class's value for 'not_flag_cond' in function_param is used
if it exists. If it does not exist, format_flag_cond is used.
format_flag_cond (str, optional): Format string that produces
a conditional expression that evaluates to True when the
model flag indicates a success. Defaults to None and the
class's value for 'flag_cond' in function_param is
used if it exists. If it does not exist, the flag is
directly evaluated as if it were a boolean.
iter_function_idx (dict, optional): Variable that serves as an
index to iterate over variables. Defaults to None.
Returns:
list: Lines required to carry out a call to a model function in
this language.
"""
if outputs_in_inputs is None: # pragma: debug
outputs_in_inputs = cls.outputs_in_inputs
func_inputs = cls.channels2vars(inputs)
func_outputs = cls.channels2vars(outputs)
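# When iterating over variables, substitute each iterated variable with an
# indexed element of the underlying array for the duration of the function call.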
if iter_function_idx:
for src in [func_inputs, func_outputs]:
for i, x in enumerate(src):
if 'iter_datatype' in x:
src[i] = dict(
x, datatype=x['iter_datatype'],
name=cls.format_function_param(
'index', variable=x['name'],
index=iter_function_idx['name'],
extra=x),
length_var=False)
if isinstance(flag_var, dict):
flag_var = flag_var['name']
out = cls.write_function_call(
model_function, inputs=func_inputs, outputs=func_outputs,
flag_var=flag_var, outputs_in_inputs=outputs_in_inputs)
if flag_var and outputs_in_inputs:
if (not format_flag_cond) and ('not_flag_cond' in cls.function_param):
flag_cond = cls.format_function_param(
'not_flag_cond', flag_var=flag_var,
replacement=format_not_flag_cond)
else: # pragma: debug
# flag_cond = '%s (%s)' % (
# cls.function_param['not'],
# cls.format_function_param(
# 'flag_cond', default='{flag_var}', flag_var=flag_var,
# replacement=format_flag_cond))
raise RuntimeError("Untested code below. Uncomment "
"at your own risk if you find "
"use case for it.")
if on_failure is None:
on_failure = [cls.format_function_param(
'error', error_msg="Model call failed.")]
out += cls.write_if_block(flag_cond, on_failure)
if iter_function_idx:
out = cls.write_for_loop(iter_function_idx['name'],
iter_function_idx['begin'],
iter_function_idx['end'],
out)
return out
@classmethod
def write_model_recv(cls, channel, recv_var, flag_var='flag',
iter_var=None, allow_failure=False,
alt_recv_function=None):
r"""Write a model receive call include checking the return flag.
Args:
channel (str): Name of variable that the channel being received from
was stored in.
recv_var (dict, list): Information on one or more variables that
received information should be stored in.
flag_var (str, optional): Name of flag variable that the flag should
be stored in. Defaults to 'flag'.
iter_var (str, optional): Name of flag signifying when the
model is in its first iteration. If allow_failure is
True and iter_var is provided, an error will be raised
if iter_var is True. Defaults to None.
allow_failure (bool, optional): If True, the returned lines will
call a break if the flag is False. Otherwise, the returned
lines will issue an error. Defaults to False.
alt_recv_function (str, optional): Alternate receive function
format string. Defaults to None and is ignored.
Returns:
list: Lines required to carry out a receive call in this language.
"""
if cls.function_param is None:
raise NotImplementedError("function_param attribute not set for"
"language '%s'" % cls.language)
recv_var_str = recv_var
if not isinstance(recv_var, str):
recv_var_par = cls.channels2vars(recv_var)
recv_var_str = cls.prepare_output_variables(
recv_var_par, in_inputs=cls.outputs_in_inputs,
for_yggdrasil=True)
else:
recv_var_par = cls.split_variables(recv_var_str)
expanded_recv_var = None
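# If the language supports a 'multiple_outputs' pattern, receive into a single
# temporary variable here and expand it into the individual variables after the
# flag check below.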
if (len(recv_var_par) > 1) and ('multiple_outputs' in cls.function_param):
expanded_recv_var = recv_var_str
recv_var_str = 'temp_%s' % recv_var_par[0]['name']
if isinstance(flag_var, dict):
flag_var = flag_var['name']
if isinstance(iter_var, dict):
iter_var = iter_var['name']
if cls.outputs_in_inputs:
inputs = [recv_var_str]
outputs = [flag_var]
else:
inputs = []
outputs = [flag_var, recv_var_str]
if cls.include_channel_obj:
inputs.insert(0, channel)
lines = cls.write_function_call(
cls.format_function_param('recv_function', channel=channel,
replacement=alt_recv_function),
inputs=inputs, outputs=outputs, include_arg_count=cls.include_arg_count)
if 'not_flag_cond' in cls.function_param:
flag_cond = cls.format_function_param('not_flag_cond',
flag_var=flag_var)
else:
flag_cond = '%s (%s)' % (
cls.function_param['not'],
cls.format_function_param('flag_cond', default='{flag_var}',
flag_var=flag_var))
fail_message = cls.escape_quotes(
"Could not receive %s." % recv_var_str)
if allow_failure:
fail_message = cls.escape_quotes(
'End of input from %s.' % recv_var_str)
if_block = [cls.format_function_param('print', message=fail_message),
cls.function_param.get('break', 'break')]
if iter_var is not None:
if_block = cls.write_if_block(
iter_var,
[cls.format_function_param(
'error', error_msg=cls.escape_quotes(
'No input from %s.' % recv_var_str))],
if_block)
else:
if_block = [cls.format_function_param('error', error_msg=fail_message)]
lines += cls.write_if_block(flag_cond, if_block)
# Check if single element should be expanded
if expanded_recv_var:
# lines.append(cls.format_function_param(
# 'print_generic', object=recv_var_str))
if 'expand_mult' in cls.function_param: # pragma: matlab
lines.append(cls.format_function_param(
'expand_mult', name=expanded_recv_var, value=recv_var_str))
elif 'assign_mult' in cls.function_param:
lines.append(cls.format_function_param(
'assign_mult', name=expanded_recv_var, value=recv_var_str))
else:
lines.append(cls.format_function_param(
'assign', name=expanded_recv_var, value=recv_var_str))
elif len(recv_var_par) == 1:
lines += cls.write_expand_single_element(recv_var_str)
return lines
@classmethod
def write_model_send(cls, channel, send_var, flag_var='flag',
allow_failure=False):
r"""Write a model send call include checking the return flag.
Args:
channel (str): Name of variable that the channel being sent to
was stored in.
send_var (dict, list): Information on one or more variables
containing information that will be sent.
flag_var (str, optional): Name of flag variable that the flag should
be stored in. Defaults to 'flag'.
allow_failure (bool, optional): If True, the returned lines will
call a break if the flag is False. Otherwise, the returned
lines will issue an error. Defaults to False.
Returns:
list: Lines required to carry out a send call in this language.
"""
if cls.function_param is None:
raise NotImplementedError("function_param attribute not set for"
"language '%s'" % cls.language)
send_var_str = send_var
if not isinstance(send_var_str, str):
send_var_par = cls.channels2vars(send_var)
send_var_str = cls.prepare_input_variables(
send_var_par, for_yggdrasil=True)
if isinstance(flag_var, dict):
flag_var = flag_var['name']
if cls.include_channel_obj:
send_var_str = [channel, send_var_str]
lines = cls.write_function_call(
cls.format_function_param('send_function', channel=channel),
inputs=send_var_str,
outputs=flag_var, include_arg_count=cls.include_arg_count)
flag_cond = '%s (%s)' % (
cls.function_param['not'],
cls.format_function_param('flag_cond', default='{flag_var}',
flag_var=flag_var))
fail_message = cls.escape_quotes(
"Could not send %s." % send_var_str)
if allow_failure: # pragma: no cover
# This is not particularly useful, but is included for completion
if_block = [cls.format_function_param('print', message=fail_message),
cls.function_param.get('break', 'break')]
else:
if_block = [cls.format_function_param('error', error_msg=fail_message)]
lines += cls.write_if_block(flag_cond, if_block)
return lines
@classmethod
def write_print_var(cls, var, prefix_msg=None):
r"""Get the lines necessary to print a variable in this language.
Args:
var (dict): Variable information.
prefix_msg (str, optional): Message that should be printed
before the variable. Defaults to None and is ignored.
Returns:
list: Lines printing the specified variable.
"""
out = []
print_key = None
varname = var
if isinstance(var, dict):
varname = var['name']
typename = var.get(
'datatype',
{'type': var.get('type', None)}).get('type', None)
if ('print_%s' % typename) in cls.function_param:
print_key = ('print_%s' % typename)
elif 'print_generic' in cls.function_param:
print_key = 'print_generic'
if print_key:
if prefix_msg is not None:
out.append(cls.format_function_param(
'print', message=prefix_msg))
out += [cls.format_function_param(
print_key, object=varname)]
return out
@classmethod
def write_print_input_var(cls, var, **kwargs):
r"""Get the lines necessary to print an input variable in this
language.
Args:
var (dict): Variable information.
**kwargs: Additional keyword arguments are passed to write_print_var.
Returns:
list: Lines printing the specified variable.
"""
return cls.write_print_var(var, **kwargs)
@classmethod
def write_print_output_var(cls, var, in_inputs=False, **kwargs):
r"""Get the lines necessary to print an output variable in this
language.
Args:
var (dict): Variable information.
in_inputs (bool, optional): If True, the output variable
is passed in as an input variable to be populated.
Defaults to False.
**kwargs: Additional keyword arguments are passed to write_print_var.
Returns:
list: Lines printing the specified variable.
"""
return cls.write_print_var(var, **kwargs)
@classmethod
def write_function_def(cls, function_name, inputs=[], outputs=[],
input_var=None, output_var=None,
function_contents=[],
outputs_in_inputs=False,
opening_msg=None, closing_msg=None,
print_inputs=False, print_outputs=False,
skip_interface=False, function_keys=None,
verbose=False, **kwargs):
r"""Write a function definition.
Args:
function_name (str): Name of the function being defined.
inputs (list, optional): List of inputs to the function.
Defaults to []. Ignored if input_var provided.
outputs (list, optional): List of outputs from the function.
Defaults to []. If not provided, no return call is
added to the function body. Ignored if output_var
provided.
input_var (str, optional): Full string specifying input in
the function definition. If not provided, this will be
created based on the contents of the inputs variable.
output_var (str, optional): Full string specifying output in
the function definition. If not provided, this will be
created based on the contents of the outputs variable.
function_contents (list, optional): List of lines comprising
the body of the function. Defaults to [].
outputs_in_inputs (bool, optional): If True, the outputs are
presented in the function definition as inputs. Defaults
to False.
opening_msg (str, optional): String that should be printed
before the function contents (and inputs if print_inputs
is True). Defaults to None and is ignored.
closing_msg (str, optional): String that should be printed
after the function contents (and outputs if print_outputs
is True). Defaults to None and is ignored.
print_inputs (bool, optional): If True, the input variables
will be printed before the function contents. Defaults
to False.
print_outputs (bool, optional): If True, the output variables
will be printed after the function contents. Defaults to
False.
skip_interface (bool, optional): If True, the line including
the interface will be skipped. Defaults to False.
function_keys (tuple, optional): 2 element tuple that
specifies the keys for the function_param entries that
should be used to begin & end a function definition.
Defaults to ('function_def_begin', 'function_def_end').
verbose (bool, optional): If True, the contents of the created file
are displayed. Defaults to False.
**kwargs: Additional keyword arguments are passed to
cls.format_function_param.
Returns:
list: Lines completing the function call.
Raises:
NotImplementedError: If the function_param attribute for the
class is not defined.
"""
if cls.function_param is None:
raise NotImplementedError("function_param attribute not set for"
"language '%s'" % cls.language)
if function_keys is None:
function_keys = ('function_def_begin', 'function_def_end')
out = []
interface_lines = []
if ('interface' in cls.function_param) and (not skip_interface):
ygglib = cls.interface_library
if ygglib in cls.internal_libraries:
ygglib = cls.internal_libraries[ygglib]['source']
interface_lines.append(cls.format_function_param(
'interface', interface_library=ygglib))
if not cls.interface_inside_exec:
out += interface_lines
flag_var = {}
if input_var is None:
input_var = cls.prepare_input_variables(
inputs, in_definition=True)
if output_var is None:
output_var = cls.prepare_output_variables(
outputs, in_inputs=outputs_in_inputs, in_definition=True)
print_input_lines = []
if print_inputs and inputs:
for x in inputs:
print_input_lines += cls.write_print_input_var(
x, prefix_msg=('INPUT[%s]:' % x['name']))
print_output_lines = []
if print_outputs and outputs:
for x in outputs:
print_output_lines += cls.write_print_output_var(
x, prefix_msg=('OUTPUT[%s]:' % x['name']),
in_inputs=outputs_in_inputs)
old_outputs = []
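# When outputs are passed in as input parameters, fold the declared outputs into
# the input variable string and return only a success flag from the wrapper.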
if outputs_in_inputs:
if output_var:
input_var = cls.prepare_input_variables(
[input_var, output_var])
flag_var = kwargs.get('flag_var', 'flag')
if isinstance(flag_var, str):
flag_var = {'name': flag_var}
flag_var.setdefault('datatype', 'flag')
flag_var.setdefault('value', cls.function_param.get(
'true_flag', cls.function_param['true']))
old_outputs = outputs
outputs = [flag_var]
output_var = cls.prepare_output_variables(outputs)
out.append(cls.format_function_param(
function_keys[0], function_name=function_name,
input_var=input_var, output_var=output_var, **kwargs))
if cls.interface_inside_exec:
out += [cls.function_param['indent'] + x
for x in interface_lines]
free_vars = []
if 'declare' in cls.function_param:
definitions = []
if not cls.types_in_funcdef:
for o in (inputs + old_outputs):
out += [cls.function_param['indent'] + x for
x in cls.write_declaration(
o, definitions=definitions,
requires_freeing=free_vars,
is_argument=True)]
for o in outputs:
out += [cls.function_param['indent'] + x for
x in cls.write_declaration(
o, definitions=definitions,
requires_freeing=free_vars)]
out += [cls.function_param['indent'] + x
for x in definitions]
if outputs_in_inputs:
out.append(cls.function_param['indent']
+ cls.format_function_param(
'assign', **flag_var))
if opening_msg:
out.append(cls.function_param['indent']
+ cls.format_function_param(
'print', message=opening_msg))
if print_inputs:
for x in print_input_lines:
out.append(cls.function_param['indent'] + x)
for x in function_contents:
out.append(cls.function_param['indent'] + x)
if print_outputs:
for x in print_output_lines:
out.append(cls.function_param['indent'] + x)
if closing_msg:
out.append(cls.function_param['indent']
+ cls.format_function_param(
'print', message=closing_msg))
# This is not currently used by the tests, but may be
# needed in the future
assert(not free_vars)
# for x in free_vars:
# out += [cls.function_param['indent'] + line
# for line in cls.write_free(x)]
if output_var and ('return' in cls.function_param):
out.append(cls.function_param['indent']
+ cls.format_function_param(
'return', output_var=output_var))
if function_keys[1] in cls.function_param:
out.append(cls.format_function_param(
function_keys[1], function_name=function_name))
else:
out.append(cls.function_param.get('block_end', ''))
if verbose: # pragma: debug
logger.info('\n' + '\n'.join(out))
else:
logger.debug('\n' + '\n'.join(out))
return out
@classmethod
def write_function_call(cls, function_name, inputs=[], outputs=[],
include_arg_count=False,
outputs_in_inputs=False, **kwargs):
r"""Write a function call.
Args:
function_name (str): Name of the function being called.
inputs (list, optional): List of inputs to the function.
Defaults to [].
outputs (list, optional): List of outputs from the function.
Defaults to [].
include_arg_count (bool, optional): If True, the count of input
arguments is included as the first argument. Defaults to
False.
outputs_in_inputs (bool, optional): If True, the outputs are
presented in the function definition as inputs. Defaults
to False.
**kwargs: Additional keyword arguments are passed to
cls.format_function_param.
Returns:
list: Lines completing the function call.
"""
if outputs_in_inputs:
inputs = inputs + [cls.prepare_output_variables(
outputs, in_inputs=outputs_in_inputs)]
flag_var = kwargs.get('flag_var', None)
if (flag_var is None) and ('function_call_noout' not in cls.function_param):
flag_var = 'flag'
outputs = []
if flag_var:
outputs.append(flag_var)
kwargs.setdefault('input_var', cls.prepare_input_variables(inputs))
kwargs.setdefault('output_var', cls.prepare_output_variables(outputs))
nout = len(cls.split_variables(kwargs['output_var']))
if include_arg_count:
narg = len(cls.split_variables(kwargs['input_var']))
kwargs['input_var'] = cls.prepare_input_variables(
[str(narg), kwargs['input_var']])
if (nout == 0) and ('function_call_noout' in cls.function_param):
call_str = cls.format_function_param(
'function_call_noout', function_name=function_name, **kwargs)
else:
call_str = cls.format_function_param(
'function_call', default='{function_name}({input_var})',
function_name=function_name, **kwargs)
if nout == 0:
out = [call_str + cls.function_param.get('line_end', '')]
elif (nout > 1) and ('assign_mult' in cls.function_param):
out = [cls.format_function_param(
'assign_mult', name=kwargs['output_var'], value=call_str)]
else:
out = [cls.format_function_param(
'assign', name=kwargs['output_var'], value=call_str)]
return out
@classmethod
def write_executable_import(cls, model_name=None, **kwargs):
r"""Add import statements to executable lines.
Args:
model_name (str, optional): Name given to the model. Defaults to None.
**kwargs: Keyword arguments for the import statement.
Returns:
list: Lines required to complete the import.
"""
# This code is currently unused, but may be needed in the
# future to import a dependency directly
# if ('filename' not in kwargs) and ('import_nofile' in cls.function_param):
# key = 'import_nofile'
# else:
# key = 'import'
# return [cls.format_function_param(key, **kwargs)]
out = []
if 'import' in cls.function_param:
out = [cls.format_function_param('import', **kwargs)]
return out
@classmethod
def write_executable(cls, lines, prefix=None, suffix=None,
function_definitions=None, imports=None,
model_name=None):
r"""Return the lines required to complete a program that will run
the provided lines.
Args:
lines (list): Lines of code to be wrapped as an executable.
prefix (list, optional): Lines of code that should precede the
wrapped code. Defaults to None and is ignored. (e.g. C/C++
include statements).
suffix (list, optional): Lines of code that should follow the
wrapped code. Defaults to None and is ignored.
function_definitions (list, optional): Lines of code defining
functions that will be used by the code contained in lines.
Defaults to None and is ignored.
imports (list, optional): Kwargs for packages that should
be imported for use by the executable. Defaults to
None and is ignored.
model_name (str, optional): Name given to the model. Defaults to
None.
Returns:
list: Lines of code wrapping the provided lines with the
necessary code to run it as an executable (e.g. C/C++'s main).
"""
if cls.function_param is None:
raise NotImplementedError("function_param attribute not set for"
"language '%s'" % cls.language)
out = []
# Add imports
if imports is not None:
if not isinstance(imports, list):
imports = [imports]
import_lines = []
for kws in imports:
import_lines += cls.write_executable_import(**kws)
if prefix is None:
prefix = []
prefix += import_lines
# Add standard & user defined prefixes
if ((('exec_prefix' in cls.function_param)
and (cls.function_param['exec_prefix'] not in lines))):
out.append(cls.function_param['exec_prefix'])
out.append('')
if prefix is not None:
if not isinstance(prefix, (list, tuple)):
prefix = [prefix]
out += prefix
out.append('')
if (((not cls.function_param.get('functions_defined_last', False))
and (function_definitions is not None))):
out += function_definitions
out.append('')
# Add code with begin/end book ends
if ((('exec_begin' in cls.function_param)
and (cls.function_param['exec_begin'] not in '\n'.join(lines)))):
out.append(cls.function_param['exec_begin'])
if not isinstance(lines, (list, tuple)):
lines = [lines]
for x in lines:
out.append(cls.function_param['indent'] + x)
out.append(cls.function_param.get('exec_end',
cls.function_param.get(
'block_end', '')))
else:
out += lines
if out[-1]:
out.append('')
# Add standard & user defined suffixes
if suffix is not None:
if not isinstance(suffix, (list, tuple)):
suffix = [suffix]
out += suffix
out.append('')
if ((('exec_suffix' in cls.function_param)
and (cls.function_param['exec_suffix'] not in lines))):
out.append(cls.function_param['exec_suffix'])
out.append('')
if (((cls.function_param.get('functions_defined_last', False))
and (function_definitions is not None))): # pragma: matlab
out += function_definitions
out.append('')
if cls.max_line_width:
new_out = []
for iout in out:
new_out += cls.split_line(iout)
out = new_out
return out
@classmethod
def escape_quotes(cls, x):
r"""Escape quotes in a string.
Args:
x (str): String to escape quotes in.
Returns:
str: x with escaped quotes.
"""
out = x.replace('"', '\\\"')
out = out.replace("'", "\\\'")
return out
@classmethod
def split_line(cls, line, length=None, force_split=False):
r"""Split a line as close to (or before) a given character as
possible.
Args:
line (str): Line to split.
length (int, optional): Maximum length of split lines. Defaults
to cls.max_line_width if not provided.
force_split (bool, optional): If True, force a split to
                occur at the specified length. Defaults to False.
Returns:
            list: Set of lines resulting from splitting the provided line.
"""
out = []
if not line.lstrip():
return [line]
nindent = line.index(line.lstrip()[0])
block_end = cls.function_param['block_end'].lower()
if '\n' in line:
out = line.split('\n')
for i in range(1, len(out)):
if out[i].lstrip().lower().startswith(block_end):
nindent -= len(cls.function_param['indent'])
out[i] = (nindent * ' ') + out[i]
new_out = []
for x in out:
new_out += cls.split_line(x, length=length,
force_split=force_split)
return new_out
if length is None:
length = cls.max_line_width
if (length is None) or (len(line) < length):
return [line]
length_allow = (length - len(cls.function_param.get(
'continuation_before', '')))
if force_split:
isplit = length_allow
else:
isplit = line[:length_allow].rindex(' ') + 1
if (isplit < nindent + 1) or (isplit >= len(line)):
out = [line]
else:
out.append(line[:isplit] + cls.function_param.get(
'continuation_before', ''))
out += cls.split_line(
((nindent * ' ') + cls.function_param.get(
'continuation_after', '') + line[isplit:]),
length=length, force_split=force_split)
return out
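    # Example (mirroring the expectations in get_testing_options below):
    #     cls.split_line('abcdef', length=3, force_split=True)
    # returns ['abc', 'def'], while a line that is already within the limit,
    # or whose only split point falls inside its indentation, is returned
    # unchanged as a single-element list.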
@classmethod
def input2output(cls, var):
r"""Perform conversion necessary to turn a variable extracted from a
function definition from an input to an output.
Args:
var (dict): Variable definition.
Returns:
dict: Updated variable definition.
"""
return var
@classmethod
def output2input(cls, var, in_definition=True):
r"""Perform conversion necessary to turn an output variable
        into a corresponding input that can be used to format a
function definition.
Args:
var (dict): Variable definition.
in_definition (bool, optional): If True, the returned
dictionary corresponds to an input variable in a
function definition. If False, the returned value
will correspond to an input to a function. Defaults to
True.
Returns:
dict: Updated variable definition.
"""
return var
@classmethod
def get_native_type(cls, **kwargs):
r"""Get the native type.
Args:
type (str, optional): Name of |yggdrasil| extended JSON
type or JSONSchema dictionary defining a datatype.
**kwargs: Additional keyword arguments may be used in determining
the precise declaration that should be used.
Returns:
str: The native type.
"""
if 'native_type' in kwargs:
return kwargs['native_type']
assert('json_type' not in kwargs)
json_type = kwargs.get('datatype', kwargs)
if isinstance(json_type, dict):
type_name = json_type.get('type', 'bytes')
else:
type_name = json_type
json_type = kwargs
if type_name == 'scalar':
type_name = json_type['subtype']
if (type_name == 'flag') and (type_name not in cls.type_map):
type_name = 'boolean'
return cls.type_map[type_name]
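    # Illustrative sketch (hypothetical type_map entries): for a driver with
    #     type_map = {'float': 'double', 'boolean': 'bool', ...}
    #     cls.get_native_type(datatype={'type': 'scalar', 'subtype': 'float'})
    # returns 'double', and a 'flag' datatype falls back to the 'boolean'
    # entry ('bool') when 'flag' itself is not in type_map.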
@classmethod
def get_json_type(cls, native_type):
r"""Get the JSON type from the native language type.
Args:
native_type (str): The native language type.
Returns:
str, dict: The JSON type.
"""
return cls.get_inverse_type_map()[native_type]
@classmethod
def write_finalize_iiter(cls, var):
r"""Get the lines necessary to finalize an input array for iteration.
Args:
var (dict, str): Name or information dictionary for the variable
finalized.
Returns:
list: The lines finalizing the variable.
"""
return []
@classmethod
def write_initialize_oiter(cls, var, value=None, requires_freeing=None):
r"""Get the lines necessary to initialize an array for iteration
output.
Args:
var (dict, str): Name or information dictionary for the variable
being initialized.
value (str, optional): Value that should be assigned to the
variable.
requires_freeing (list, optional): Existing list that variables
requiring freeing should be appended to. Defaults to None
and is ignored.
Returns:
list: The lines initializing the variable.
"""
return cls.write_initialize(var, value=value,
requires_freeing=requires_freeing)
@classmethod
def write_finalize_oiter(cls, var, value=None, requires_freeing=None):
r"""Get the lines necessary to finalize an array after iteration.
Args:
var (dict, str): Name or information dictionary for the variable
being initialized.
value (str, optional): Value that should be assigned to the
variable.
requires_freeing (list, optional): Existing list of variables
requiring freeing. Defaults to None and is ignored.
Returns:
list: The lines finalizing the variable.
"""
return []
@classmethod
def write_initialize(cls, var, value=None, requires_freeing=None):
r"""Get the code necessary to initialize a variable.
Args:
var (dict, str): Name or information dictionary for the variable
being declared.
value (str, optional): Value that should be assigned to the
variable after it is declared.
requires_freeing (list, optional): Existing list that variables
requiring freeing should be appended to. Defaults to None
and is ignored.
Returns:
list: The lines initializing the variable.
"""
out = []
if isinstance(var, str): # pragma: no cover
var = {'name': var}
if (value is None) and isinstance(var.get('datatype', False), dict):
init_type = 'init_%s' % var['datatype']['type']
free_type = 'free_%s' % var['datatype']['type']
if init_type in cls.function_param:
assert(free_type in cls.function_param)
# value = cls.format_function_param(init_type, **var['datatype'])
value = cls.function_param[init_type]
if requires_freeing is not None:
requires_freeing.append(var)
if value is not None:
out.append(cls.format_function_param(
'assign', name=var['name'], value=value))
return out
@classmethod
def write_declaration(cls, var, value=None, requires_freeing=None,
definitions=None, is_argument=False):
r"""Return the lines required to declare a variable with a certain
type.
Args:
var (dict, str): Name or information dictionary for the variable
being declared.
value (str, optional): Value that should be assigned to the
variable after it is declared.
requires_freeing (list, optional): Existing list that variables
requiring freeing should be appended to. Defaults to None
and is ignored.
definitions (list, optional): Existing list that variable
definitions should be added to. Defaults to None if not
provided and definitions will be included in the returned
lines.
is_argument (bool, optional): If True, the variable being
declared is an input argument. Defaults to False.
Returns:
list: The lines declaring the variable.
"""
if isinstance(var, str): # pragma: no cover
var = {'name': var}
type_name = cls.get_native_type(**var)
out = [cls.format_function_param('declare',
type_name=type_name,
variable=cls.get_name_declare(var))]
if is_argument:
return out
if definitions is None:
definitions = out
definitions += cls.write_initialize(var, value=value,
requires_freeing=requires_freeing)
return out
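    # Illustrative sketch (hypothetical function_param/type_map entries,
    # assuming str.format-style templates): with
    #     function_param = {'declare': '{type_name} {variable};',
    #                       'assign': '{name} = {value};', ...}
    # and type_map = {'float': 'double'},
    #     cls.write_declaration({'name': 'x', 'datatype': {'type': 'float'}},
    #                           value='1.0')
    # would return ['double x;', 'x = 1.0;']; the initialization line is
    # appended here because no separate definitions list was supplied.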
@classmethod
def get_name_declare(cls, var):
r"""Determine the name that should be used for declaration.
Args:
var (str, dict): Name of variable or dictionary of information.
Returns:
str: Modified name for declaration.
"""
if isinstance(var, str): # pragma: no cover
return var
assert(isinstance(var, dict))
out = var['name']
return out
@classmethod
def write_free(cls, var, **kwargs):
r"""Return the lines required to free a variable with a certain type.
Args:
var (dict, str): Name or information dictionary for the variable
being declared.
**kwargs: Additional keyword arguments are passed to format_function_param.
Returns:
list: The lines freeing the variable.
"""
if isinstance(var, str): # pragma: no cover
var = {'name': var}
out = []
if not var.get('dont_free', False):
if ((isinstance(var.get('datatype', False), dict)
and (('free_%s' % var['datatype']['type'])
in cls.function_param))):
out = [cls.format_function_param(
'free_%s' % var['datatype']['type'],
variable=var['name'], **kwargs)]
else:
out = [cls.format_function_param(
'free', variable=var['name'], **kwargs)]
return out
@classmethod
def write_assign_to_output(cls, dst_var, src_var, copy=False,
outputs_in_inputs=False, **kwargs):
r"""Write lines assigning a value to an output variable.
Args:
dst_var (str, dict): Name or information dictionary for
variable being assigned to.
src_var (str, dict): Name or information dictionary for
value being assigned to dst_var.
copy (bool, optional): If True, the assigned value is copied
during assignment. Defaults to False.
outputs_in_inputs (bool, optional): If True, outputs are passed
as input parameters. In some languages, this means that a
pointer or reference is passed (e.g. C) and so the assignment
should be to the memory indicated rather than the variable.
Defaults to False.
Returns:
list: Lines achieving assignment.
"""
datatype = None
if isinstance(dst_var, dict):
kwargs['name'] = dst_var['name']
datatype = dst_var['datatype']
else:
kwargs['name'] = dst_var
if isinstance(src_var, dict):
kwargs['value'] = src_var['name']
datatype = src_var['datatype']
else:
kwargs['value'] = src_var
if ((outputs_in_inputs and isinstance(dst_var, dict)
and isinstance(dst_var['datatype'], dict)
and ('copy_' + dst_var['datatype']['type']
in cls.function_param))):
copy = True
if copy:
if ((isinstance(datatype, dict)
and ('copy_' + datatype['type'] in cls.function_param))):
return [cls.format_function_param(
'copy_' + datatype['type'], **kwargs)]
else:
return [cls.format_function_param('assign_copy', **kwargs)]
else:
return [cls.format_function_param('assign', **kwargs)]
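    # Illustrative sketch (hypothetical 'assign' template): with
    #     function_param = {'assign': '{name} = {value};', ...}
    #     cls.write_assign_to_output('y', 'x')
    # returns ['y = x;']. When copy is requested and a type-specific
    # 'copy_<type>' or generic 'assign_copy' entry exists, that template is
    # used instead so the value is duplicated rather than aliased.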
@classmethod
def write_expand_single_element(cls, output_var, add_cond=False):
r"""Write lines allowing extraction of the only element from a single
element array as a stand-alone variable if the variable is an array
and only has one element.
Args:
output_var (str): Name of the variable that should be conditionally
expanded.
add_cond (list, optional): Additional conditions that must be
satisfied for the array element to be extracted. Defaults to
False and is ignored.
Returns:
            list: Lines adding the conditional expansion of single-element
                arrays.
"""
if 'istype' not in cls.function_param:
return []
cond = ('(%s) %s (%s %s 1)' % (
cls.format_function_param('istype',
variable=output_var,
type=cls.type_map['array']),
cls.function_param.get('and', '&&'),
cls.format_function_param('len',
variable=output_var),
cls.function_param.get('equ', '==')))
if add_cond:
for x in add_cond:
cond += f" {cls.function_param.get('and', '&&')} {x}"
out = cls.write_if_block(
cond,
cls.format_function_param(
'assign', name=output_var,
value=cls.format_function_param(
'index', variable=output_var,
index=int(cls.function_param.get('first_index', 0)))))
return out
@classmethod
def split_variables(cls, var_str):
r"""Split variable string include individual variables.
Args:
var_str (str): String containing multiple variables.
Returns:
list: Split variables.
"""
out = []
if var_str:
pairs = [(r'\[', r'\]'),
(r'\(', r'\)'),
(r'\{', r'\}'),
(r"'", r"'"),
(r'"', r'"')]
regex_ele = r''
present = False
for p in pairs:
if not any([(str(ip)[-1] in var_str) for ip in p]):
continue
present = True
regex_ele += (r'(?:%s[.\n]*?%s)|' % p)
            if present:
                regex_ele += '(?:.+?)'
                regex_ele = r'\s*(%s)\s*(?:,|$)' % regex_ele
                out = [x.group(1) for x in re.finditer(regex_ele, var_str)]
            else:
                out = [x.strip() for x in var_str.split(',')]
return out
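    # Example: for a plain comma-separated string without brackets or quotes,
    #     cls.split_variables('x, y, z')
    # returns ['x', 'y', 'z']; when bracket or quote characters are present,
    # the regular-expression branch above is used instead.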
@classmethod
def prepare_variables(cls, vars_list, in_definition=False,
for_yggdrasil=False):
r"""Concatenate a set of input variables such that it can be passed as a
single string to the function_call parameter.
Args:
vars_list (list): List of variable dictionaries containing info
(e.g. names) that should be used to prepare a string representing
input/output to/from a function call.
in_definition (bool, optional): If True, the returned sequence
will be of the format required for specifying variables
in a function definition. Defaults to False.
for_yggdrasil (bool, optional): If True, the variables will be
                prepared in the format expected by calls to yggdrasil
send/recv methods. Defaults to False.
Returns:
            str: Concatenated variables list.
"""
name_list = []
if not isinstance(vars_list, list):
vars_list = [vars_list]
for x in vars_list:
if isinstance(x, str):
name_list.append(x)
else:
assert(isinstance(x, dict))
name_list.append(x['name'])
return ', '.join(name_list)
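    # Example: cls.prepare_variables([{'name': 'x'}, 'y']) returns 'x, y'.
    # Note that in_definition and for_yggdrasil are accepted here but not
    # used by this base implementation.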
@classmethod
def prepare_input_variables(cls, vars_list, in_definition=False,
for_yggdrasil=False):
r"""Concatenate a set of input variables such that it can be passed as a
single string to the function_call parameter.
Args:
vars_list (list): List of variable dictionaries containing info
(e.g. names) that should be used to prepare a string representing
input to a function call.
in_definition (bool, optional): If True, the returned sequence
will be of the format required for specifying input
variables in a function definition. Defaults to False.
for_yggdrasil (bool, optional): If True, the variables will be
                prepared in the format expected by calls to yggdrasil
send/recv methods. Defaults to False.
Returns:
            str: Concatenated variables list.
"""
return cls.prepare_variables(vars_list, in_definition=in_definition,
for_yggdrasil=for_yggdrasil)
@classmethod
def prepare_output_variables(cls, vars_list, in_definition=False,
in_inputs=False, for_yggdrasil=False):
r"""Concatenate a set of output variables such that it can be passed as
a single string to the function_call parameter.
Args:
vars_list (list): List of variable dictionaries containing info
(e.g. names) that should be used to prepare a string representing
output from a function call.
in_definition (bool, optional): If True, the returned sequence
will be of the format required for specifying output
variables in a function definition. Defaults to False.
in_inputs (bool, optional): If True, the output variables should
                be formatted to be included as input variables. Defaults to
False.
for_yggdrasil (bool, optional): If True, the variables will be
                prepared in the format expected by calls to yggdrasil
send/recv methods. Defaults to False.
Returns:
            str: Concatenated variables list.
"""
if in_inputs:
vars_list = [cls.output2input(x, in_definition=in_definition)
for x in vars_list]
out = cls.prepare_variables(vars_list, in_definition=in_definition,
for_yggdrasil=for_yggdrasil)
if isinstance(vars_list, list) and (len(vars_list) > 1):
if in_definition and ('multiple_outputs_def' in cls.function_param):
out = cls.format_function_param('multiple_outputs_def', outputs=out)
elif 'multiple_outputs' in cls.function_param:
out = cls.format_function_param('multiple_outputs', outputs=out)
return out
@classmethod
def write_if_block(cls, cond, block_contents, else_block_contents=False):
r"""Return the lines required to complete a conditional block.
Args:
cond (str): Conditional that should determine block execution.
block_contents (list): Lines of code that should be executed inside
the block.
else_block_contents (list, optional): Lines of code that should be
executed inside the else clause of the block. Defaults to False
if not provided and an else clause is omitted.
Returns:
list: Lines of code performing conditional execution of a block.
"""
if cls.function_param is None:
raise NotImplementedError("function_param attribute not set for"
"language '%s'" % cls.language)
out = []
if not isinstance(cond, list):
cond = [cond]
block_contents = [block_contents]
assert(len(cond) == len(block_contents))
for i, (icond, iblock_contents) in enumerate(zip(cond, block_contents)):
if i == 0:
out.append(cls.format_function_param('if_begin', cond=icond))
else:
out.append(cls.format_function_param('if_elif', cond=icond))
if not isinstance(iblock_contents, (list, tuple)):
iblock_contents = [iblock_contents]
for x in iblock_contents:
out.append(cls.function_param['indent'] + x)
if else_block_contents:
out.append(cls.format_function_param('if_else'))
if not isinstance(else_block_contents, (list, tuple)):
else_block_contents = [else_block_contents]
for x in else_block_contents:
out.append(cls.function_param['indent'] + x)
# Close block
out.append(cls.function_param.get('if_end',
cls.function_param.get(
'block_end', '')))
return out
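    # Illustrative sketch (hypothetical templates, assuming str.format-style
    # substitution): with
    #     function_param = {'if_begin': 'if ({cond}) {{', 'if_else': '} else {',
    #                       'block_end': '}', 'indent': '  ', ...}
    #     cls.write_if_block('x > 0', 'y = 1;', else_block_contents='y = 0;')
    # would return
    #     ['if (x > 0) {', '  y = 1;', '} else {', '  y = 0;', '}']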
@classmethod
def write_for_loop(cls, iter_var, iter_begin, iter_end, loop_contents):
r"""Return the lines required to complete a for loop.
Args:
iter_var (str): Name of variable that iterator should use.
iter_begin (int): Beginning of iteration.
iter_end (int): End of iteration.
loop_contents (list): Lines of code that should be executed inside
the loop.
Returns:
list: Lines of code performing a loop.
"""
if cls.function_param is None:
raise NotImplementedError("function_param attribute not set for"
"language '%s'" % cls.language)
out = []
# Opening for statement line
out.append(cls.format_function_param('for_begin', iter_var=iter_var,
iter_begin=iter_begin,
iter_end=iter_end))
# Indent loop contents
if not isinstance(loop_contents, (list, tuple)):
loop_contents = [loop_contents]
for x in loop_contents:
out.append(cls.function_param['indent'] + x)
# Close block
out.append(cls.function_param.get('for_end',
cls.function_param.get(
'block_end', '')))
return out
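    # Illustrative sketch (hypothetical templates): with
    #     function_param = {'for_begin': ('for ({iter_var} = {iter_begin}; '
    #                                     '{iter_var} < {iter_end}; {iter_var}++) {{'),
    #                       'indent': '  ', 'block_end': '}', ...}
    #     cls.write_for_loop('i', 0, 3, 'x[i] = 0;')
    # would return ['for (i = 0; i < 3; i++) {', '  x[i] = 0;', '}']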
@classmethod
def write_while_loop(cls, cond, loop_contents):
r"""Return the lines required to complete a for loop.
Args:
cond (str): Conditional that should determine loop execution.
loop_contents (list): Lines of code that should be executed inside
the loop.
Returns:
list: Lines of code performing a loop.
"""
if cls.function_param is None:
raise NotImplementedError("function_param attribute not set for"
"language '%s'" % cls.language)
out = []
# Opening for statement line
out.append(cls.format_function_param('while_begin', cond=cond))
# Indent loop contents
if not isinstance(loop_contents, (list, tuple)):
loop_contents = [loop_contents]
for x in loop_contents:
out.append(cls.function_param['indent'] + x)
# Close block
out.append(cls.function_param.get('while_end',
cls.function_param.get(
'block_end', '')))
return out
@classmethod
def write_try_except(cls, try_contents, except_contents, error_var='e',
error_type=None):
r"""Return the lines required to complete a try/except block.
Args:
try_contents (list): Lines of code that should be executed inside
the try block.
except_contents (list): Lines of code that should be executed inside
the except block.
error_var (str, optional): Name of variable where the caught error
should be stored. Defaults to 'e'.
error_type (str, optional): Name of error type that should be caught.
If not provided, defaults to None and will be set based on the
class function_param entry for 'try_error_type'.
Returns:
            Lines of code performing a try/except block.
"""
if (cls.function_param is None) or ('try_begin' not in cls.function_param):
raise NotImplementedError("function_param attribute not set for"
"language '%s'" % cls.language)
if error_type is None:
error_type = cls.function_param.get('try_error_type', None)
out = []
# Try block contents
if not isinstance(try_contents, (list, tuple)):
try_contents = [try_contents]
out.append(cls.function_param['try_begin'])
for x in try_contents:
out.append(cls.function_param['indent'] + x)
# Except block contents
if not isinstance(except_contents, (list, tuple)):
except_contents = [except_contents]
out.append(cls.format_function_param('try_except', error_var=error_var,
error_type=error_type))
for x in except_contents:
out.append(cls.function_param['indent'] + x)
# Close block
out.append(cls.function_param.get('try_end',
cls.function_param.get(
'block_end', '')))
return out
@classmethod
def get_testing_options(cls):
r"""Method to return a dictionary of testing options for this class.
Returns:
dict: Dictionary of variables to use for testing. Key/value pairs:
kwargs (dict): Keyword arguments for driver instance.
deps (list): Dependencies to install.
"""
out = dict(
kwargs={}, deps=[],
write_function_def_params=[
{'inputs': [{'name': 'x', 'value': 1.0,
'datatype': {'type': 'float',
'precision': 32,
'units': 'cm'}}],
'outputs': [{'name': 'y',
'datatype': {'type': 'float',
'precision': 32,
'units': 'cm'}}]}],
split_lines=[('abcdef', {'length': 3, 'force_split': True},
['abc', 'def']),
(' abc', {'length': 3, 'force_split': True},
[' abc'])])
return out
|
"""
API operations for Workflows
"""
import hashlib
import json
import logging
import os
from typing import (
Any,
Dict,
List,
Optional,
)
from fastapi import (
Body,
Path,
Query,
Response,
status,
)
from gxformat2._yaml import ordered_dump
from markupsafe import escape
from pydantic import Extra
from galaxy import (
exceptions,
model,
util,
)
from galaxy.files.uris import (
stream_url_to_str,
validate_uri_access,
)
from galaxy.managers.context import ProvidesUserContext
from galaxy.managers.jobs import (
fetch_job_states,
invocation_job_source_iter,
summarize_job_metrics,
)
from galaxy.managers.workflows import (
MissingToolsException,
RefactorRequest,
WorkflowCreateOptions,
WorkflowUpdateOptions,
)
from galaxy.model.item_attrs import UsesAnnotations
from galaxy.schema.fields import EncodedDatabaseIdField
from galaxy.schema.schema import (
AsyncFile,
AsyncTaskResultSummary,
SetSlugPayload,
ShareWithPayload,
ShareWithStatus,
SharingStatus,
StoreContentSource,
WorkflowSortByEnum,
WriteStoreToPayload,
)
from galaxy.structured_app import StructuredApp
from galaxy.tool_shed.galaxy_install.install_manager import InstallRepositoryManager
from galaxy.tools import recommendations
from galaxy.tools.parameters import populate_state
from galaxy.tools.parameters.basic import workflow_building_modes
from galaxy.util.sanitize_html import sanitize_html
from galaxy.version import VERSION
from galaxy.web import (
expose_api,
expose_api_anonymous,
expose_api_anonymous_and_sessionless,
expose_api_raw,
expose_api_raw_anonymous_and_sessionless,
format_return_as_json,
)
from galaxy.webapps.base.controller import (
SharableMixin,
url_for,
UsesStoredWorkflowMixin,
)
from galaxy.webapps.base.webapp import GalaxyWebTransaction
from galaxy.webapps.galaxy.services.base import (
ConsumesModelStores,
ServesExportStores,
)
from galaxy.webapps.galaxy.services.invocations import (
InvocationIndexPayload,
InvocationSerializationParams,
InvocationsService,
PrepareStoreDownloadPayload,
)
from galaxy.webapps.galaxy.services.workflows import (
WorkflowIndexPayload,
WorkflowsService,
)
from galaxy.workflow.extract import extract_workflow
from galaxy.workflow.modules import module_factory
from galaxy.workflow.run import queue_invoke
from galaxy.workflow.run_request import build_workflow_run_configs
from . import (
BaseGalaxyAPIController,
depends,
DependsOnTrans,
IndexQueryTag,
Router,
search_query_param,
)
log = logging.getLogger(__name__)
router = Router(tags=["workflows"])
class CreateInvocationFromStore(StoreContentSource):
history_id: Optional[str]
class Config:
extra = Extra.allow
class WorkflowsAPIController(
BaseGalaxyAPIController,
UsesStoredWorkflowMixin,
UsesAnnotations,
SharableMixin,
ServesExportStores,
ConsumesModelStores,
):
service: WorkflowsService = depends(WorkflowsService)
invocations_service: InvocationsService = depends(InvocationsService)
def __init__(self, app: StructuredApp):
super().__init__(app)
self.history_manager = app.history_manager
self.workflow_manager = app.workflow_manager
self.workflow_contents_manager = app.workflow_contents_manager
self.tool_recommendations = recommendations.ToolRecommendations()
@expose_api
def get_workflow_menu(self, trans: ProvidesUserContext, **kwd):
"""
Get workflows present in the tools panel
GET /api/workflows/menu
"""
user = trans.user
ids_in_menu = [x.stored_workflow_id for x in user.stored_workflow_menu_entries]
workflows = self.get_workflows_list(trans, **kwd)
return {"ids_in_menu": ids_in_menu, "workflows": workflows}
@expose_api
def set_workflow_menu(self, trans: GalaxyWebTransaction, payload=None, **kwd):
"""
Save workflow menu to be shown in the tool panel
PUT /api/workflows/menu
"""
payload = payload or {}
user = trans.user
workflow_ids = payload.get("workflow_ids")
if workflow_ids is None:
workflow_ids = []
        elif not isinstance(workflow_ids, list):
workflow_ids = [workflow_ids]
workflow_ids_decoded = []
# Decode the encoded workflow ids
for ids in workflow_ids:
workflow_ids_decoded.append(trans.security.decode_id(ids))
sess = trans.sa_session
# This explicit remove seems like a hack, need to figure out
# how to make the association do it automatically.
for m in user.stored_workflow_menu_entries:
sess.delete(m)
user.stored_workflow_menu_entries = []
q = sess.query(model.StoredWorkflow)
# To ensure id list is unique
seen_workflow_ids = set()
for wf_id in workflow_ids_decoded:
if wf_id in seen_workflow_ids:
continue
else:
seen_workflow_ids.add(wf_id)
m = model.StoredWorkflowMenuEntry()
m.stored_workflow = q.get(wf_id)
user.stored_workflow_menu_entries.append(m)
sess.flush()
message = "Menu updated."
trans.set_message(message)
return {"message": message, "status": "done"}
def get_workflows_list(
self,
trans: ProvidesUserContext,
missing_tools=False,
show_published=None,
show_shared=None,
show_hidden=False,
show_deleted=False,
**kwd,
):
"""
Displays a collection of workflows.
:param show_published: Optional boolean to include published workflows
If unspecified this behavior depends on whether the request
is coming from an authenticated session. The default is true
            for anonymous API requests and false otherwise.
:type show_published: boolean
:param show_hidden: if True, show hidden workflows
:type show_hidden: boolean
:param show_deleted: if True, show deleted workflows
:type show_deleted: boolean
:param show_shared: Optional boolean to include shared workflows.
If unspecified this behavior depends on show_deleted/show_hidden.
            Defaults to false if show_hidden or show_deleted is true, and to
            true otherwise.
:param missing_tools: if True, include a list of missing tools per workflow
:type missing_tools: boolean
"""
show_published = util.string_as_bool_or_none(show_published)
show_hidden = util.string_as_bool(show_hidden)
show_deleted = util.string_as_bool(show_deleted)
missing_tools = util.string_as_bool(missing_tools)
show_shared = util.string_as_bool_or_none(show_shared)
payload = WorkflowIndexPayload(
show_published=show_published,
show_hidden=show_hidden,
show_deleted=show_deleted,
show_shared=show_shared,
missing_tools=missing_tools,
)
workflows, _ = self.service.index(trans, payload)
return workflows
@expose_api_anonymous_and_sessionless
def show(self, trans: GalaxyWebTransaction, id, **kwd):
"""
GET /api/workflows/{encoded_workflow_id}
:param instance: true if fetch by Workflow ID instead of StoredWorkflow id, false
by default.
:type instance: boolean
Displays information needed to run a workflow.
"""
stored_workflow = self.__get_stored_workflow(trans, id, **kwd)
if stored_workflow.importable is False and stored_workflow.user != trans.user and not trans.user_is_admin:
if (
trans.sa_session.query(model.StoredWorkflowUserShareAssociation)
.filter_by(user=trans.user, stored_workflow=stored_workflow)
.count()
== 0
):
message = "Workflow is neither importable, nor owned by or shared with current user"
raise exceptions.ItemAccessibilityException(message)
if kwd.get("legacy", False):
style = "legacy"
else:
style = "instance"
version = kwd.get("version")
if version is None and util.string_as_bool(kwd.get("instance", "false")):
# A Workflow instance may not be the latest workflow version attached to StoredWorkflow.
# This figures out the correct version so that we return the correct Workflow and version.
workflow_id = self.decode_id(id)
for i, workflow in enumerate(reversed(stored_workflow.workflows)):
if workflow.id == workflow_id:
version = i
break
return self.workflow_contents_manager.workflow_to_dict(trans, stored_workflow, style=style, version=version)
@expose_api
def show_versions(self, trans: GalaxyWebTransaction, workflow_id, **kwds):
"""
GET /api/workflows/{encoded_workflow_id}/versions
:param instance: true if fetch by Workflow ID instead of StoredWorkflow id, false
by default.
:type instance: boolean
Lists all versions of this workflow.
"""
instance = util.string_as_bool(kwds.get("instance", "false"))
stored_workflow = self.workflow_manager.get_stored_accessible_workflow(
trans, workflow_id, by_stored_id=not instance
)
return [
{"version": i, "update_time": str(w.update_time), "steps": len(w.steps)}
for i, w in enumerate(reversed(stored_workflow.workflows))
]
@expose_api
def create(self, trans: GalaxyWebTransaction, payload=None, **kwd):
"""
POST /api/workflows
Create workflows in various ways.
:param from_history_id: Id of history to extract a workflow from.
:type from_history_id: str
:param job_ids: If from_history_id is set - optional list of jobs to include when extracting a workflow from history
:type job_ids: str
:param dataset_ids: If from_history_id is set - optional list of HDA "hid"s corresponding to workflow inputs when extracting a workflow from history
:type dataset_ids: str
:param dataset_collection_ids: If from_history_id is set - optional list of HDCA "hid"s corresponding to workflow inputs when extracting a workflow from history
:type dataset_collection_ids: str
:param workflow_name: If from_history_id is set - name of the workflow to create when extracting a workflow from history
:type workflow_name: str
"""
ways_to_create = {
"archive_source",
"from_history_id",
"from_path",
"shared_workflow_id",
"workflow",
}
if trans.user_is_bootstrap_admin:
raise exceptions.RealUserRequiredException("Only real users can create or run workflows.")
if payload is None or len(ways_to_create.intersection(payload)) == 0:
message = f"One parameter among - {", ".join(ways_to_create)} - must be specified"
raise exceptions.RequestParameterMissingException(message)
if len(ways_to_create.intersection(payload)) > 1:
message = f"Only one parameter among - {", ".join(ways_to_create)} - must be specified"
raise exceptions.RequestParameterInvalidException(message)
if "archive_source" in payload:
archive_source = payload["archive_source"]
archive_file = payload.get("archive_file")
archive_data = None
if archive_source:
validate_uri_access(archive_source, trans.user_is_admin, trans.app.config.fetch_url_allowlist_ips)
if archive_source.startswith("file://"):
workflow_src = {"src": "from_path", "path": archive_source[len("file://") :]}
payload["workflow"] = workflow_src
return self.__api_import_new_workflow(trans, payload, **kwd)
elif archive_source == "trs_tool":
trs_server = payload.get("trs_server")
trs_tool_id = payload.get("trs_tool_id")
trs_version_id = payload.get("trs_version_id")
import_source = None
archive_data = self.app.trs_proxy.get_version_descriptor(trs_server, trs_tool_id, trs_version_id)
else:
try:
archive_data = stream_url_to_str(
archive_source, trans.app.file_sources, prefix="gx_workflow_download"
)
import_source = "URL"
except Exception:
raise exceptions.MessageException(f"Failed to open URL '{escape(archive_source)}'.")
elif hasattr(archive_file, "file"):
uploaded_file = archive_file.file
uploaded_file_name = uploaded_file.name
if os.path.getsize(os.path.abspath(uploaded_file_name)) > 0:
archive_data = util.unicodify(uploaded_file.read())
import_source = "uploaded file"
else:
raise exceptions.MessageException("You attempted to upload an empty file.")
else:
raise exceptions.MessageException("Please provide a URL or file.")
return self.__api_import_from_archive(trans, archive_data, import_source, payload=payload)
if "from_history_id" in payload:
from_history_id = payload.get("from_history_id")
from_history_id = self.decode_id(from_history_id)
history = self.history_manager.get_accessible(from_history_id, trans.user, current_history=trans.history)
job_ids = [self.decode_id(_) for _ in payload.get("job_ids", [])]
dataset_ids = payload.get("dataset_ids", [])
dataset_collection_ids = payload.get("dataset_collection_ids", [])
workflow_name = payload["workflow_name"]
stored_workflow = extract_workflow(
trans=trans,
user=trans.user,
history=history,
job_ids=job_ids,
dataset_ids=dataset_ids,
dataset_collection_ids=dataset_collection_ids,
workflow_name=workflow_name,
)
item = stored_workflow.to_dict(value_mapper={"id": trans.security.encode_id})
item["url"] = url_for("workflow", id=item["id"])
return item
if "from_path" in payload:
from_path = payload.get("from_path")
object_id = payload.get("object_id")
workflow_src = {"src": "from_path", "path": from_path}
if object_id is not None:
workflow_src["object_id"] = object_id
payload["workflow"] = workflow_src
return self.__api_import_new_workflow(trans, payload, **kwd)
if "shared_workflow_id" in payload:
workflow_id = payload["shared_workflow_id"]
return self.__api_import_shared_workflow(trans, workflow_id, payload)
if "workflow" in payload:
return self.__api_import_new_workflow(trans, payload, **kwd)
# This was already raised above, but just in case...
raise exceptions.RequestParameterMissingException("No method for workflow creation supplied.")
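    # Example payloads for the create endpoint above (illustrative; ids and
    # paths are placeholders):
    #     {"from_history_id": "<encoded history id>", "workflow_name": "My workflow",
    #      "job_ids": [...], "dataset_ids": [...]}             # extract from a history
    #     {"archive_source": "https://example.org/workflow.ga"}  # import from a URL
    #     {"from_path": "/srv/galaxy/workflows/workflow.ga"}     # import from a server path
    #     {"shared_workflow_id": "<encoded workflow id>"}        # copy a shared workflow
    #     {"workflow": {...}}                                    # raw workflow description
    # Exactly one of these creation keys may be supplied per request.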
@expose_api_raw_anonymous_and_sessionless
def workflow_dict(self, trans: GalaxyWebTransaction, workflow_id, **kwd):
"""
GET /api/workflows/{encoded_workflow_id}/download
Returns a selected workflow.
:type style: str
        :param style: Style of export. The default is 'export', which is meant to be used
with workflow import endpoints. Other formats such as 'instance', 'editor',
'run' are more tied to the GUI and should not be considered stable APIs.
The default format for 'export' is specified by the
admin with the `default_workflow_export_format` config
option. Style can be specified as either 'ga' or 'format2' directly
to be explicit about which format to download.
:param instance: true if fetch by Workflow ID instead of StoredWorkflow id, false
by default.
:type instance: boolean
"""
stored_workflow = self.__get_stored_accessible_workflow(trans, workflow_id, **kwd)
style = kwd.get("style", "export")
download_format = kwd.get("format")
version = kwd.get("version")
history_id = kwd.get("history_id")
history = None
if history_id:
history = self.history_manager.get_accessible(
self.decode_id(history_id), trans.user, current_history=trans.history
)
ret_dict = self.workflow_contents_manager.workflow_to_dict(
trans, stored_workflow, style=style, version=version, history=history
)
if download_format == "json-download":
sname = stored_workflow.name
sname = "".join(c in util.FILENAME_VALID_CHARS and c or "_" for c in sname)[0:150]
if ret_dict.get("format-version", None) == "0.1":
extension = "ga"
else:
extension = "gxwf.json"
trans.response.headers[
"Content-Disposition"
] = f'attachment; filename="Galaxy-Workflow-{sname}.{extension}"'
trans.response.set_content_type("application/galaxy-archive")
if style == "format2" and download_format != "json-download":
return ordered_dump(ret_dict)
else:
return format_return_as_json(ret_dict, pretty=True)
@expose_api
def delete(self, trans: ProvidesUserContext, id, **kwd):
"""
DELETE /api/workflows/{encoded_workflow_id}
Deletes a specified workflow
Author: rpark
copied from galaxy.web.controllers.workflows.py (delete)
"""
stored_workflow = self.__get_stored_workflow(trans, id, **kwd)
# check to see if user has permissions to selected workflow
if stored_workflow.user != trans.user and not trans.user_is_admin:
raise exceptions.InsufficientPermissionsException()
# Mark a workflow as deleted
stored_workflow.deleted = True
trans.sa_session.flush()
# TODO: Unsure of response message to let api know that a workflow was successfully deleted
return f"Workflow '{stored_workflow.name}' successfully deleted"
@expose_api
def import_new_workflow_deprecated(self, trans: GalaxyWebTransaction, payload, **kwd):
"""
POST /api/workflows/upload
Importing dynamic workflows from the api. Return newly generated workflow id.
Author: rpark
# currently assumes payload['workflow'] is a json representation of a workflow to be inserted into the database
        Deprecated in favor of POST /api/workflows with the 'workflow' encoded
        in the payload in the same way.
"""
return self.__api_import_new_workflow(trans, payload, **kwd)
@expose_api
def update(self, trans: GalaxyWebTransaction, id, payload, **kwds):
"""
PUT /api/workflows/{id}
Update the workflow stored with ``id``.
:type id: str
:param id: the encoded id of the workflow to update
:param instance: true if fetch by Workflow ID instead of StoredWorkflow id, false by default.
:type instance: boolean
:type payload: dict
:param payload: a dictionary containing any or all the
:workflow:
the json description of the workflow as would be
produced by GET workflows/<id>/download or
given to `POST workflows`
The workflow contents will be updated to target this.
:name:
optional string name for the workflow, if not present in payload,
name defaults to existing name
:annotation:
optional string annotation for the workflow, if not present in payload,
annotation defaults to existing annotation
:menu_entry:
optional boolean marking if the workflow should appear in the user\'s menu,
if not present, workflow menu entries are not modified
:tags:
optional list containing list of tags to add to the workflow (overwriting
existing tags), if not present, tags are not modified
:from_tool_form:
True iff encoded state coming in is encoded for the tool form.
:rtype: dict
:returns: serialized version of the workflow
"""
stored_workflow = self.__get_stored_workflow(trans, id, **kwds)
workflow_dict = payload.get("workflow", {})
workflow_dict.update({k: v for k, v in payload.items() if k not in workflow_dict})
if workflow_dict:
require_flush = False
raw_workflow_description = self.__normalize_workflow(trans, workflow_dict)
workflow_dict = raw_workflow_description.as_dict
new_workflow_name = workflow_dict.get("name")
old_workflow = stored_workflow.latest_workflow
name_updated = new_workflow_name and new_workflow_name != stored_workflow.name
steps_updated = "steps" in workflow_dict
if name_updated and not steps_updated:
sanitized_name = sanitize_html(new_workflow_name or old_workflow.name)
workflow = old_workflow.copy(user=trans.user)
workflow.stored_workflow = stored_workflow
workflow.name = sanitized_name
stored_workflow.name = sanitized_name
stored_workflow.latest_workflow = workflow
                trans.sa_session.add_all([workflow, stored_workflow])
require_flush = True
if "hidden" in workflow_dict and stored_workflow.hidden != workflow_dict["hidden"]:
stored_workflow.hidden = workflow_dict["hidden"]
require_flush = True
if "published" in workflow_dict and stored_workflow.published != workflow_dict["published"]:
stored_workflow.published = workflow_dict["published"]
require_flush = True
if "importable" in workflow_dict and stored_workflow.importable != workflow_dict["importable"]:
stored_workflow.importable = workflow_dict["importable"]
require_flush = True
if "annotation" in workflow_dict and not steps_updated:
newAnnotation = sanitize_html(workflow_dict["annotation"])
self.add_item_annotation(trans.sa_session, trans.user, stored_workflow, newAnnotation)
require_flush = True
if "menu_entry" in workflow_dict or "show_in_tool_panel" in workflow_dict:
show_in_panel = workflow_dict.get("menu_entry") or workflow_dict.get("show_in_tool_panel")
stored_workflow_menu_entries = trans.user.stored_workflow_menu_entries
decoded_id = trans.security.decode_id(id)
if show_in_panel:
workflow_ids = [wf.stored_workflow_id for wf in stored_workflow_menu_entries]
if decoded_id not in workflow_ids:
menu_entry = model.StoredWorkflowMenuEntry()
menu_entry.stored_workflow = stored_workflow
stored_workflow_menu_entries.append(menu_entry)
trans.sa_session.add(menu_entry)
require_flush = True
else:
# remove if in list
entries = {x.stored_workflow_id: x for x in stored_workflow_menu_entries}
if decoded_id in entries:
stored_workflow_menu_entries.remove(entries[decoded_id])
require_flush = True
# set tags
if "tags" in workflow_dict:
trans.app.tag_handler.set_tags_from_list(
user=trans.user, item=stored_workflow, new_tags_list=workflow_dict["tags"]
)
if require_flush:
trans.sa_session.flush()
if "steps" in workflow_dict:
try:
workflow_update_options = WorkflowUpdateOptions(**payload)
workflow, errors = self.workflow_contents_manager.update_workflow_from_raw_description(
trans,
stored_workflow,
raw_workflow_description,
workflow_update_options,
)
except MissingToolsException:
raise exceptions.MessageException(
"This workflow contains missing tools. It cannot be saved until they have been removed from the workflow or installed."
)
else:
message = "Updating workflow requires dictionary containing 'workflow' attribute with new JSON description."
raise exceptions.RequestParameterInvalidException(message)
return self.workflow_contents_manager.workflow_to_dict(trans, stored_workflow, style="instance")
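    # Example payload for the update endpoint above (illustrative): a rename
    # plus annotation/tag change that leaves the steps untouched could look like
    #     {"workflow": {"name": "Renamed workflow",
    #                   "annotation": "Updated description",
    #                   "tags": ["rna-seq"]}}
    # whereas including "steps" inside "workflow" triggers a full content
    # update via update_workflow_from_raw_description.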
@expose_api
def refactor(self, trans, id, payload, **kwds):
"""
* PUT /api/workflows/{id}/refactor
updates the workflow stored with ``id``
:type id: str
:param id: the encoded id of the workflow to update
:param instance: true if fetch by Workflow ID instead of StoredWorkflow id, false
by default.
:type instance: boolean
:type payload: dict
:param payload: a dictionary containing list of actions to apply.
:rtype: dict
:returns: serialized version of the workflow
"""
stored_workflow = self.__get_stored_workflow(trans, id, **kwds)
refactor_request = RefactorRequest(**payload)
return self.workflow_contents_manager.refactor(trans, stored_workflow, refactor_request)
@expose_api
def build_module(self, trans: GalaxyWebTransaction, payload=None):
"""
POST /api/workflows/build_module
Builds module models for the workflow editor.
"""
if payload is None:
payload = {}
inputs = payload.get("inputs", {})
trans.workflow_building_mode = workflow_building_modes.ENABLED
module = module_factory.from_dict(trans, payload, from_tool_form=True)
if "tool_state" not in payload:
module_state: Dict[str, Any] = {}
populate_state(trans, module.get_inputs(), inputs, module_state, check=False)
module.recover_state(module_state, from_tool_form=True)
return {
"label": inputs.get("__label", ""),
"annotation": inputs.get("__annotation", ""),
"name": module.get_name(),
"tool_state": module.get_state(),
"content_id": module.get_content_id(),
"inputs": module.get_all_inputs(connectable_only=True),
"outputs": module.get_all_outputs(),
"config_form": module.get_config_form(),
"post_job_actions": module.get_post_job_actions(inputs),
}
@expose_api
def get_tool_predictions(self, trans: ProvidesUserContext, payload, **kwd):
"""
POST /api/workflows/get_tool_predictions
Fetch predicted tools for a workflow
:type payload: dict
:param payload:
a dictionary containing two parameters
'tool_sequence' - comma separated sequence of tool ids
'remote_model_url' - (optional) path to the deep learning model
"""
remote_model_url = payload.get("remote_model_url", trans.app.config.tool_recommendation_model_path)
tool_sequence = payload.get("tool_sequence", "")
if "tool_sequence" not in payload or remote_model_url is None:
return
tool_sequence, recommended_tools = self.tool_recommendations.get_predictions(
trans, tool_sequence, remote_model_url
)
return {"current_tool": tool_sequence, "predicted_data": recommended_tools}
#
# -- Helper methods --
#
def __api_import_from_archive(self, trans: GalaxyWebTransaction, archive_data, source=None, payload=None):
payload = payload or {}
try:
data = json.loads(archive_data)
except Exception:
if "GalaxyWorkflow" in archive_data:
data = {"yaml_content": archive_data}
else:
raise exceptions.MessageException("The data content does not appear to be a valid workflow.")
if not data:
raise exceptions.MessageException("The data content is missing.")
raw_workflow_description = self.__normalize_workflow(trans, data)
workflow_create_options = WorkflowCreateOptions(**payload)
workflow, missing_tool_tups = self._workflow_from_dict(
trans, raw_workflow_description, workflow_create_options, source=source
)
workflow_id = workflow.id
workflow = workflow.latest_workflow
response = {
"message": f"Workflow '{escape(workflow.name)}' imported successfully.",
"status": "success",
"id": trans.security.encode_id(workflow_id),
}
if workflow.has_errors:
response["message"] = "Imported, but some steps in this workflow have validation errors."
response["status"] = "error"
elif len(workflow.steps) == 0:
response["message"] = "Imported, but this workflow has no steps."
response["status"] = "error"
elif workflow.has_cycles:
response["message"] = "Imported, but this workflow contains cycles."
response["status"] = "error"
return response
def __api_import_new_workflow(self, trans: GalaxyWebTransaction, payload, **kwd):
data = payload["workflow"]
raw_workflow_description = self.__normalize_workflow(trans, data)
workflow_create_options = WorkflowCreateOptions(**payload)
workflow, missing_tool_tups = self._workflow_from_dict(
trans,
raw_workflow_description,
workflow_create_options,
)
        # id of the newly created Galaxy workflow
workflow_id = workflow.id
        # API-encoded id
encoded_id = trans.security.encode_id(workflow_id)
item = workflow.to_dict(value_mapper={"id": trans.security.encode_id})
item["annotations"] = [x.annotation for x in workflow.annotations]
item["url"] = url_for("workflow", id=encoded_id)
item["owner"] = workflow.user.username
item["number_of_steps"] = len(workflow.latest_workflow.steps)
return item
def __normalize_workflow(self, trans: GalaxyWebTransaction, as_dict):
return self.workflow_contents_manager.normalize_workflow_format(trans, as_dict)
@expose_api
def import_shared_workflow_deprecated(self, trans: GalaxyWebTransaction, payload, **kwd):
"""
POST /api/workflows/import
Import a workflow shared by other users.
:param workflow_id: the workflow id (required)
:type workflow_id: str
:raises: exceptions.MessageException, exceptions.ObjectNotFound
"""
# Pull parameters out of payload.
workflow_id = payload.get("workflow_id", None)
if workflow_id is None:
raise exceptions.ObjectAttributeMissingException("Missing required parameter 'workflow_id'.")
self.__api_import_shared_workflow(trans, workflow_id, payload)
def __api_import_shared_workflow(self, trans: GalaxyWebTransaction, workflow_id, payload, **kwd):
try:
stored_workflow = self.get_stored_workflow(trans, workflow_id, check_ownership=False)
except Exception:
raise exceptions.ObjectNotFound(f"Malformed workflow id ( {workflow_id} ) specified.")
if stored_workflow.importable is False:
raise exceptions.ItemAccessibilityException(
"The owner of this workflow has disabled imports via this link."
)
elif stored_workflow.deleted:
raise exceptions.ItemDeletionException("You can't import this workflow because it has been deleted.")
imported_workflow = self._import_shared_workflow(trans, stored_workflow)
item = imported_workflow.to_dict(value_mapper={"id": trans.security.encode_id})
encoded_id = trans.security.encode_id(imported_workflow.id)
item["url"] = url_for("workflow", id=encoded_id)
return item
@expose_api
def invoke(self, trans: GalaxyWebTransaction, workflow_id, payload, **kwd):
"""
POST /api/workflows/{encoded_workflow_id}/invocations
Schedule the workflow specified by `workflow_id` to run.
.. note:: This method takes the same arguments as
:func:`galaxy.webapps.galaxy.api.workflows.WorkflowsAPIController.create` above.
:raises: exceptions.MessageException, exceptions.RequestParameterInvalidException
"""
# Get workflow + accessibility check.
stored_workflow = self.__get_stored_accessible_workflow(trans, workflow_id, instance=kwd.get("instance", False))
workflow = stored_workflow.latest_workflow
run_configs = build_workflow_run_configs(trans, workflow, payload)
is_batch = payload.get("batch")
if not is_batch and len(run_configs) != 1:
raise exceptions.RequestParameterInvalidException("Must specify 'batch' to use batch parameters.")
require_exact_tool_versions = util.string_as_bool(payload.get("require_exact_tool_versions", "true"))
tools = self.workflow_contents_manager.get_all_tools(workflow)
missing_tools = [
tool
for tool in tools
if not self.app.toolbox.has_tool(
tool["tool_id"], tool_version=tool["tool_version"], exact=require_exact_tool_versions
)
]
if missing_tools:
missing_tools_message = "Workflow was not invoked; the following required tools are not installed: "
if require_exact_tool_versions:
missing_tools_message += ", ".join(
[f"{tool["tool_id"]} (version {tool["tool_version"]})" for tool in missing_tools]
)
else:
missing_tools_message += ", ".join([tool["tool_id"] for tool in missing_tools])
raise exceptions.MessageException(missing_tools_message)
invocations = []
for run_config in run_configs:
workflow_scheduler_id = payload.get("scheduler", None)
# TODO: workflow scheduler hints
work_request_params = dict(scheduler=workflow_scheduler_id)
workflow_invocation = queue_invoke(
trans=trans,
workflow=workflow,
workflow_run_config=run_config,
request_params=work_request_params,
flush=False,
)
invocations.append(workflow_invocation)
trans.sa_session.flush()
invocations = [self.encode_all_ids(trans, invocation.to_dict(), recursive=True) for invocation in invocations]
if is_batch:
return invocations
else:
return invocations[0]
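    # Example payload for the invoke endpoint above (illustrative; the exact
    # run-config keys are defined by build_workflow_run_configs, not here):
    #     {"history_id": "<encoded history id>",
    #      "inputs": {"0": {"src": "hda", "id": "<encoded dataset id>"}}}
    # Setting "batch": true allows a list of run configurations to be
    # scheduled in a single request, in which case a list of invocations is
    # returned instead of a single one.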
@expose_api
def index_invocations(self, trans: GalaxyWebTransaction, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations
GET /api/invocations
Get the list of a user's workflow invocations. If workflow_id is supplied
(either via URL or query parameter) it should be an encoded StoredWorkflow id
and returned invocations will be restricted to that workflow. history_id (an encoded
        History id) can be used to further restrict the query. If neither a workflow_id nor
history_id is supplied, all the current user's workflow invocations will be indexed
(as determined by the invocation being executed on one of the user's histories).
:param workflow_id: an encoded stored workflow id to restrict query to
:type workflow_id: str
:param instance: true if fetch by Workflow ID instead of StoredWorkflow id, false
by default.
:type instance: boolean
:param history_id: an encoded history id to restrict query to
:type history_id: str
:param job_id: an encoded job id to restrict query to
:type job_id: str
:param user_id: an encoded user id to restrict query to, must be own id if not admin user
:type user_id: str
:param view: level of detail to return per invocation 'element' or 'collection'.
:type view: str
:param step_details: If 'view' is 'element', also include details on individual steps.
:type step_details: bool
:raises: exceptions.MessageException, exceptions.ObjectNotFound
"""
invocation_payload = InvocationIndexPayload(**kwd)
serialization_params = InvocationSerializationParams(**kwd)
invocations, total_matches = self.invocations_service.index(trans, invocation_payload, serialization_params)
trans.response.headers["total_matches"] = total_matches
return invocations
@expose_api_anonymous
def create_invocations_from_store(self, trans, payload, **kwd):
"""
POST /api/invocations/from_store
Create invocation(s) from a supplied model store.
        Input can be an archive describing a Galaxy model store containing a
        workflow invocation - for instance one created with the write_store
        or prepare_store_download endpoint.
"""
create_payload = CreateInvocationFromStore(**payload)
serialization_params = InvocationSerializationParams(**payload)
# refactor into a service...
return self._create_from_store(trans, create_payload, serialization_params)
def _create_from_store(
self, trans, payload: CreateInvocationFromStore, serialization_params: InvocationSerializationParams
):
history = self.history_manager.get_owned(
self.decode_id(payload.history_id), trans.user, current_history=trans.history
)
object_tracker = self.create_objects_from_store(
trans,
payload,
history=history,
)
return self.invocations_service.serialize_workflow_invocations(
object_tracker.invocations_by_key.values(), serialization_params
)
@expose_api
def show_invocation(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations/{invocation_id}
GET /api/invocations/{invocation_id}
Get detailed description of workflow invocation
:param invocation_id: the invocation id (required)
:type invocation_id: str
:param step_details: fetch details about individual invocation steps
and populate a steps attribute in the resulting
dictionary. Defaults to false.
:type step_details: bool
:param legacy_job_state: If step_details is true, and this is set to true
populate the invocation step state with the job state
instead of the invocation step state. This will also
produce one step per job in mapping jobs to mimic the
older behavior with respect to collections. Partially
scheduled steps may provide incomplete information
and the listed steps outputs are the mapped over
step outputs but the individual job outputs
when this is set - at least for now.
:type legacy_job_state: bool
:raises: exceptions.MessageException, exceptions.ObjectNotFound
"""
decoded_workflow_invocation_id = self.decode_id(invocation_id)
workflow_invocation = self.workflow_manager.get_invocation(trans, decoded_workflow_invocation_id, eager=True)
if not workflow_invocation:
raise exceptions.ObjectNotFound()
return self.__encode_invocation(workflow_invocation, **kwd)
@expose_api
def cancel_invocation(self, trans: ProvidesUserContext, invocation_id, **kwd):
"""
DELETE /api/workflows/{workflow_id}/invocations/{invocation_id}
DELETE /api/invocations/{invocation_id}
Cancel the specified workflow invocation.
        :param invocation_id: the workflow invocation id (required)
:type invocation_id: str
:raises: exceptions.MessageException, exceptions.ObjectNotFound
"""
decoded_workflow_invocation_id = self.decode_id(invocation_id)
workflow_invocation = self.workflow_manager.cancel_invocation(trans, decoded_workflow_invocation_id)
return self.__encode_invocation(workflow_invocation, **kwd)
@expose_api
def show_invocation_report(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations/{invocation_id}/report
GET /api/invocations/{invocation_id}/report
Get JSON summarizing invocation for reporting.
"""
kwd["format"] = "json"
return self.workflow_manager.get_invocation_report(trans, invocation_id, **kwd)
@expose_api_raw
def show_invocation_report_pdf(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations/{invocation_id}/report.pdf
GET /api/invocations/{invocation_id}/report.pdf
        Get a PDF summarizing the invocation for reporting.
"""
kwd["format"] = "pdf"
trans.response.set_content_type("application/pdf")
return self.workflow_manager.get_invocation_report(trans, invocation_id, **kwd)
def _generate_invocation_bco(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
decoded_workflow_invocation_id = self.decode_id(invocation_id)
workflow_invocation = self.workflow_manager.get_invocation(trans, decoded_workflow_invocation_id)
history = workflow_invocation.history
workflow = workflow_invocation.workflow
stored_workflow = workflow.stored_workflow
        # pull in the user info from those with whom the history and workflow have been shared
contributing_users = [stored_workflow.user]
# may want to extend this to have more reviewers.
reviewing_users = [stored_workflow.user]
encoded_workflow_id = trans.security.encode_id(stored_workflow.id)
encoded_history_id = trans.security.encode_id(history.id)
dict_workflow = json.loads(self.workflow_dict(trans, encoded_workflow_id))
spec_version = kwd.get("spec_version", "https://w3id.org/ieee/ieee-2791-schema/2791object.json")
for i, w in enumerate(reversed(stored_workflow.workflows)):
if workflow == w:
current_version = i
contributors = []
for contributing_user in contributing_users:
contributor = {
"orcid": kwd.get("xref", []),
"name": contributing_user.username,
"affiliation": "",
"contribution": ["authoredBy"],
"email": contributing_user.email,
}
contributors.append(contributor)
reviewers = []
        for reviewing_user in reviewing_users:
            reviewer = {
                "status": "approved",
                "reviewer_comment": "",
                "date": workflow_invocation.update_time.isoformat(),
                "reviewer": {
                    "orcid": kwd.get("orcid", []),
                    "name": reviewing_user.username,
                    "affiliation": "",
                    "contribution": "curatedBy",
                    "email": reviewing_user.email,
                },
            }
            reviewers.append(reviewer)
provenance_domain = {
"name": workflow.name,
"version": current_version,
"review": reviewers,
"derived_from": url_for("workflow", id=encoded_workflow_id, qualified=True),
"created": workflow_invocation.create_time.isoformat(),
"modified": workflow_invocation.update_time.isoformat(),
"contributors": contributors,
"license": "https://spdx.org/licenses/CC-BY-4.0.html",
}
keywords = []
for tag in stored_workflow.tags:
keywords.append(tag.user_tname)
for tag in history.tags:
if tag.user_tname not in keywords:
keywords.append(tag.user_tname)
metrics = {}
tools, input_subdomain, output_subdomain, pipeline_steps, software_prerequisites = [], [], [], [], []
for step in workflow_invocation.steps:
if step.workflow_step.type == "tool":
workflow_outputs_list, output_list, input_list = set(), [], []
for wo in step.workflow_step.workflow_outputs:
workflow_outputs_list.add(wo.output_name)
for job in step.jobs:
metrics[i] = summarize_job_metrics(trans, job)
for job_input in job.input_datasets:
if hasattr(job_input.dataset, "dataset_id"):
encoded_dataset_id = trans.security.encode_id(job_input.dataset.dataset_id)
input_obj = {
# TODO: that should maybe be a step prefix + element identifier where appropriate.
"filename": job_input.dataset.name,
"uri": url_for(
"history_content",
history_id=encoded_history_id,
id=encoded_dataset_id,
qualified=True,
),
"access_time": job_input.dataset.create_time.isoformat(),
}
input_list.append(input_obj)
for job_output in job.output_datasets:
if hasattr(job_output.dataset, "dataset_id"):
encoded_dataset_id = trans.security.encode_id(job_output.dataset.dataset_id)
output_obj = {
"filename": job_output.dataset.name,
"uri": url_for(
"history_content",
history_id=encoded_history_id,
id=encoded_dataset_id,
qualified=True,
),
"access_time": job_output.dataset.create_time.isoformat(),
}
output_list.append(output_obj)
if job_output.name in workflow_outputs_list:
output = {
"mediatype": job_output.dataset.extension,
"uri": {
"filename": job_output.dataset.name,
"uri": url_for(
"history_content",
history_id=encoded_history_id,
id=encoded_dataset_id,
qualified=True,
),
"access_time": job_output.dataset.create_time.isoformat(),
},
}
output_subdomain.append(output)
workflow_step = step.workflow_step
step_index = workflow_step.order_index
current_step = dict_workflow["steps"][str(step_index)]
pipeline_step = {
"step_number": step_index,
"name": current_step["name"],
"description": current_step["annotation"],
"version": current_step["tool_version"],
"prerequisite": kwd.get("prerequisite", []),
"input_list": input_list,
"output_list": output_list,
}
pipeline_steps.append(pipeline_step)
try:
software_prerequisite = {
"name": current_step["content_id"],
"version": current_step["tool_version"],
"uri": {"uri": current_step["content_id"], "access_time": current_step["uuid"]},
}
if software_prerequisite["uri"]["uri"] not in tools:
software_prerequisites.append(software_prerequisite)
tools.append(software_prerequisite["uri"]["uri"])
except Exception:
continue
if step.workflow_step.type == "data_input" and step.output_datasets:
for output_assoc in step.output_datasets:
encoded_dataset_id = trans.security.encode_id(output_assoc.dataset_id)
input_obj = {
"filename": step.workflow_step.label,
"uri": url_for(
"history_content", history_id=encoded_history_id, id=encoded_dataset_id, qualified=True
),
"access_time": step.workflow_step.update_time.isoformat(),
}
input_subdomain.append(input_obj)
if step.workflow_step.type == "data_collection_input" and step.output_dataset_collections:
for output_dataset_collection_association in step.output_dataset_collections:
encoded_dataset_id = trans.security.encode_id(
output_dataset_collection_association.dataset_collection_id
)
input_obj = {
"filename": step.workflow_step.label,
"uri": url_for(
"history_content",
history_id=encoded_history_id,
id=encoded_dataset_id,
type="dataset_collection",
qualified=True,
),
"access_time": step.workflow_step.update_time.isoformat(),
}
input_subdomain.append(input_obj)
usability_domain = []
for a in stored_workflow.annotations:
usability_domain.append(a.annotation)
for h in history.annotations:
usability_domain.append(h.annotation)
parametric_domain = []
for inv_step in workflow_invocation.steps:
try:
for k, v in inv_step.workflow_step.tool_inputs.items():
param, value, step = k, v, inv_step.workflow_step.order_index
parametric_domain.append({"param": param, "value": value, "step": step})
except Exception:
continue
execution_domain = {
"script_access_type": "a_galaxy_workflow",
"script": [url_for("workflows", encoded_workflow_id=encoded_workflow_id, qualified=True)],
"script_driver": "Galaxy",
"software_prerequisites": software_prerequisites,
"external_data_endpoints": [
{"name": "Access to Galaxy", "url": url_for("/", qualified=True)},
kwd.get("external_data_endpoints"),
],
"environment_variables": kwd.get("environment_variables", {}),
}
extension = [
{
"extension_schema": "https://raw.githubusercontent.com/biocompute-objects/extension_domain/6d2cd8482e6075746984662edcf78b57d3d38065/galaxy/galaxy_extension.json",
"galaxy_extension": {
"galaxy_url": url_for("/", qualified=True),
"galaxy_version": VERSION,
# TODO:
# 'aws_estimate': aws_estimate,
# 'job_metrics': metrics
},
}
]
error_domain = {
"empirical_error": kwd.get("empirical_error", []),
"algorithmic_error": kwd.get("algorithmic_error", []),
}
bco_dict = {
"provenance_domain": provenance_domain,
"usability_domain": usability_domain,
"extension_domain": extension,
"description_domain": {
"keywords": keywords,
"xref": kwd.get("xref", []),
"platform": ["Galaxy"],
"pipeline_steps": pipeline_steps,
},
"execution_domain": execution_domain,
"parametric_domain": parametric_domain,
"io_domain": {
"input_subdomain": input_subdomain,
"output_subdomain": output_subdomain,
},
"error_domain": error_domain,
}
# Generate etag from the BCO excluding object_id and spec_version, as
# specified in https://github.com/biocompute-objects/BCO_Specification/blob/main/docs/top-level.md#203-etag-etag
etag = hashlib.sha256(json.dumps(bco_dict, sort_keys=True).encode()).hexdigest()
bco_dict.update(
{
"object_id": url_for(
controller=f"api/invocations/{invocation_id}", action="biocompute", qualified=True
),
"spec_version": spec_version,
"etag": etag,
}
)
return bco_dict
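# Sketch of how a client could verify the etag on a returned BCO, mirroring the generation
# above (hash of the JSON-serialized object without object_id, spec_version, and etag).
# `bco` is assumed to be the dict returned by the biocompute endpoints below; this is
# illustrative only and not part of the controller.
#
#   import hashlib, json
#   payload = {k: v for k, v in bco.items() if k not in ("object_id", "spec_version", "etag")}
#   assert hashlib.sha256(json.dumps(payload, sort_keys=True).encode()).hexdigest() == bco["etag"]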
@expose_api
def export_invocation_bco(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
"""
GET /api/invocations/{invocations_id}/biocompute
Return a BioCompute Object for the workflow invocation.
The BioCompute Object endpoints are in beta - important details such
as how inputs and outputs are represented, how the workflow is encoded,
how author and version information is encoded, and how URLs are
generated will very likely change in important ways over time.
"""
return self._generate_invocation_bco(trans, invocation_id, **kwd)
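# Example request for the endpoint above (illustrative; `base`, `headers`, and the
# invocation id are placeholders and `requests` is an assumed client-side dependency):
#
#   bco = requests.get(
#       f"{base}/api/invocations/{invocation_id}/biocompute", headers=headers
#   ).json()
#   print(bco["provenance_domain"]["name"], bco["etag"])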
@expose_api_raw
def download_invocation_bco(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
"""
GET /api/invocations/{invocations_id}/biocompute/download
Returns a selected BioCompute Object as a file for download (HTTP
headers configured with filename and such).
The BioCompute Object endpoints are in beta - important details such
as how inputs and outputs are represented, how the workflow is encoded,
how author and version information is encoded, and how URLs are
generated will very likely change in important ways over time.
"""
ret_dict = self._generate_invocation_bco(trans, invocation_id, **kwd)
trans.response.headers["Content-Disposition"] = f'attachment; filename="bco_{invocation_id}.json"'
trans.response.set_content_type("application/json")
return format_return_as_json(ret_dict, pretty=True)
@expose_api
def invocation_step(self, trans, invocation_id, step_id, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations/{invocation_id}/steps/{step_id}
GET /api/invocations/{invocation_id}/steps/{step_id}
:param invocation_id: the invocation id (required)
:type invocation_id: str
:param step_id: encoded id of the WorkflowInvocationStep (required)
:type step_id: str
:param payload: payload containing update action information
for running workflow.
:raises: exceptions.MessageException, exceptions.ObjectNotFound
"""
decoded_invocation_step_id = self.decode_id(step_id)
invocation_step = self.workflow_manager.get_invocation_step(trans, decoded_invocation_step_id)
return self.__encode_invocation_step(trans, invocation_step)
@expose_api_anonymous_and_sessionless
def invocation_step_jobs_summary(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations/{invocation_id}/step_jobs_summary
GET /api/invocations/{invocation_id}/step_jobs_summary
return job state summary info aggregated per step of the workflow invocation
Warning: We allow anyone to fetch job state information about any object they
can guess an encoded ID for - it isn't considered protected data. This keeps
polling IDs as part of state calculation for large histories and collections as
efficient as possible.
:param invocation_id: the invocation id (required)
:type invocation_id: str
:rtype: dict[]
:returns: an array of job summary object dictionaries for each step
"""
decoded_invocation_id = self.decode_id(invocation_id)
ids = []
types = []
for (job_source_type, job_source_id, _) in invocation_job_source_iter(trans.sa_session, decoded_invocation_id):
ids.append(job_source_id)
types.append(job_source_type)
return [self.encode_all_ids(trans, s) for s in fetch_job_states(trans.sa_session, ids, types)]
@expose_api_anonymous_and_sessionless
def invocation_jobs_summary(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations/{invocation_id}/jobs_summary
GET /api/invocations/{invocation_id}/jobs_summary
return job state summary info aggregated across all current jobs of the workflow invocation
Warning: We allow anyone to fetch job state information about any object they
can guess an encoded ID for - it isn't considered protected data. This keeps
polling IDs as part of state calculation for large histories and collections as
efficient as possible.
:param invocation_id: the invocation id (required)
:type invocation_id: str
:rtype: dict
:returns: a job summary object merged for all steps in workflow invocation
"""
ids = [self.decode_id(invocation_id)]
types = ["WorkflowInvocation"]
return [self.encode_all_ids(trans, s) for s in fetch_job_states(trans.sa_session, ids, types)][0]
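# Illustrative polling sketch for the two summary endpoints above. Per the docstrings,
# the per-step variant returns a list of job summary dicts and the invocation-wide
# variant a single merged dict; no field names beyond that are assumed here. `base`,
# `headers`, and the ids are placeholders.
#
#   per_step = requests.get(
#       f"{base}/api/invocations/{invocation_id}/step_jobs_summary", headers=headers
#   ).json()
#   merged = requests.get(
#       f"{base}/api/invocations/{invocation_id}/jobs_summary", headers=headers
#   ).json()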
@expose_api
def update_invocation_step(self, trans: GalaxyWebTransaction, invocation_id, step_id, payload, **kwd):
"""
PUT /api/workflows/{workflow_id}/invocations/{invocation_id}/steps/{step_id}
PUT /api/invocations/{invocation_id}/steps/{step_id}
Update state of running workflow step invocation - still very nebulous
but this would be for stuff like confirming paused steps can proceed
etc....
:param invocation_id: the usage id (required)
:type invocation_id: str
:param step_id: encoded id of the WorkflowInvocationStep (required)
:type step_id: str
:raises: exceptions.MessageException, exceptions.ObjectNotFound
"""
decoded_invocation_step_id = self.decode_id(step_id)
action = payload.get("action", None)
invocation_step = self.workflow_manager.update_invocation_step(
trans,
decoded_invocation_step_id,
action=action,
)
return self.__encode_invocation_step(trans, invocation_step)
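# Minimal sketch of driving the endpoint above from a client. The payload only needs an
# "action" key (see payload.get("action") above); using a boolean True to let a paused
# step proceed is an assumption about typical usage, not a documented contract.
#
#   requests.put(
#       f"{base}/api/invocations/{invocation_id}/steps/{step_id}",
#       headers=headers,
#       json={"action": True},
#   )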
def _workflow_from_dict(self, trans, data, workflow_create_options, source=None):
"""Creates a workflow from a dict.
Created workflow is stored in the database and returned.
"""
publish = workflow_create_options.publish
importable = workflow_create_options.is_importable
if publish and not importable:
raise exceptions.RequestParameterInvalidException("Published workflow must be importable.")
workflow_contents_manager = self.app.workflow_contents_manager
raw_workflow_description = workflow_contents_manager.ensure_raw_description(data)
created_workflow = workflow_contents_manager.build_workflow_from_raw_description(
trans,
raw_workflow_description,
workflow_create_options,
source=source,
)
if importable:
self._make_item_accessible(trans.sa_session, created_workflow.stored_workflow)
trans.sa_session.flush()
self._import_tools_if_needed(trans, workflow_create_options, raw_workflow_description)
return created_workflow.stored_workflow, created_workflow.missing_tools
def _import_tools_if_needed(self, trans, workflow_create_options, raw_workflow_description):
if not workflow_create_options.import_tools:
return
if not trans.user_is_admin:
raise exceptions.AdminRequiredException()
data = raw_workflow_description.as_dict
tools = {}
for key in data["steps"]:
item = data["steps"][key]
if item is not None:
if "tool_shed_repository" in item:
tool_shed_repository = item["tool_shed_repository"]
if (
"owner" in tool_shed_repository
and "changeset_revision" in tool_shed_repository
and "name" in tool_shed_repository
and "tool_shed" in tool_shed_repository
):
toolstr = (
tool_shed_repository["owner"]
+ tool_shed_repository["changeset_revision"]
+ tool_shed_repository["name"]
+ tool_shed_repository["tool_shed"]
)
tools[toolstr] = tool_shed_repository
irm = InstallRepositoryManager(self.app)
install_options = workflow_create_options.install_options
for k in tools:
item = tools[k]
tool_shed_url = f"https://{item['tool_shed']}/"
name = item["name"]
owner = item["owner"]
changeset_revision = item["changeset_revision"]
irm.install(tool_shed_url, name, owner, changeset_revision, install_options)
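# For reference, _import_tools_if_needed expects each tool step to carry a
# "tool_shed_repository" mapping with at least the keys checked above; the values
# below are placeholders:
#
#   {
#       "tool_shed": "toolshed.g2.bx.psu.edu",
#       "owner": "some-owner",
#       "name": "some-repo",
#       "changeset_revision": "0123456789ab",
#   }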
def __encode_invocation_step(self, trans: ProvidesUserContext, invocation_step):
return self.encode_all_ids(trans, invocation_step.to_dict("element"), True)
def __get_stored_accessible_workflow(self, trans, workflow_id, **kwd):
instance = util.string_as_bool(kwd.get("instance", "false"))
return self.workflow_manager.get_stored_accessible_workflow(trans, workflow_id, by_stored_id=not instance)
def __get_stored_workflow(self, trans, workflow_id, **kwd):
instance = util.string_as_bool(kwd.get("instance", "false"))
return self.workflow_manager.get_stored_workflow(trans, workflow_id, by_stored_id=not instance)
def __encode_invocation(self, invocation, **kwd):
params = InvocationSerializationParams(**kwd)
return self.invocations_service.serialize_workflow_invocation(invocation, params)
StoredWorkflowIDPathParam: EncodedDatabaseIdField = Path(
..., title="Stored Workflow ID", description="The encoded database identifier of the Stored Workflow."
)
InvocationIDPathParam: EncodedDatabaseIdField = Path(
..., title="Invocation ID", description="The encoded database identifier of the Invocation."
)
DeletedQueryParam: bool = Query(
default=False, title="Display deleted", description="Whether to restrict result to deleted workflows."
)
HiddenQueryParam: bool = Query(
default=False, title="Display hidden", description="Whether to restrict result to hidden workflows."
)
MissingToolsQueryParam: bool = Query(
default=False,
title="Display missing tools",
description="Whether to include a list of missing tools per workflow entry",
)
ShowPublishedQueryParam: Optional[bool] = Query(default=None, title="Include published workflows.", description="")
ShowSharedQueryParam: Optional[bool] = Query(
default=None, title="Include workflows shared with authenticated user.", description=""
)
SortByQueryParam: Optional[WorkflowSortByEnum] = Query(
default=None,
title="Sort workflow index by this attribute",
description="If unspecified, default ordering depends on other parameters, but generally the user's own workflows appear first based on update time",
)
SortDescQueryParam: Optional[bool] = Query(
default=None,
title="Sort Descending",
description="Sort in descending order?",
)
LimitQueryParam: Optional[int] = Query(default=None, title="Limit the number of workflows returned.")
OffsetQueryParam: Optional[int] = Query(
default=0,
title="Number of workflows to skip in sorted query (to enable pagination).",
)
query_tags = [
IndexQueryTag("name", "The stored workflow's name.", "n"),
IndexQueryTag(
"tag",
"The workflow's tag; if the tag contains a colon, an attempt will be made to match the key and value of the tag separately.",
"t",
),
IndexQueryTag("user", "The stored workflow's owner's username.", "u"),
IndexQueryTag(
"is:published",
"Include only published workflows in the final result. Be sure the query parameter `show_published` is set to `true` to include all published workflows and not just the requesting user's.",
),
IndexQueryTag(
"is:share_with_me",
"Include only workflows shared with the requesting user. Be sure the query parameter `show_shared` is set to `true` to include shared workflows.",
),
]
SearchQueryParam: Optional[str] = search_query_param(
model_name="Stored Workflow",
tags=query_tags,
free_text_fields=["name", "tag", "user"],
)
SkipStepCountsQueryParam: bool = Query(
default=False,
title="Skip step counts.",
description="Set this to true to skip joining workflow step counts and optimize the resulting index query. Response objects will not contain step counts.",
)
@router.cbv
class FastAPIWorkflows:
service: WorkflowsService = depends(WorkflowsService)
invocations_service: InvocationsService = depends(InvocationsService)
@router.get(
"/api/workflows",
summary="Lists stored workflows viewable by the user.",
response_description="A list with summary stored workflow information per viewable entry.",
)
def index(
self,
response: Response,
trans: ProvidesUserContext = DependsOnTrans,
show_deleted: bool = DeletedQueryParam,
show_hidden: bool = HiddenQueryParam,
missing_tools: bool = MissingToolsQueryParam,
show_published: Optional[bool] = ShowPublishedQueryParam,
show_shared: Optional[bool] = ShowSharedQueryParam,
sort_by: Optional[WorkflowSortByEnum] = SortByQueryParam,
sort_desc: Optional[bool] = SortDescQueryParam,
limit: Optional[int] = LimitQueryParam,
offset: Optional[int] = OffsetQueryParam,
search: Optional[str] = SearchQueryParam,
skip_step_counts: bool = SkipStepCountsQueryParam,
) -> List[Dict[str, Any]]:
"""Lists stored workflows viewable by the user."""
payload = WorkflowIndexPayload(
show_published=show_published,
show_hidden=show_hidden,
show_deleted=show_deleted,
show_shared=show_shared,
missing_tools=missing_tools,
sort_by=sort_by,
sort_desc=sort_desc,
limit=limit,
offset=offset,
search=search,
skip_step_counts=skip_step_counts,
)
workflows, total_matches = self.service.index(trans, payload, include_total_count=True)
response.headers["total_matches"] = str(total_matches)
return workflows
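# Illustrative query against the FastAPI index route above, using only parameters declared
# in this module (search tags: name/n, tag/t, user/u, is:published, is:share_with_me).
# `base` and `headers` are placeholders.
#
#   resp = requests.get(
#       f"{base}/api/workflows",
#       headers=headers,
#       params={"search": "name:mapping tag:rna", "limit": 20, "offset": 0, "show_published": True},
#   )
#   print(resp.headers["total_matches"], len(resp.json()))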
@router.get(
"/api/workflows/{id}/sharing",
summary="Get the current sharing status of the given item.",
)
def sharing(
self,
trans: ProvidesUserContext = DependsOnTrans,
id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
) -> SharingStatus:
"""Return the sharing status of the item."""
return self.service.shareable_service.sharing(trans, id)
@router.put(
"/api/workflows/{id}/enable_link_access",
summary="Makes this item accessible by a URL link.",
)
def enable_link_access(
self,
trans: ProvidesUserContext = DependsOnTrans,
id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
) -> SharingStatus:
"""Makes this item accessible by a URL link and return the current sharing status."""
return self.service.shareable_service.enable_link_access(trans, id)
@router.put(
"/api/workflows/{id}/disable_link_access",
summary="Makes this item inaccessible by a URL link.",
)
def disable_link_access(
self,
trans: ProvidesUserContext = DependsOnTrans,
id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
) -> SharingStatus:
"""Makes this item inaccessible by a URL link and return the current sharing status."""
return self.service.shareable_service.disable_link_access(trans, id)
@router.put(
"/api/workflows/{id}/publish",
summary="Makes this item public and accessible by a URL link.",
)
def publish(
self,
trans: ProvidesUserContext = DependsOnTrans,
id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
) -> SharingStatus:
"""Makes this item publicly available by a URL link and return the current sharing status."""
return self.service.shareable_service.publish(trans, id)
@router.put(
"/api/workflows/{id}/unpublish",
summary="Removes this item from the published list.",
)
def unpublish(
self,
trans: ProvidesUserContext = DependsOnTrans,
id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
) -> SharingStatus:
"""Removes this item from the published list and return the current sharing status."""
return self.service.shareable_service.unpublish(trans, id)
@router.put(
"/api/workflows/{id}/share_with_users",
summary="Share this item with specific users.",
)
def share_with_users(
self,
trans: ProvidesUserContext = DependsOnTrans,
id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
payload: ShareWithPayload = Body(...),
) -> ShareWithStatus:
"""Shares this item with specific users and return the current sharing status."""
return self.service.shareable_service.share_with_users(trans, id, payload)
@router.put(
"/api/workflows/{id}/slug",
summary="Set a new slug for this shared item.",
status_code=status.HTTP_204_NO_CONTENT,
)
def set_slug(
self,
trans: ProvidesUserContext = DependsOnTrans,
id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
payload: SetSlugPayload = Body(...),
):
"""Sets a new slug to access this item by URL. The new slug must be unique."""
self.service.shareable_service.set_slug(trans, id, payload)
return Response(status_code=status.HTTP_204_NO_CONTENT)
@router.post(
"/api/invocations/{invocation_id}/prepare_store_download",
summary="Prepare a workflow invocation export-style download.",
)
def prepare_store_download(
self,
trans: ProvidesUserContext = DependsOnTrans,
invocation_id: EncodedDatabaseIdField = InvocationIDPathParam,
payload: PrepareStoreDownloadPayload = Body(...),
) -> AsyncFile:
return self.invocations_service.prepare_store_download(
trans,
invocation_id,
payload,
)
@router.post(
"/api/invocations/{invocation_id}/write_store",
summary="Prepare a workflow invocation export-style download and write to supplied URI.",
)
def write_store(
self,
trans: ProvidesUserContext = DependsOnTrans,
invocation_id: EncodedDatabaseIdField = InvocationIDPathParam,
payload: WriteStoreToPayload = Body(...),
) -> AsyncTaskResultSummary:
rval = self.invocations_service.write_store(
trans,
invocation_id,
payload,
)
return rval
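# Sketch of exercising the two invocation export routes above. The payload models
# (PrepareStoreDownloadPayload / WriteStoreToPayload) are defined elsewhere, so the field
# names used below ("model_store_format", "target_uri") are assumptions for illustration only.
#
#   async_file = requests.post(
#       f"{base}/api/invocations/{invocation_id}/prepare_store_download",
#       headers=headers,
#       json={"model_store_format": "rocrate.zip"},   # hypothetical field/value
#   ).json()
#   task = requests.post(
#       f"{base}/api/invocations/{invocation_id}/write_store",
#       headers=headers,
#       json={"target_uri": "gxfiles://myfiles/invocation.rocrate.zip", "model_store_format": "rocrate.zip"},
#   ).json()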
|
"""
API operations for Workflows
"""
import hashlib
import json
import logging
import os
from typing import (
Any,
Dict,
List,
Optional,
)
from fastapi import (
Body,
Path,
Query,
Response,
status,
)
from gxformat2._yaml import ordered_dump
from markupsafe import escape
from pydantic import Extra
from galaxy import (
exceptions,
model,
util,
)
from galaxy.files.uris import (
stream_url_to_str,
validate_uri_access,
)
from galaxy.managers.context import ProvidesUserContext
from galaxy.managers.jobs import (
fetch_job_states,
invocation_job_source_iter,
summarize_job_metrics,
)
from galaxy.managers.workflows import (
MissingToolsException,
RefactorRequest,
WorkflowCreateOptions,
WorkflowUpdateOptions,
)
from galaxy.model.item_attrs import UsesAnnotations
from galaxy.schema.fields import EncodedDatabaseIdField
from galaxy.schema.schema import (
AsyncFile,
AsyncTaskResultSummary,
SetSlugPayload,
ShareWithPayload,
ShareWithStatus,
SharingStatus,
StoreContentSource,
WorkflowSortByEnum,
WriteStoreToPayload,
)
from galaxy.structured_app import StructuredApp
from galaxy.tool_shed.galaxy_install.install_manager import InstallRepositoryManager
from galaxy.tools import recommendations
from galaxy.tools.parameters import populate_state
from galaxy.tools.parameters.basic import workflow_building_modes
from galaxy.util.sanitize_html import sanitize_html
from galaxy.version import VERSION
from galaxy.web import (
expose_api,
expose_api_anonymous,
expose_api_anonymous_and_sessionless,
expose_api_raw,
expose_api_raw_anonymous_and_sessionless,
format_return_as_json,
)
from galaxy.webapps.base.controller import (
SharableMixin,
url_for,
UsesStoredWorkflowMixin,
)
from galaxy.webapps.base.webapp import GalaxyWebTransaction
from galaxy.webapps.galaxy.services.base import (
ConsumesModelStores,
ServesExportStores,
)
from galaxy.webapps.galaxy.services.invocations import (
InvocationIndexPayload,
InvocationSerializationParams,
InvocationsService,
PrepareStoreDownloadPayload,
)
from galaxy.webapps.galaxy.services.workflows import (
WorkflowIndexPayload,
WorkflowsService,
)
from galaxy.workflow.extract import extract_workflow
from galaxy.workflow.modules import module_factory
from galaxy.workflow.run import queue_invoke
from galaxy.workflow.run_request import build_workflow_run_configs
from . import (
BaseGalaxyAPIController,
depends,
DependsOnTrans,
IndexQueryTag,
Router,
search_query_param,
)
log = logging.getLogger(__name__)
router = Router(tags=["workflows"])
class CreateInvocationFromStore(StoreContentSource):
history_id: Optional[str]
class Config:
extra = Extra.allow
class WorkflowsAPIController(
BaseGalaxyAPIController,
UsesStoredWorkflowMixin,
UsesAnnotations,
SharableMixin,
ServesExportStores,
ConsumesModelStores,
):
service: WorkflowsService = depends(WorkflowsService)
invocations_service: InvocationsService = depends(InvocationsService)
def __init__(self, app: StructuredApp):
super().__init__(app)
self.history_manager = app.history_manager
self.workflow_manager = app.workflow_manager
self.workflow_contents_manager = app.workflow_contents_manager
self.tool_recommendations = recommendations.ToolRecommendations()
@expose_api
def get_workflow_menu(self, trans: ProvidesUserContext, **kwd):
"""
Get workflows present in the tools panel
GET /api/workflows/menu
"""
user = trans.user
ids_in_menu = [x.stored_workflow_id for x in user.stored_workflow_menu_entries]
workflows = self.get_workflows_list(trans, **kwd)
return {"ids_in_menu": ids_in_menu, "workflows": workflows}
@expose_api
def set_workflow_menu(self, trans: GalaxyWebTransaction, payload=None, **kwd):
"""
Save workflow menu to be shown in the tool panel
PUT /api/workflows/menu
"""
payload = payload or {}
user = trans.user
workflow_ids = payload.get("workflow_ids")
if workflow_ids is None:
workflow_ids = []
elif not isinstance(workflow_ids, list):
workflow_ids = [workflow_ids]
workflow_ids_decoded = []
# Decode the encoded workflow ids
for ids in workflow_ids:
workflow_ids_decoded.append(trans.security.decode_id(ids))
sess = trans.sa_session
# This explicit remove seems like a hack, need to figure out
# how to make the association do it automatically.
for m in user.stored_workflow_menu_entries:
sess.delete(m)
user.stored_workflow_menu_entries = []
q = sess.query(model.StoredWorkflow)
# To ensure id list is unique
seen_workflow_ids = set()
for wf_id in workflow_ids_decoded:
if wf_id in seen_workflow_ids:
continue
else:
seen_workflow_ids.add(wf_id)
m = model.StoredWorkflowMenuEntry()
m.stored_workflow = q.get(wf_id)
user.stored_workflow_menu_entries.append(m)
sess.flush()
message = "Menu updated."
trans.set_message(message)
return {"message": message, "status": "done"}
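# Example payload for the menu endpoint above (illustrative). set_workflow_menu reads
# "workflow_ids" from the payload, accepting a single encoded id or a list; the ids,
# `base`, and `headers` below are placeholders.
#
#   requests.put(
#       f"{base}/api/workflows/menu",
#       headers=headers,
#       json={"workflow_ids": ["f2db41e1fa331b3e", "f597429621d6eb2b"]},
#   )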
def get_workflows_list(
self,
trans: ProvidesUserContext,
missing_tools=False,
show_published=None,
show_shared=None,
show_hidden=False,
show_deleted=False,
**kwd,
):
"""
Displays a collection of workflows.
:param show_published: Optional boolean to include published workflows
If unspecified, this behavior depends on whether the request
is coming from an authenticated session. The default is true
for anonymous API requests and false otherwise.
:type show_published: boolean
:param show_hidden: if True, show hidden workflows
:type show_hidden: boolean
:param show_deleted: if True, show deleted workflows
:type show_deleted: boolean
:param show_shared: Optional boolean to include shared workflows.
If unspecified, this behavior depends on show_deleted/show_hidden,
defaulting to false if show_hidden or show_deleted is true and to
true otherwise.
:param missing_tools: if True, include a list of missing tools per workflow
:type missing_tools: boolean
"""
show_published = util.string_as_bool_or_none(show_published)
show_hidden = util.string_as_bool(show_hidden)
show_deleted = util.string_as_bool(show_deleted)
missing_tools = util.string_as_bool(missing_tools)
show_shared = util.string_as_bool_or_none(show_shared)
payload = WorkflowIndexPayload(
show_published=show_published,
show_hidden=show_hidden,
show_deleted=show_deleted,
show_shared=show_shared,
missing_tools=missing_tools,
)
workflows, _ = self.service.index(trans, payload)
return workflows
@expose_api_anonymous_and_sessionless
def show(self, trans: GalaxyWebTransaction, id, **kwd):
"""
GET /api/workflows/{encoded_workflow_id}
:param instance: true if fetch by Workflow ID instead of StoredWorkflow id, false
by default.
:type instance: boolean
Displays information needed to run a workflow.
"""
stored_workflow = self.__get_stored_workflow(trans, id, **kwd)
if stored_workflow.importable is False and stored_workflow.user != trans.user and not trans.user_is_admin:
if (
trans.sa_session.query(model.StoredWorkflowUserShareAssociation)
.filter_by(user=trans.user, stored_workflow=stored_workflow)
.count()
== 0
):
message = "Workflow is neither importable, nor owned by or shared with current user"
raise exceptions.ItemAccessibilityException(message)
if kwd.get("legacy", False):
style = "legacy"
else:
style = "instance"
version = kwd.get("version")
if version is None and util.string_as_bool(kwd.get("instance", "false")):
# A Workflow instance may not be the latest workflow version attached to StoredWorkflow.
# This figures out the correct version so that we return the correct Workflow and version.
workflow_id = self.decode_id(id)
for i, workflow in enumerate(reversed(stored_workflow.workflows)):
if workflow.id == workflow_id:
version = i
break
return self.workflow_contents_manager.workflow_to_dict(trans, stored_workflow, style=style, version=version)
@expose_api
def show_versions(self, trans: GalaxyWebTransaction, workflow_id, **kwds):
"""
GET /api/workflows/{encoded_workflow_id}/versions
:param instance: true if fetch by Workflow ID instead of StoredWorkflow id, false
by default.
:type instance: boolean
Lists all versions of this workflow.
"""
instance = util.string_as_bool(kwds.get("instance", "false"))
stored_workflow = self.workflow_manager.get_stored_accessible_workflow(
trans, workflow_id, by_stored_id=not instance
)
return [
{"version": i, "update_time": str(w.update_time), "steps": len(w.steps)}
for i, w in enumerate(reversed(stored_workflow.workflows))
]
@expose_api
def create(self, trans: GalaxyWebTransaction, payload=None, **kwd):
"""
POST /api/workflows
Create workflows in various ways.
:param from_history_id: Id of history to extract a workflow from.
:type from_history_id: str
:param job_ids: If from_history_id is set - optional list of jobs to include when extracting a workflow from history
:type job_ids: str
:param dataset_ids: If from_history_id is set - optional list of HDA "hid"s corresponding to workflow inputs when extracting a workflow from history
:type dataset_ids: str
:param dataset_collection_ids: If from_history_id is set - optional list of HDCA "hid"s corresponding to workflow inputs when extracting a workflow from history
:type dataset_collection_ids: str
:param workflow_name: If from_history_id is set - name of the workflow to create when extracting a workflow from history
:type workflow_name: str
"""
ways_to_create = {
"archive_source",
"from_history_id",
"from_path",
"shared_workflow_id",
"workflow",
}
if trans.user_is_bootstrap_admin:
raise exceptions.RealUserRequiredException("Only real users can create or run workflows.")
if payload is None or len(ways_to_create.intersection(payload)) == 0:
message = f"One parameter among - {', '.join(ways_to_create)} - must be specified"
raise exceptions.RequestParameterMissingException(message)
if len(ways_to_create.intersection(payload)) > 1:
message = f"Only one parameter among - {', '.join(ways_to_create)} - must be specified"
raise exceptions.RequestParameterInvalidException(message)
if "archive_source" in payload:
archive_source = payload["archive_source"]
archive_file = payload.get("archive_file")
archive_data = None
if archive_source:
validate_uri_access(archive_source, trans.user_is_admin, trans.app.config.fetch_url_allowlist_ips)
if archive_source.startswith("file://"):
workflow_src = {"src": "from_path", "path": archive_source[len("file://") :]}
payload["workflow"] = workflow_src
return self.__api_import_new_workflow(trans, payload, **kwd)
elif archive_source == "trs_tool":
trs_server = payload.get("trs_server")
trs_tool_id = payload.get("trs_tool_id")
trs_version_id = payload.get("trs_version_id")
import_source = None
archive_data = self.app.trs_proxy.get_version_descriptor(trs_server, trs_tool_id, trs_version_id)
else:
try:
archive_data = stream_url_to_str(
archive_source, trans.app.file_sources, prefix="gx_workflow_download"
)
import_source = "URL"
except Exception:
raise exceptions.MessageException(f"Failed to open URL '{escape(archive_source)}'.")
elif hasattr(archive_file, "file"):
uploaded_file = archive_file.file
uploaded_file_name = uploaded_file.name
if os.path.getsize(os.path.abspath(uploaded_file_name)) > 0:
archive_data = util.unicodify(uploaded_file.read())
import_source = "uploaded file"
else:
raise exceptions.MessageException("You attempted to upload an empty file.")
else:
raise exceptions.MessageException("Please provide a URL or file.")
return self.__api_import_from_archive(trans, archive_data, import_source, payload=payload)
if "from_history_id" in payload:
from_history_id = payload.get("from_history_id")
from_history_id = self.decode_id(from_history_id)
history = self.history_manager.get_accessible(from_history_id, trans.user, current_history=trans.history)
job_ids = [self.decode_id(_) for _ in payload.get("job_ids", [])]
dataset_ids = payload.get("dataset_ids", [])
dataset_collection_ids = payload.get("dataset_collection_ids", [])
workflow_name = payload["workflow_name"]
stored_workflow = extract_workflow(
trans=trans,
user=trans.user,
history=history,
job_ids=job_ids,
dataset_ids=dataset_ids,
dataset_collection_ids=dataset_collection_ids,
workflow_name=workflow_name,
)
item = stored_workflow.to_dict(value_mapper={"id": trans.security.encode_id})
item["url"] = url_for("workflow", id=item["id"])
return item
if "from_path" in payload:
from_path = payload.get("from_path")
object_id = payload.get("object_id")
workflow_src = {"src": "from_path", "path": from_path}
if object_id is not None:
workflow_src["object_id"] = object_id
payload["workflow"] = workflow_src
return self.__api_import_new_workflow(trans, payload, **kwd)
if "shared_workflow_id" in payload:
workflow_id = payload["shared_workflow_id"]
return self.__api_import_shared_workflow(trans, workflow_id, payload)
if "workflow" in payload:
return self.__api_import_new_workflow(trans, payload, **kwd)
# This was already raised above, but just in case...
raise exceptions.RequestParameterMissingException("No method for workflow creation supplied.")
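# Two illustrative ways to drive the create() endpoint above, matching keys from
# ways_to_create; the ids, `base`, and `headers` are placeholders.
#
#   # Extract a workflow from an existing history:
#   requests.post(f"{base}/api/workflows", headers=headers, json={
#       "from_history_id": "<encoded_history_id>",
#       "workflow_name": "Extracted workflow",
#       "job_ids": [], "dataset_ids": [], "dataset_collection_ids": [],
#   })
#   # Import from a remote archive:
#   requests.post(f"{base}/api/workflows", headers=headers, json={
#       "archive_source": "https://example.org/my-workflow.ga",
#   })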
@expose_api_raw_anonymous_and_sessionless
def workflow_dict(self, trans: GalaxyWebTransaction, workflow_id, **kwd):
"""
GET /api/workflows/{encoded_workflow_id}/download
Returns a selected workflow.
:type style: str
:param style: Style of export. The default is 'export', which is meant to be used
with workflow import endpoints. Other formats such as 'instance', 'editor',
'run' are more tied to the GUI and should not be considered stable APIs.
The default format for 'export' is specified by the
admin with the `default_workflow_export_format` config
option. Style can be specified as either 'ga' or 'format2' directly
to be explicit about which format to download.
:param instance: true if fetch by Workflow ID instead of StoredWorkflow id, false
by default.
:type instance: boolean
"""
stored_workflow = self.__get_stored_accessible_workflow(trans, workflow_id, **kwd)
style = kwd.get("style", "export")
download_format = kwd.get("format")
version = kwd.get("version")
history_id = kwd.get("history_id")
history = None
if history_id:
history = self.history_manager.get_accessible(
self.decode_id(history_id), trans.user, current_history=trans.history
)
ret_dict = self.workflow_contents_manager.workflow_to_dict(
trans, stored_workflow, style=style, version=version, history=history
)
if download_format == "json-download":
sname = stored_workflow.name
sname = "".join(c if c in util.FILENAME_VALID_CHARS else "_" for c in sname)[0:150]
if ret_dict.get("format-version", None) == "0.1":
extension = "ga"
else:
extension = "gxwf.json"
trans.response.headers[
"Content-Disposition"
] = f'attachment; filename="Galaxy-Workflow-{sname}.{extension}"'
trans.response.set_content_type("application/galaxy-archive")
if style == "format2" and download_format != "json-download":
return ordered_dump(ret_dict)
else:
return format_return_as_json(ret_dict, pretty=True)
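# Example download requests for the endpoint above (illustrative; `base`, `headers`,
# and `workflow_id` are placeholders):
#
#   # Native .ga/.gxwf.json attachment, as served by the "json-download" format:
#   r = requests.get(f"{base}/api/workflows/{workflow_id}/download",
#                    headers=headers, params={"format": "json-download"})
#   # Format 2 (gxformat2) YAML text:
#   yaml_text = requests.get(f"{base}/api/workflows/{workflow_id}/download",
#                            headers=headers, params={"style": "format2"}).text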
@expose_api
def delete(self, trans: ProvidesUserContext, id, **kwd):
"""
DELETE /api/workflows/{encoded_workflow_id}
Deletes a specified workflow
Author: rpark
copied from galaxy.web.controllers.workflows.py (delete)
"""
stored_workflow = self.__get_stored_workflow(trans, id, **kwd)
# check to see if user has permissions to selected workflow
if stored_workflow.user != trans.user and not trans.user_is_admin:
raise exceptions.InsufficientPermissionsException()
# Mark a workflow as deleted
stored_workflow.deleted = True
trans.sa_session.flush()
# TODO: Unsure of response message to let api know that a workflow was successfully deleted
return f"Workflow '{stored_workflow.name}' successfully deleted"
@expose_api
def import_new_workflow_deprecated(self, trans: GalaxyWebTransaction, payload, **kwd):
"""
POST /api/workflows/upload
Importing dynamic workflows from the api. Return newly generated workflow id.
Author: rpark
# currently assumes payload['workflow'] is a json representation of a workflow to be inserted into the database
Deprecated in favor of POST /api/workflows with encoded 'workflow' in
payload the same way.
"""
return self.__api_import_new_workflow(trans, payload, **kwd)
@expose_api
def update(self, trans: GalaxyWebTransaction, id, payload, **kwds):
"""
PUT /api/workflows/{id}
Update the workflow stored with ``id``.
:type id: str
:param id: the encoded id of the workflow to update
:param instance: true if fetch by Workflow ID instead of StoredWorkflow id, false by default.
:type instance: boolean
:type payload: dict
:param payload: a dictionary containing any or all the
:workflow:
the json description of the workflow as would be
produced by GET workflows/<id>/download or
given to `POST workflows`
The workflow contents will be updated to target this.
:name:
optional string name for the workflow, if not present in payload,
name defaults to existing name
:annotation:
optional string annotation for the workflow, if not present in payload,
annotation defaults to existing annotation
:menu_entry:
optional boolean marking if the workflow should appear in the user\'s menu,
if not present, workflow menu entries are not modified
:tags:
optional list containing list of tags to add to the workflow (overwriting
existing tags), if not present, tags are not modified
:from_tool_form:
True iff encoded state coming in is encoded for the tool form.
:rtype: dict
:returns: serialized version of the workflow
"""
stored_workflow = self.__get_stored_workflow(trans, id, **kwds)
workflow_dict = payload.get("workflow", {})
workflow_dict.update({k: v for k, v in payload.items() if k not in workflow_dict})
if workflow_dict:
require_flush = False
raw_workflow_description = self.__normalize_workflow(trans, workflow_dict)
workflow_dict = raw_workflow_description.as_dict
new_workflow_name = workflow_dict.get("name")
old_workflow = stored_workflow.latest_workflow
name_updated = new_workflow_name and new_workflow_name != stored_workflow.name
steps_updated = "steps" in workflow_dict
if name_updated and not steps_updated:
sanitized_name = sanitize_html(new_workflow_name or old_workflow.name)
workflow = old_workflow.copy(user=trans.user)
workflow.stored_workflow = stored_workflow
workflow.name = sanitized_name
stored_workflow.name = sanitized_name
stored_workflow.latest_workflow = workflow
trans.sa_session.add(workflow)
trans.sa_session.add(stored_workflow)
require_flush = True
if "hidden" in workflow_dict and stored_workflow.hidden != workflow_dict["hidden"]:
stored_workflow.hidden = workflow_dict["hidden"]
require_flush = True
if "published" in workflow_dict and stored_workflow.published != workflow_dict["published"]:
stored_workflow.published = workflow_dict["published"]
require_flush = True
if "importable" in workflow_dict and stored_workflow.importable != workflow_dict["importable"]:
stored_workflow.importable = workflow_dict["importable"]
require_flush = True
if "annotation" in workflow_dict and not steps_updated:
newAnnotation = sanitize_html(workflow_dict["annotation"])
self.add_item_annotation(trans.sa_session, trans.user, stored_workflow, newAnnotation)
require_flush = True
if "menu_entry" in workflow_dict or "show_in_tool_panel" in workflow_dict:
show_in_panel = workflow_dict.get("menu_entry") or workflow_dict.get("show_in_tool_panel")
stored_workflow_menu_entries = trans.user.stored_workflow_menu_entries
decoded_id = trans.security.decode_id(id)
if show_in_panel:
workflow_ids = [wf.stored_workflow_id for wf in stored_workflow_menu_entries]
if decoded_id not in workflow_ids:
menu_entry = model.StoredWorkflowMenuEntry()
menu_entry.stored_workflow = stored_workflow
stored_workflow_menu_entries.append(menu_entry)
trans.sa_session.add(menu_entry)
require_flush = True
else:
# remove if in list
entries = {x.stored_workflow_id: x for x in stored_workflow_menu_entries}
if decoded_id in entries:
stored_workflow_menu_entries.remove(entries[decoded_id])
require_flush = True
# set tags
if "tags" in workflow_dict:
trans.app.tag_handler.set_tags_from_list(
user=trans.user, item=stored_workflow, new_tags_list=workflow_dict["tags"]
)
if require_flush:
trans.sa_session.flush()
if "steps" in workflow_dict:
try:
workflow_update_options = WorkflowUpdateOptions(**payload)
workflow, errors = self.workflow_contents_manager.update_workflow_from_raw_description(
trans,
stored_workflow,
raw_workflow_description,
workflow_update_options,
)
except MissingToolsException:
raise exceptions.MessageException(
"This workflow contains missing tools. It cannot be saved until they have been removed from the workflow or installed."
)
else:
message = "Updating workflow requires dictionary containing 'workflow' attribute with new JSON description."
raise exceptions.RequestParameterInvalidException(message)
return self.workflow_contents_manager.workflow_to_dict(trans, stored_workflow, style="instance")
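# Minimal sketch of a metadata-only update through the endpoint above. Top-level payload
# keys are merged into the workflow dict (see workflow_dict.update above), so no nested
# "workflow" key is required for these fields; id and values are placeholders.
#
#   requests.put(f"{base}/api/workflows/{workflow_id}", headers=headers, json={
#       "name": "Renamed workflow",
#       "annotation": "Updated description",
#       "tags": ["rna", "qc"],
#       "published": True,
#   })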
@expose_api
def refactor(self, trans, id, payload, **kwds):
"""
* PUT /api/workflows/{id}/refactor
updates the workflow stored with ``id``
:type id: str
:param id: the encoded id of the workflow to update
:param instance: true if fetch by Workflow ID instead of StoredWorkflow id, false
by default.
:type instance: boolean
:type payload: dict
:param payload: a dictionary containing list of actions to apply.
:rtype: dict
:returns: serialized version of the workflow
"""
stored_workflow = self.__get_stored_workflow(trans, id, **kwds)
refactor_request = RefactorRequest(**payload)
return self.workflow_contents_manager.refactor(trans, stored_workflow, refactor_request)
@expose_api
def build_module(self, trans: GalaxyWebTransaction, payload=None):
"""
POST /api/workflows/build_module
Builds module models for the workflow editor.
"""
if payload is None:
payload = {}
inputs = payload.get("inputs", {})
trans.workflow_building_mode = workflow_building_modes.ENABLED
module = module_factory.from_dict(trans, payload, from_tool_form=True)
if "tool_state" not in payload:
module_state: Dict[str, Any] = {}
populate_state(trans, module.get_inputs(), inputs, module_state, check=False)
module.recover_state(module_state, from_tool_form=True)
return {
"label": inputs.get("__label", ""),
"annotation": inputs.get("__annotation", ""),
"name": module.get_name(),
"tool_state": module.get_state(),
"content_id": module.get_content_id(),
"inputs": module.get_all_inputs(connectable_only=True),
"outputs": module.get_all_outputs(),
"config_form": module.get_config_form(),
"post_job_actions": module.get_post_job_actions(inputs),
}
@expose_api
def get_tool_predictions(self, trans: ProvidesUserContext, payload, **kwd):
"""
POST /api/workflows/get_tool_predictions
Fetch predicted tools for a workflow
:type payload: dict
:param payload:
a dictionary containing two parameters
'tool_sequence' - comma separated sequence of tool ids
'remote_model_url' - (optional) path to the deep learning model
"""
remote_model_url = payload.get("remote_model_url", trans.app.config.tool_recommendation_model_path)
tool_sequence = payload.get("tool_sequence", "")
if "tool_sequence" not in payload or remote_model_url is None:
return
tool_sequence, recommended_tools = self.tool_recommendations.get_predictions(
trans, tool_sequence, remote_model_url
)
return {"current_tool": tool_sequence, "predicted_data": recommended_tools}
#
# -- Helper methods --
#
def __api_import_from_archive(self, trans: GalaxyWebTransaction, archive_data, source=None, payload=None):
payload = payload or {}
try:
data = json.loads(archive_data)
except Exception:
if "GalaxyWorkflow" in archive_data:
data = {"yaml_content": archive_data}
else:
raise exceptions.MessageException("The data content does not appear to be a valid workflow.")
if not data:
raise exceptions.MessageException("The data content is missing.")
raw_workflow_description = self.__normalize_workflow(trans, data)
workflow_create_options = WorkflowCreateOptions(**payload)
workflow, missing_tool_tups = self._workflow_from_dict(
trans, raw_workflow_description, workflow_create_options, source=source
)
workflow_id = workflow.id
workflow = workflow.latest_workflow
response = {
"message": f"Workflow '{escape(workflow.name)}' imported successfully.",
"status": "success",
"id": trans.security.encode_id(workflow_id),
}
if workflow.has_errors:
response["message"] = "Imported, but some steps in this workflow have validation errors."
response["status"] = "error"
elif len(workflow.steps) == 0:
response["message"] = "Imported, but this workflow has no steps."
response["status"] = "error"
elif workflow.has_cycles:
response["message"] = "Imported, but this workflow contains cycles."
response["status"] = "error"
return response
def __api_import_new_workflow(self, trans: GalaxyWebTransaction, payload, **kwd):
data = payload["workflow"]
raw_workflow_description = self.__normalize_workflow(trans, data)
workflow_create_options = WorkflowCreateOptions(**payload)
workflow, missing_tool_tups = self._workflow_from_dict(
trans,
raw_workflow_description,
workflow_create_options,
)
# id of the newly created Galaxy workflow
workflow_id = workflow.id
# API-encoded id
encoded_id = trans.security.encode_id(workflow_id)
item = workflow.to_dict(value_mapper={"id": trans.security.encode_id})
item["annotations"] = [x.annotation for x in workflow.annotations]
item["url"] = url_for("workflow", id=encoded_id)
item["owner"] = workflow.user.username
item["number_of_steps"] = len(workflow.latest_workflow.steps)
return item
def __normalize_workflow(self, trans: GalaxyWebTransaction, as_dict):
return self.workflow_contents_manager.normalize_workflow_format(trans, as_dict)
@expose_api
def import_shared_workflow_deprecated(self, trans: GalaxyWebTransaction, payload, **kwd):
"""
POST /api/workflows/import
Import a workflow shared by other users.
:param workflow_id: the workflow id (required)
:type workflow_id: str
:raises: exceptions.MessageException, exceptions.ObjectNotFound
"""
# Pull parameters out of payload.
workflow_id = payload.get("workflow_id", None)
if workflow_id is None:
raise exceptions.ObjectAttributeMissingException("Missing required parameter 'workflow_id'.")
self.__api_import_shared_workflow(trans, workflow_id, payload)
def __api_import_shared_workflow(self, trans: GalaxyWebTransaction, workflow_id, payload, **kwd):
try:
stored_workflow = self.get_stored_workflow(trans, workflow_id, check_ownership=False)
except Exception:
raise exceptions.ObjectNotFound(f"Malformed workflow id ( {workflow_id} ) specified.")
if stored_workflow.importable is False:
raise exceptions.ItemAccessibilityException(
"The owner of this workflow has disabled imports via this link."
)
elif stored_workflow.deleted:
raise exceptions.ItemDeletionException("You can't import this workflow because it has been deleted.")
imported_workflow = self._import_shared_workflow(trans, stored_workflow)
item = imported_workflow.to_dict(value_mapper={"id": trans.security.encode_id})
encoded_id = trans.security.encode_id(imported_workflow.id)
item["url"] = url_for("workflow", id=encoded_id)
return item
@expose_api
def invoke(self, trans: GalaxyWebTransaction, workflow_id, payload, **kwd):
"""
POST /api/workflows/{encoded_workflow_id}/invocations
Schedule the workflow specified by `workflow_id` to run.
.. note:: This method takes the same arguments as
:func:`galaxy.webapps.galaxy.api.workflows.WorkflowsAPIController.create` above.
:raises: exceptions.MessageException, exceptions.RequestParameterInvalidException
"""
# Get workflow + accessibility check.
stored_workflow = self.__get_stored_accessible_workflow(trans, workflow_id, instance=kwd.get("instance", False))
workflow = stored_workflow.latest_workflow
run_configs = build_workflow_run_configs(trans, workflow, payload)
is_batch = payload.get("batch")
if not is_batch and len(run_configs) != 1:
raise exceptions.RequestParameterInvalidException("Must specify 'batch' to use batch parameters.")
require_exact_tool_versions = util.string_as_bool(payload.get("require_exact_tool_versions", "true"))
tools = self.workflow_contents_manager.get_all_tools(workflow)
missing_tools = [
tool
for tool in tools
if not self.app.toolbox.has_tool(
tool["tool_id"], tool_version=tool["tool_version"], exact=require_exact_tool_versions
)
]
if missing_tools:
missing_tools_message = "Workflow was not invoked; the following required tools are not installed: "
if require_exact_tool_versions:
missing_tools_message += ", ".join(
[f"{tool['tool_id']} (version {tool['tool_version']})" for tool in missing_tools]
)
else:
missing_tools_message += ", ".join([tool["tool_id"] for tool in missing_tools])
raise exceptions.MessageException(missing_tools_message)
invocations = []
for run_config in run_configs:
workflow_scheduler_id = payload.get("scheduler", None)
# TODO: workflow scheduler hints
work_request_params = dict(scheduler=workflow_scheduler_id)
workflow_invocation = queue_invoke(
trans=trans,
workflow=workflow,
workflow_run_config=run_config,
request_params=work_request_params,
flush=False,
)
invocations.append(workflow_invocation)
trans.sa_session.flush()
invocations = [self.encode_all_ids(trans, invocation.to_dict(), recursive=True) for invocation in invocations]
if is_batch:
return invocations
else:
return invocations[0]
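# Illustrative invocation request for the endpoint above. Only "batch", "scheduler", and
# "require_exact_tool_versions" appear in this method; the "history_id"/"inputs" keys
# below reflect how build_workflow_run_configs is commonly fed and should be treated as
# an assumption here, with placeholder ids.
#
#   requests.post(f"{base}/api/workflows/{workflow_id}/invocations", headers=headers, json={
#       "history_id": "<encoded_history_id>",
#       "inputs": {"0": {"src": "hda", "id": "<encoded_dataset_id>"}},   # hypothetical mapping
#       "require_exact_tool_versions": "false",
#   })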
@expose_api
def index_invocations(self, trans: GalaxyWebTransaction, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations
GET /api/invocations
Get the list of a user's workflow invocations. If workflow_id is supplied
(either via URL or query parameter) it should be an encoded StoredWorkflow id
and returned invocations will be restricted to that workflow. history_id (an encoded
History id) can be used to further restrict the query. If neither a workflow_id nor a
history_id is supplied, all the current user's workflow invocations will be indexed
(as determined by the invocation being executed on one of the user's histories).
:param workflow_id: an encoded stored workflow id to restrict query to
:type workflow_id: str
:param instance: true if fetch by Workflow ID instead of StoredWorkflow id, false
by default.
:type instance: boolean
:param history_id: an encoded history id to restrict query to
:type history_id: str
:param job_id: an encoded job id to restrict query to
:type job_id: str
:param user_id: an encoded user id to restrict query to, must be own id if not admin user
:type user_id: str
:param view: level of detail to return per invocation 'element' or 'collection'.
:type view: str
:param step_details: If 'view' is 'element', also include details on individual steps.
:type step_details: bool
:raises: exceptions.MessageException, exceptions.ObjectNotFound
"""
invocation_payload = InvocationIndexPayload(**kwd)
serialization_params = InvocationSerializationParams(**kwd)
invocations, total_matches = self.invocations_service.index(trans, invocation_payload, serialization_params)
trans.response.headers["total_matches"] = total_matches
return invocations
@expose_api_anonymous
def create_invocations_from_store(self, trans, payload, **kwd):
"""
POST /api/invocations/from_store
Create invocation(s) from a supplied model store.
Input can be an archive describing a Galaxy model store containing a
workflow invocation - for instance one created with the write_store
or prepare_store_download endpoint.
"""
create_payload = CreateInvocationFromStore(**payload)
serialization_params = InvocationSerializationParams(**payload)
# refactor into a service...
return self._create_from_store(trans, create_payload, serialization_params)
def _create_from_store(
self, trans, payload: CreateInvocationFromStore, serialization_params: InvocationSerializationParams
):
history = self.history_manager.get_owned(
self.decode_id(payload.history_id), trans.user, current_history=trans.history
)
object_tracker = self.create_objects_from_store(
trans,
payload,
history=history,
)
return self.invocations_service.serialize_workflow_invocations(
object_tracker.invocations_by_key.values(), serialization_params
)
@expose_api
def show_invocation(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations/{invocation_id}
GET /api/invocations/{invocation_id}
Get detailed description of workflow invocation
:param invocation_id: the invocation id (required)
:type invocation_id: str
:param step_details: fetch details about individual invocation steps
and populate a steps attribute in the resulting
dictionary. Defaults to false.
:type step_details: bool
:param legacy_job_state: If step_details is true, and this is set to true
populate the invocation step state with the job state
instead of the invocation step state. This will also
produce one step per job in mapping jobs to mimic the
older behavior with respect to collections. Partially
scheduled steps may provide incomplete information,
and the listed step outputs are not the mapped-over
step outputs but the individual job outputs
when this is set - at least for now.
:type legacy_job_state: bool
:raises: exceptions.MessageException, exceptions.ObjectNotFound
"""
decoded_workflow_invocation_id = self.decode_id(invocation_id)
workflow_invocation = self.workflow_manager.get_invocation(trans, decoded_workflow_invocation_id, eager=True)
if not workflow_invocation:
raise exceptions.ObjectNotFound()
return self.__encode_invocation(workflow_invocation, **kwd)
@expose_api
def cancel_invocation(self, trans: ProvidesUserContext, invocation_id, **kwd):
"""
DELETE /api/workflows/{workflow_id}/invocations/{invocation_id}
DELETE /api/invocations/{invocation_id}
Cancel the specified workflow invocation.
:param invocation_id: the usage id (required)
:type invocation_id: str
:raises: exceptions.MessageException, exceptions.ObjectNotFound
"""
decoded_workflow_invocation_id = self.decode_id(invocation_id)
workflow_invocation = self.workflow_manager.cancel_invocation(trans, decoded_workflow_invocation_id)
return self.__encode_invocation(workflow_invocation, **kwd)
@expose_api
def show_invocation_report(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations/{invocation_id}/report
GET /api/invocations/{invocation_id}/report
Get JSON summarizing invocation for reporting.
"""
kwd["format"] = "json"
return self.workflow_manager.get_invocation_report(trans, invocation_id, **kwd)
@expose_api_raw
def show_invocation_report_pdf(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations/{invocation_id}/report.pdf
GET /api/invocations/{invocation_id}/report.pdf
Get a PDF summarizing the invocation for reporting.
"""
kwd["format"] = "pdf"
trans.response.set_content_type("application/pdf")
return self.workflow_manager.get_invocation_report(trans, invocation_id, **kwd)
def _generate_invocation_bco(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
decoded_workflow_invocation_id = self.decode_id(invocation_id)
workflow_invocation = self.workflow_manager.get_invocation(trans, decoded_workflow_invocation_id)
history = workflow_invocation.history
workflow = workflow_invocation.workflow
stored_workflow = workflow.stored_workflow
# pull in the user info from those with whom the history and workflow have been shared
contributing_users = [stored_workflow.user]
# may want to extend this to have more reviewers.
reviewing_users = [stored_workflow.user]
encoded_workflow_id = trans.security.encode_id(stored_workflow.id)
encoded_history_id = trans.security.encode_id(history.id)
dict_workflow = json.loads(self.workflow_dict(trans, encoded_workflow_id))
spec_version = kwd.get("spec_version", "https://w3id.org/ieee/ieee-2791-schema/2791object.json")
for i, w in enumerate(reversed(stored_workflow.workflows)):
if workflow == w:
current_version = i
contributors = []
for contributing_user in contributing_users:
contributor = {
"orcid": kwd.get("xref", []),
"name": contributing_user.username,
"affiliation": "",
"contribution": ["authoredBy"],
"email": contributing_user.email,
}
contributors.append(contributor)
reviewers = []
for reviewing_user in reviewing_users:
reviewer = {
"status": "approved",
"reviewer_comment": "",
"date": workflow_invocation.update_time.isoformat(),
"reviewer": {
"orcid": kwd.get("orcid", []),
"name": reviewing_user.username,
"affiliation": "",
"contribution": "curatedBy",
"email": reviewing_user.email,
},
}
reviewers.append(reviewer)
provenance_domain = {
"name": workflow.name,
"version": current_version,
"review": reviewers,
"derived_from": url_for("workflow", id=encoded_workflow_id, qualified=True),
"created": workflow_invocation.create_time.isoformat(),
"modified": workflow_invocation.update_time.isoformat(),
"contributors": contributors,
"license": "https://spdx.org/licenses/CC-BY-4.0.html",
}
keywords = []
for tag in stored_workflow.tags:
keywords.append(tag.user_tname)
for tag in history.tags:
if tag.user_tname not in keywords:
keywords.append(tag.user_tname)
metrics = {}
tools, input_subdomain, output_subdomain, pipeline_steps, software_prerequisites = [], [], [], [], []
for step in workflow_invocation.steps:
if step.workflow_step.type == "tool":
workflow_outputs_list, output_list, input_list = set(), [], []
for wo in step.workflow_step.workflow_outputs:
workflow_outputs_list.add(wo.output_name)
for job in step.jobs:
metrics[i] = summarize_job_metrics(trans, job)
for job_input in job.input_datasets:
if hasattr(job_input.dataset, "dataset_id"):
encoded_dataset_id = trans.security.encode_id(job_input.dataset.dataset_id)
input_obj = {
# TODO: that should maybe be a step prefix + element identifier where appropriate.
"filename": job_input.dataset.name,
"uri": url_for(
"history_content",
history_id=encoded_history_id,
id=encoded_dataset_id,
qualified=True,
),
"access_time": job_input.dataset.create_time.isoformat(),
}
input_list.append(input_obj)
for job_output in job.output_datasets:
if hasattr(job_output.dataset, "dataset_id"):
encoded_dataset_id = trans.security.encode_id(job_output.dataset.dataset_id)
output_obj = {
"filename": job_output.dataset.name,
"uri": url_for(
"history_content",
history_id=encoded_history_id,
id=encoded_dataset_id,
qualified=True,
),
"access_time": job_output.dataset.create_time.isoformat(),
}
output_list.append(output_obj)
if job_output.name in workflow_outputs_list:
output = {
"mediatype": job_output.dataset.extension,
"uri": {
"filename": job_output.dataset.name,
"uri": url_for(
"history_content",
history_id=encoded_history_id,
id=encoded_dataset_id,
qualified=True,
),
"access_time": job_output.dataset.create_time.isoformat(),
},
}
output_subdomain.append(output)
workflow_step = step.workflow_step
step_index = workflow_step.order_index
current_step = dict_workflow["steps"][str(step_index)]
pipeline_step = {
"step_number": step_index,
"name": current_step["name"],
"description": current_step["annotation"],
"version": current_step["tool_version"],
"prerequisite": kwd.get("prerequisite", []),
"input_list": input_list,
"output_list": output_list,
}
pipeline_steps.append(pipeline_step)
try:
software_prerequisite = {
"name": current_step["content_id"],
"version": current_step["tool_version"],
"uri": {"uri": current_step["content_id"], "access_time": current_step["uuid"]},
}
if software_prerequisite["uri"]["uri"] not in tools:
software_prerequisites.append(software_prerequisite)
tools.append(software_prerequisite["uri"]["uri"])
except Exception:
continue
if step.workflow_step.type == "data_input" and step.output_datasets:
for output_assoc in step.output_datasets:
encoded_dataset_id = trans.security.encode_id(output_assoc.dataset_id)
input_obj = {
"filename": step.workflow_step.label,
"uri": url_for(
"history_content", history_id=encoded_history_id, id=encoded_dataset_id, qualified=True
),
"access_time": step.workflow_step.update_time.isoformat(),
}
input_subdomain.append(input_obj)
if step.workflow_step.type == "data_collection_input" and step.output_dataset_collections:
for output_dataset_collection_association in step.output_dataset_collections:
encoded_dataset_id = trans.security.encode_id(
output_dataset_collection_association.dataset_collection_id
)
input_obj = {
"filename": step.workflow_step.label,
"uri": url_for(
"history_content",
history_id=encoded_history_id,
id=encoded_dataset_id,
type="dataset_collection",
qualified=True,
),
"access_time": step.workflow_step.update_time.isoformat(),
}
input_subdomain.append(input_obj)
usability_domain = []
for a in stored_workflow.annotations:
usability_domain.append(a.annotation)
for h in history.annotations:
usability_domain.append(h.annotation)
parametric_domain = []
for inv_step in workflow_invocation.steps:
try:
for k, v in inv_step.workflow_step.tool_inputs.items():
param, value, step = k, v, inv_step.workflow_step.order_index
parametric_domain.append({"param": param, "value": value, "step": step})
except Exception:
continue
execution_domain = {
"script_access_type": "a_galaxy_workflow",
"script": [url_for("workflows", encoded_workflow_id=encoded_workflow_id, qualified=True)],
"script_driver": "Galaxy",
"software_prerequisites": software_prerequisites,
"external_data_endpoints": [
{"name": "Access to Galaxy", "url": url_for("/", qualified=True)},
kwd.get("external_data_endpoints"),
],
"environment_variables": kwd.get("environment_variables", {}),
}
extension = [
{
"extension_schema": "https://raw.githubusercontent.com/biocompute-objects/extension_domain/6d2cd8482e6075746984662edcf78b57d3d38065/galaxy/galaxy_extension.json",
"galaxy_extension": {
"galaxy_url": url_for("/", qualified=True),
"galaxy_version": VERSION,
# TODO:
# 'aws_estimate': aws_estimate,
# 'job_metrics': metrics
},
}
]
error_domain = {
"empirical_error": kwd.get("empirical_error", []),
"algorithmic_error": kwd.get("algorithmic_error", []),
}
bco_dict = {
"provenance_domain": provenance_domain,
"usability_domain": usability_domain,
"extension_domain": extension,
"description_domain": {
"keywords": keywords,
"xref": kwd.get("xref", []),
"platform": ["Galaxy"],
"pipeline_steps": pipeline_steps,
},
"execution_domain": execution_domain,
"parametric_domain": parametric_domain,
"io_domain": {
"input_subdomain": input_subdomain,
"output_subdomain": output_subdomain,
},
"error_domain": error_domain,
}
# Generate etag from the BCO excluding object_id and spec_version, as
# specified in https://github.com/biocompute-objects/BCO_Specification/blob/main/docs/top-level.md#203-etag-etag
etag = hashlib.sha256(json.dumps(bco_dict, sort_keys=True).encode()).hexdigest()
bco_dict.update(
{
"object_id": url_for(
controller=f"api/invocations/{invocation_id}", action="biocompute", qualified=True
),
"spec_version": spec_version,
"etag": etag,
}
)
return bco_dict
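    # Hedged sketch (not part of this controller): a consumer could verify the
    # etag produced above by re-hashing the BCO with the post-hash fields
    # removed, e.g.
    #   check = {k: v for k, v in bco.items() if k not in ("object_id", "spec_version", "etag")}
    #   assert hashlib.sha256(json.dumps(check, sort_keys=True).encode()).hexdigest() == bco["etag"]
    # where `bco` is assumed to be the dict returned by the endpoints below.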
@expose_api
def export_invocation_bco(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
"""
        GET /api/invocations/{invocation_id}/biocompute
        Return a BioCompute Object for the workflow invocation.
        The BioCompute Object endpoints are in beta - important details such
        as how inputs and outputs are represented, how the workflow is encoded,
        and how author, version, and URL information is generated will very
        likely change in important ways over time.
"""
return self._generate_invocation_bco(trans, invocation_id, **kwd)
@expose_api_raw
def download_invocation_bco(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
"""
        GET /api/invocations/{invocation_id}/biocompute/download
        Returns a selected BioCompute Object as a file for download (HTTP
        headers configured with filename and such).
        The BioCompute Object endpoints are in beta - important details such
        as how inputs and outputs are represented, how the workflow is encoded,
        and how author, version, and URL information is generated will very
        likely change in important ways over time.
"""
ret_dict = self._generate_invocation_bco(trans, invocation_id, **kwd)
trans.response.headers["Content-Disposition"] = f'attachment; filename="bco_{invocation_id}.json"'
trans.response.set_content_type("application/json")
return format_return_as_json(ret_dict, pretty=True)
@expose_api
def invocation_step(self, trans, invocation_id, step_id, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations/{invocation_id}/steps/{step_id}
GET /api/invocations/{invocation_id}/steps/{step_id}
:param invocation_id: the invocation id (required)
:type invocation_id: str
:param step_id: encoded id of the WorkflowInvocationStep (required)
:type step_id: str
:raises: exceptions.MessageException, exceptions.ObjectNotFound
"""
decoded_invocation_step_id = self.decode_id(step_id)
invocation_step = self.workflow_manager.get_invocation_step(trans, decoded_invocation_step_id)
return self.__encode_invocation_step(trans, invocation_step)
@expose_api_anonymous_and_sessionless
def invocation_step_jobs_summary(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations/{invocation_id}/step_jobs_summary
GET /api/invocations/{invocation_id}/step_jobs_summary
        Return job state summary info aggregated per step of the workflow invocation
Warning: We allow anyone to fetch job state information about any object they
can guess an encoded ID for - it isn't considered protected data. This keeps
polling IDs as part of state calculation for large histories and collections as
efficient as possible.
:param invocation_id: the invocation id (required)
:type invocation_id: str
:rtype: dict[]
:returns: an array of job summary object dictionaries for each step
"""
decoded_invocation_id = self.decode_id(invocation_id)
ids = []
types = []
for (job_source_type, job_source_id, _) in invocation_job_source_iter(trans.sa_session, decoded_invocation_id):
ids.append(job_source_id)
types.append(job_source_type)
return [self.encode_all_ids(trans, s) for s in fetch_job_states(trans.sa_session, ids, types)]
@expose_api_anonymous_and_sessionless
def invocation_jobs_summary(self, trans: GalaxyWebTransaction, invocation_id, **kwd):
"""
GET /api/workflows/{workflow_id}/invocations/{invocation_id}/jobs_summary
GET /api/invocations/{invocation_id}/jobs_summary
        Return job state summary info aggregated across all current jobs of the workflow invocation
Warning: We allow anyone to fetch job state information about any object they
can guess an encoded ID for - it isn't considered protected data. This keeps
polling IDs as part of state calculation for large histories and collections as
efficient as possible.
:param invocation_id: the invocation id (required)
:type invocation_id: str
:rtype: dict
:returns: a job summary object merged for all steps in workflow invocation
"""
ids = [self.decode_id(invocation_id)]
types = ["WorkflowInvocation"]
return [self.encode_all_ids(trans, s) for s in fetch_job_states(trans.sa_session, ids, types)][0]
@expose_api
def update_invocation_step(self, trans: GalaxyWebTransaction, invocation_id, step_id, payload, **kwd):
"""
PUT /api/workflows/{workflow_id}/invocations/{invocation_id}/steps/{step_id}
PUT /api/invocations/{invocation_id}/steps/{step_id}
        Update the state of a running workflow step invocation. The semantics
        are still evolving, but this is intended for actions such as confirming
        that paused steps can proceed.
        :param  invocation_id:      the invocation id (required)
:type invocation_id: str
:param step_id: encoded id of the WorkflowInvocationStep (required)
:type step_id: str
:raises: exceptions.MessageException, exceptions.ObjectNotFound
"""
decoded_invocation_step_id = self.decode_id(step_id)
action = payload.get("action", None)
invocation_step = self.workflow_manager.update_invocation_step(
trans,
decoded_invocation_step_id,
action=action,
)
return self.__encode_invocation_step(trans, invocation_step)
def _workflow_from_dict(self, trans, data, workflow_create_options, source=None):
"""Creates a workflow from a dict.
        The created workflow is stored in the database and returned.
"""
publish = workflow_create_options.publish
importable = workflow_create_options.is_importable
if publish and not importable:
raise exceptions.RequestParameterInvalidException("Published workflow must be importable.")
workflow_contents_manager = self.app.workflow_contents_manager
raw_workflow_description = workflow_contents_manager.ensure_raw_description(data)
created_workflow = workflow_contents_manager.build_workflow_from_raw_description(
trans,
raw_workflow_description,
workflow_create_options,
source=source,
)
if importable:
self._make_item_accessible(trans.sa_session, created_workflow.stored_workflow)
trans.sa_session.flush()
self._import_tools_if_needed(trans, workflow_create_options, raw_workflow_description)
return created_workflow.stored_workflow, created_workflow.missing_tools
def _import_tools_if_needed(self, trans, workflow_create_options, raw_workflow_description):
if not workflow_create_options.import_tools:
return
if not trans.user_is_admin:
raise exceptions.AdminRequiredException()
data = raw_workflow_description.as_dict
tools = {}
for key in data["steps"]:
item = data["steps"][key]
if item is not None:
if "tool_shed_repository" in item:
tool_shed_repository = item["tool_shed_repository"]
if (
"owner" in tool_shed_repository
and "changeset_revision" in tool_shed_repository
and "name" in tool_shed_repository
and "tool_shed" in tool_shed_repository
):
toolstr = (
tool_shed_repository["owner"]
+ tool_shed_repository["changeset_revision"]
+ tool_shed_repository["name"]
+ tool_shed_repository["tool_shed"]
)
tools[toolstr] = tool_shed_repository
irm = InstallRepositoryManager(self.app)
install_options = workflow_create_options.install_options
for k in tools:
item = tools[k]
tool_shed_url = f"https://{item['tool_shed']}/"
name = item["name"]
owner = item["owner"]
changeset_revision = item["changeset_revision"]
irm.install(tool_shed_url, name, owner, changeset_revision, install_options)
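    # Hedged illustration of the step payload consumed by the loop above
    # (field values are made up): a tool step may carry
    #   {"tool_shed_repository": {"owner": "iuc", "name": "some_tool",
    #     "changeset_revision": "0123abcdef42", "tool_shed": "toolshed.g2.bx.psu.edu"}}
    # and each unique repository is installed exactly once via
    # InstallRepositoryManager.install().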
def __encode_invocation_step(self, trans: ProvidesUserContext, invocation_step):
return self.encode_all_ids(trans, invocation_step.to_dict("element"), True)
def __get_stored_accessible_workflow(self, trans, workflow_id, **kwd):
instance = util.string_as_bool(kwd.get("instance", "false"))
return self.workflow_manager.get_stored_accessible_workflow(trans, workflow_id, by_stored_id=not instance)
def __get_stored_workflow(self, trans, workflow_id, **kwd):
instance = util.string_as_bool(kwd.get("instance", "false"))
return self.workflow_manager.get_stored_workflow(trans, workflow_id, by_stored_id=not instance)
def __encode_invocation(self, invocation, **kwd):
params = InvocationSerializationParams(**kwd)
return self.invocations_service.serialize_workflow_invocation(invocation, params)
StoredWorkflowIDPathParam: EncodedDatabaseIdField = Path(
..., title="Stored Workflow ID", description="The encoded database identifier of the Stored Workflow."
)
InvocationIDPathParam: EncodedDatabaseIdField = Path(
..., title="Invocation ID", description="The encoded database identifier of the Invocation."
)
DeletedQueryParam: bool = Query(
default=False, title="Display deleted", description="Whether to restrict result to deleted workflows."
)
HiddenQueryParam: bool = Query(
default=False, title="Display hidden", description="Whether to restrict result to hidden workflows."
)
MissingToolsQueryParam: bool = Query(
default=False,
title="Display missing tools",
description="Whether to include a list of missing tools per workflow entry",
)
ShowPublishedQueryParam: Optional[bool] = Query(default=None, title="Include published workflows.", description="")
ShowSharedQueryParam: Optional[bool] = Query(
default=None, title="Include workflows shared with authenticated user.", description=""
)
SortByQueryParam: Optional[WorkflowSortByEnum] = Query(
default=None,
title="Sort workflow index by this attribute",
    description="If unspecified, the default ordering depends on other parameters, but generally the user's own workflows appear first, sorted by update time",
)
SortDescQueryParam: Optional[bool] = Query(
default=None,
title="Sort Descending",
description="Sort in descending order?",
)
LimitQueryParam: Optional[int] = Query(default=None, title="Limit number of workflows to return.")
OffsetQueryParam: Optional[int] = Query(
default=0,
title="Number of workflows to skip in sorted query (to enable pagination).",
)
query_tags = [
IndexQueryTag("name", "The stored workflow's name.", "n"),
    IndexQueryTag(
        "tag",
        "The workflow's tag. If the tag contains a colon, an attempt will be made to match the key and value of the tag separately.",
        "t",
    ),
IndexQueryTag("user", "The stored workflow's owner's username.", "u"),
    IndexQueryTag(
        "is:published",
        "Include only published workflows in the final result. Be sure the query parameter `show_published` is set to `true` if you want to include all published workflows and not just the requesting user's.",
    ),
    IndexQueryTag(
        "is:share_with_me",
        "Include only workflows shared with the requesting user. Be sure the query parameter `show_shared` is set to `true` if you want to include shared workflows.",
    ),
]
SearchQueryParam: Optional[str] = search_query_param(
model_name="Stored Workflow",
tags=query_tags,
free_text_fields=["name", "tag", "user"],
)
SkipStepCountsQueryParam: bool = Query(
default=False,
title="Skip step counts.",
description="Set this to true to skip joining workflow step counts and optimize the resulting index query. Response objects will not contain step counts.",
)
@router.cbv
class FastAPIWorkflows:
service: WorkflowsService = depends(WorkflowsService)
invocations_service: InvocationsService = depends(InvocationsService)
@router.get(
"/api/workflows",
summary="Lists stored workflows viewable by the user.",
response_description="A list with summary stored workflow information per viewable entry.",
)
def index(
self,
response: Response,
trans: ProvidesUserContext = DependsOnTrans,
show_deleted: bool = DeletedQueryParam,
show_hidden: bool = HiddenQueryParam,
missing_tools: bool = MissingToolsQueryParam,
show_published: Optional[bool] = ShowPublishedQueryParam,
show_shared: Optional[bool] = ShowSharedQueryParam,
sort_by: Optional[WorkflowSortByEnum] = SortByQueryParam,
sort_desc: Optional[bool] = SortDescQueryParam,
limit: Optional[int] = LimitQueryParam,
offset: Optional[int] = OffsetQueryParam,
search: Optional[str] = SearchQueryParam,
skip_step_counts: bool = SkipStepCountsQueryParam,
) -> List[Dict[str, Any]]:
"""Lists stored workflows viewable by the user."""
payload = WorkflowIndexPayload(
show_published=show_published,
show_hidden=show_hidden,
show_deleted=show_deleted,
show_shared=show_shared,
missing_tools=missing_tools,
sort_by=sort_by,
sort_desc=sort_desc,
limit=limit,
offset=offset,
search=search,
skip_step_counts=skip_step_counts,
)
workflows, total_matches = self.service.index(trans, payload, include_total_count=True)
response.headers["total_matches"] = str(total_matches)
return workflows
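    # Hedged request sketch (query values illustrative, not an API contract):
    #   GET /api/workflows?show_published=true&sort_by=update_time&sort_desc=true&limit=20&offset=0&search=name:mapping
    # returns a JSON list of workflow summaries; the total number of matches is
    # exposed in the `total_matches` response header set above.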
@router.get(
"/api/workflows/{id}/sharing",
summary="Get the current sharing status of the given item.",
)
def sharing(
self,
trans: ProvidesUserContext = DependsOnTrans,
id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
) -> SharingStatus:
"""Return the sharing status of the item."""
return self.service.shareable_service.sharing(trans, id)
@router.put(
"/api/workflows/{id}/enable_link_access",
summary="Makes this item accessible by a URL link.",
)
def enable_link_access(
self,
trans: ProvidesUserContext = DependsOnTrans,
id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
) -> SharingStatus:
"""Makes this item accessible by a URL link and return the current sharing status."""
return self.service.shareable_service.enable_link_access(trans, id)
@router.put(
"/api/workflows/{id}/disable_link_access",
summary="Makes this item inaccessible by a URL link.",
)
def disable_link_access(
self,
trans: ProvidesUserContext = DependsOnTrans,
id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
) -> SharingStatus:
"""Makes this item inaccessible by a URL link and return the current sharing status."""
return self.service.shareable_service.disable_link_access(trans, id)
@router.put(
"/api/workflows/{id}/publish",
summary="Makes this item public and accessible by a URL link.",
)
def publish(
self,
trans: ProvidesUserContext = DependsOnTrans,
id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
) -> SharingStatus:
"""Makes this item publicly available by a URL link and return the current sharing status."""
return self.service.shareable_service.publish(trans, id)
@router.put(
"/api/workflows/{id}/unpublish",
summary="Removes this item from the published list.",
)
def unpublish(
self,
trans: ProvidesUserContext = DependsOnTrans,
id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
) -> SharingStatus:
"""Removes this item from the published list and return the current sharing status."""
return self.service.shareable_service.unpublish(trans, id)
@router.put(
"/api/workflows/{id}/share_with_users",
summary="Share this item with specific users.",
)
def share_with_users(
self,
trans: ProvidesUserContext = DependsOnTrans,
id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
payload: ShareWithPayload = Body(...),
) -> ShareWithStatus:
"""Shares this item with specific users and return the current sharing status."""
return self.service.shareable_service.share_with_users(trans, id, payload)
@router.put(
"/api/workflows/{id}/slug",
summary="Set a new slug for this shared item.",
status_code=status.HTTP_204_NO_CONTENT,
)
def set_slug(
self,
trans: ProvidesUserContext = DependsOnTrans,
id: EncodedDatabaseIdField = StoredWorkflowIDPathParam,
payload: SetSlugPayload = Body(...),
):
"""Sets a new slug to access this item by URL. The new slug must be unique."""
self.service.shareable_service.set_slug(trans, id, payload)
return Response(status_code=status.HTTP_204_NO_CONTENT)
@router.post(
"/api/invocations/{invocation_id}/prepare_store_download",
        summary="Prepare a workflow invocation export-style download.",
)
def prepare_store_download(
self,
trans: ProvidesUserContext = DependsOnTrans,
invocation_id: EncodedDatabaseIdField = InvocationIDPathParam,
payload: PrepareStoreDownloadPayload = Body(...),
) -> AsyncFile:
return self.invocations_service.prepare_store_download(
trans,
invocation_id,
payload,
)
@router.post(
"/api/invocations/{invocation_id}/write_store",
        summary="Prepare a workflow invocation export-style download and write to supplied URI.",
)
def write_store(
self,
trans: ProvidesUserContext = DependsOnTrans,
invocation_id: EncodedDatabaseIdField = InvocationIDPathParam,
payload: WriteStoreToPayload = Body(...),
) -> AsyncTaskResultSummary:
rval = self.invocations_service.write_store(
trans,
invocation_id,
payload,
)
return rval
|
import argparse
import asyncio
import functools
import json
import logging
import re
import shlex
import urllib.request
import zlib
import ModuleUpdate
ModuleUpdate.update()
import websockets
import aioconsole
import Items
import Regions
from MultiClient import ReceivedItem, get_item_name_from_id, get_location_name_from_address
class Client:
def __init__(self, socket):
self.socket = socket
self.auth = False
self.name = None
self.team = None
self.slot = None
self.send_index = 0
class Context:
def __init__(self, host, port, password):
self.data_filename = None
self.save_filename = None
self.disable_save = False
self.player_names = {}
self.rom_names = {}
self.remote_items = set()
self.locations = {}
self.host = host
self.port = port
self.password = password
self.server = None
self.countdown_timer = 0
self.clients = []
self.received_items = {}
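# Hedged sketch of the wire format used below: each websocket frame is a JSON
# list of [command, arguments] pairs. For example, a connecting client might
# send
#   [["Connect", {"password": None, "rom": [82, 97, 110, 100, 111]}]]
# (the "rom" bytes are made up here), and the server answers with messages such
# as [["Connected", ...]] or [["ConnectionRefused", ["InvalidRom"]]].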
async def send_msgs(websocket, msgs):
if not websocket or not websocket.open or websocket.closed:
return
try:
await websocket.send(json.dumps(msgs))
except websockets.ConnectionClosed:
pass
def broadcast_all(ctx : Context, msgs):
for client in ctx.clients:
if client.auth:
asyncio.create_task(send_msgs(client.socket, msgs))
def broadcast_team(ctx : Context, team, msgs):
for client in ctx.clients:
if client.auth and client.team == team:
asyncio.create_task(send_msgs(client.socket, msgs))
def notify_all(ctx : Context, text):
logging.info("Notice (all): %s" % text)
broadcast_all(ctx, [['Print', text]])
def notify_team(ctx : Context, team : int, text : str):
logging.info("Notice (Team #%d): %s" % (team+1, text))
broadcast_team(ctx, team, [['Print', text]])
def notify_client(client : Client, text : str):
if not client.auth:
return
logging.info("Notice (Player %s in team %d): %s" % (client.name, client.team+1, text))
asyncio.create_task(send_msgs(client.socket, [['Print', text]]))
async def server(websocket, path, ctx : Context):
client = Client(websocket)
ctx.clients.append(client)
try:
await on_client_connected(ctx, client)
async for data in websocket:
for msg in json.loads(data):
if len(msg) == 1:
cmd = msg
args = None
else:
cmd = msg[0]
args = msg[1]
await process_client_cmd(ctx, client, cmd, args)
except Exception as e:
if not isinstance(e, websockets.WebSocketException):
logging.exception(e)
finally:
await on_client_disconnected(ctx, client)
ctx.clients.remove(client)
async def on_client_connected(ctx : Context, client : Client):
await send_msgs(client.socket, [['RoomInfo', {
'password': ctx.password is not None,
'players': [(client.team, client.slot, client.name) for client in ctx.clients if client.auth]
}]])
async def on_client_disconnected(ctx : Context, client : Client):
if client.auth:
await on_client_left(ctx, client)
async def on_client_joined(ctx : Context, client : Client):
notify_all(ctx, "%s (Team #%d) has joined the game" % (client.name, client.team + 1))
async def on_client_left(ctx : Context, client : Client):
notify_all(ctx, "%s (Team #%d) has left the game" % (client.name, client.team + 1))
async def countdown(ctx : Context, timer):
notify_all(ctx, f'[Server]: Starting countdown of {timer}s')
if ctx.countdown_timer:
ctx.countdown_timer = timer
return
ctx.countdown_timer = timer
while ctx.countdown_timer > 0:
notify_all(ctx, f'[Server]: {ctx.countdown_timer}')
ctx.countdown_timer -= 1
await asyncio.sleep(1)
notify_all(ctx, f'[Server]: GO')
def get_connected_players_string(ctx : Context):
auth_clients = [c for c in ctx.clients if c.auth]
if not auth_clients:
return 'No player connected'
auth_clients.sort(key=lambda c: (c.team, c.slot))
current_team = 0
text = 'Team #1: '
for c in auth_clients:
if c.team != current_team:
text += f':: Team #{c.team + 1}: '
current_team = c.team
text += f'{c.name} '
return 'Connected players: ' + text[:-1]
def get_received_items(ctx : Context, team, player):
return ctx.received_items.setdefault((team, player), [])
def tuplize_received_items(items):
return [(item.item, item.location, item.player) for item in items]
def send_new_items(ctx : Context):
for client in ctx.clients:
if not client.auth:
continue
items = get_received_items(ctx, client.team, client.slot)
if len(items) > client.send_index:
asyncio.create_task(send_msgs(client.socket, [['ReceivedItems', (client.send_index, tuplize_received_items(items)[client.send_index:])]]))
client.send_index = len(items)
def forfeit_player(ctx : Context, team, slot):
all_locations = [values[0] for values in Regions.location_table.values() if type(values[0]) is int]
notify_all(ctx, "%s (Team #%d) has forfeited" % (ctx.player_names[(team, slot)], team + 1))
register_location_checks(ctx, team, slot, all_locations)
def register_location_checks(ctx : Context, team, slot, locations):
found_items = False
for location in locations:
if (location, slot) in ctx.locations:
target_item, target_player = ctx.locations[(location, slot)]
if target_player != slot or slot in ctx.remote_items:
found = False
recvd_items = get_received_items(ctx, team, target_player)
for recvd_item in recvd_items:
if recvd_item.location == location and recvd_item.player == slot:
found = True
break
if not found:
new_item = ReceivedItem(target_item, location, slot)
recvd_items.append(new_item)
if slot != target_player:
broadcast_team(ctx, team, [['ItemSent', (slot, location, target_player, target_item)]])
logging.info('(Team #%d) %s sent %s to %s (%s)' % (team+1, ctx.player_names[(team, slot)], get_item_name_from_id(target_item), ctx.player_names[(team, target_player)], get_location_name_from_address(location)))
found_items = True
send_new_items(ctx)
if found_items and not ctx.disable_save:
try:
with open(ctx.save_filename, "wb") as f:
jsonstr = json.dumps((list(ctx.rom_names.items()),
[(k, [i.__dict__ for i in v]) for k, v in ctx.received_items.items()]))
f.write(zlib.compress(jsonstr.encode("utf-8")))
except Exception as e:
logging.exception(e)
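# Hedged note on the save format written above: the multisave file is a
# zlib-compressed JSON pair of (rom_names items, received_items keyed by
# (team, slot)), which main() reads back on startup to restore state.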
async def process_client_cmd(ctx : Context, client : Client, cmd, args):
if type(cmd) is not str:
await send_msgs(client.socket, [['InvalidCmd']])
return
if cmd == 'Connect':
if not args or type(args) is not dict or \
'password' not in args or type(args['password']) not in [str, type(None)] or \
'rom' not in args or type(args['rom']) is not list:
await send_msgs(client.socket, [['InvalidArguments', 'Connect']])
return
errors = set()
if ctx.password is not None and args['password'] != ctx.password:
errors.add('InvalidPassword')
if tuple(args['rom']) not in ctx.rom_names:
errors.add('InvalidRom')
else:
team, slot = ctx.rom_names[tuple(args['rom'])]
if any([c.slot == slot and c.team == team for c in ctx.clients if c.auth]):
errors.add('SlotAlreadyTaken')
else:
client.name = ctx.player_names[(team, slot)]
client.team = team
client.slot = slot
if errors:
await send_msgs(client.socket, [['ConnectionRefused', list(errors)]])
else:
client.auth = True
reply = [['Connected', [(client.team, client.slot), [(p, n) for (t, p), n in ctx.player_names.items() if t == client.team]]]]
items = get_received_items(ctx, client.team, client.slot)
if items:
reply.append(['ReceivedItems', (0, tuplize_received_items(items))])
client.send_index = len(items)
await send_msgs(client.socket, reply)
await on_client_joined(ctx, client)
if not client.auth:
return
if cmd == 'Sync':
items = get_received_items(ctx, client.team, client.slot)
if items:
client.send_index = len(items)
await send_msgs(client.socket, [['ReceivedItems', (0, tuplize_received_items(items))]])
if cmd == 'LocationChecks':
if type(args) is not list:
await send_msgs(client.socket, [['InvalidArguments', 'LocationChecks']])
return
register_location_checks(ctx, client.team, client.slot, args)
if cmd == 'LocationScouts':
if type(args) is not list:
await send_msgs(client.socket, [['InvalidArguments', 'LocationScouts']])
return
locs = []
for location in args:
            if type(location) is not int or not 0 < location <= len(Regions.location_table):
await send_msgs(client.socket, [['InvalidArguments', 'LocationScouts']])
return
loc_name = list(Regions.location_table.keys())[location - 1]
target_item, target_player = ctx.locations[(Regions.location_table[loc_name][0], client.slot)]
replacements = {'SmallKey': 0xA2, 'BigKey': 0x9D, 'Compass': 0x8D, 'Map': 0x7D}
item_type = [i[2] for i in Items.item_table.values() if type(i[3]) is int and i[3] == target_item]
if item_type:
target_item = replacements.get(item_type[0], target_item)
locs.append([loc_name, location, target_item, target_player])
        logging.info(f"{client.name} in team {client.team+1} scouted {', '.join([l[0] for l in locs])}")
await send_msgs(client.socket, [['LocationInfo', [l[1:] for l in locs]]])
if cmd == 'Say':
if type(args) is not str or not args.isprintable():
await send_msgs(client.socket, [['InvalidArguments', 'Say']])
return
notify_all(ctx, client.name + ': ' + args)
if args.startswith('!players'):
notify_all(ctx, get_connected_players_string(ctx))
if args.startswith('!forfeit'):
forfeit_player(ctx, client.team, client.slot)
if args.startswith('!countdown'):
try:
timer = int(args.split()[1])
except (IndexError, ValueError):
timer = 10
asyncio.create_task(countdown(ctx, timer))
def set_password(ctx : Context, password):
ctx.password = password
logging.warning('Password set to ' + password if password is not None else 'Password disabled')
async def console(ctx : Context):
while True:
input = await aioconsole.ainput()
try:
command = shlex.split(input)
if not command:
continue
if command[0] == '/exit':
ctx.server.ws_server.close()
break
if command[0] == '/players':
logging.info(get_connected_players_string(ctx))
if command[0] == '/password':
set_password(ctx, command[1] if len(command) > 1 else None)
if command[0] == '/kick' and len(command) > 1:
team = int(command[2]) - 1 if len(command) > 2 and command[2].isdigit() else None
for client in ctx.clients:
if client.auth and client.name.lower() == command[1].lower() and (team is None or team == client.team):
if client.socket and not client.socket.closed:
await client.socket.close()
if command[0] == '/forfeitslot' and len(command) > 1 and command[1].isdigit():
if len(command) > 2 and command[2].isdigit():
team = int(command[1]) - 1
slot = int(command[2])
else:
team = 0
slot = int(command[1])
forfeit_player(ctx, team, slot)
if command[0] == '/forfeitplayer' and len(command) > 1:
seeked_player = command[1].lower()
for (team, slot), name in ctx.player_names.items():
if name.lower() == seeked_player:
forfeit_player(ctx, team, slot)
if command[0] == '/senditem' and len(command) > 2:
[(player, item)] = re.findall(r'\S* (\S*) (.*)', input)
if item in Items.item_table:
for client in ctx.clients:
if client.auth and client.name.lower() == player.lower():
new_item = ReceivedItem(Items.item_table[item][3], "cheat console", client.slot)
get_received_items(ctx, client.team, client.slot).append(new_item)
notify_all(ctx, 'Cheat console: sending "' + item + '" to ' + client.name)
send_new_items(ctx)
else:
logging.warning("Unknown item: " + item)
if command[0] == '/hint':
for (team,slot), name in ctx.player_names.items():
if len(command) == 1:
print("Use /hint {Playername} {itemname}\nFor example /hint Berserker Lamp")
elif name.lower() == command[1].lower():
item = " ".join(command[2:])
if item in Items.item_table:
seeked_item_id = Items.item_table[item][3]
for check, result in ctx.locations.items():
item_id, receiving_player = result
if receiving_player == slot and item_id == seeked_item_id:
location_id, finding_player = check
name_finder = ctx.player_names[team, finding_player]
hint = f"[Hint]: {name}'s {item} can be found at " \
f"{get_location_name_from_address(location_id)} in {name_finder}'s World"
notify_team(ctx, team, hint)
else:
logging.warning("Unknown item: " + item)
if command[0][0] != '/':
notify_all(ctx, '[Server]: ' + input)
except:
import traceback
traceback.print_exc()
async def main():
parser = argparse.ArgumentParser()
parser.add_argument('--host', default=None)
parser.add_argument('--port', default=38281, type=int)
parser.add_argument('--password', default=None)
parser.add_argument('--multidata', default=None)
parser.add_argument('--savefile', default=None)
parser.add_argument('--disable_save', default=False, action='store_true')
parser.add_argument('--loglevel', default='info', choices=['debug', 'info', 'warning', 'error', 'critical'])
args = parser.parse_args()
logging.basicConfig(format='[%(asctime)s] %(message)s', level=getattr(logging, args.loglevel.upper(), logging.INFO))
ctx = Context(args.host, args.port, args.password)
ctx.data_filename = args.multidata
try:
if not ctx.data_filename:
import tkinter
import tkinter.filedialog
root = tkinter.Tk()
root.withdraw()
ctx.data_filename = tkinter.filedialog.askopenfilename(filetypes=(("Multiworld data","*multidata"),))
with open(ctx.data_filename, 'rb') as f:
jsonobj = json.loads(zlib.decompress(f.read()).decode("utf-8"))
for team, names in enumerate(jsonobj['names']):
for player, name in enumerate(names, 1):
ctx.player_names[(team, player)] = name
ctx.rom_names = {tuple(rom): (team, slot) for slot, team, rom in jsonobj['roms']}
ctx.remote_items = set(jsonobj['remote_items'])
ctx.locations = {tuple(k): tuple(v) for k, v in jsonobj['locations']}
except Exception as e:
logging.error('Failed to read multiworld data (%s)' % e)
return
ip = urllib.request.urlopen('https://v4.ident.me').read().decode('utf8') if not ctx.host else ctx.host
logging.info('Hosting game at %s:%d (%s)' % (ip, ctx.port, 'No password' if not ctx.password else 'Password: %s' % ctx.password))
ctx.disable_save = args.disable_save
if not ctx.disable_save:
if not ctx.save_filename:
ctx.save_filename = (ctx.data_filename[:-9] if ctx.data_filename[-9:] == 'multidata' else (ctx.data_filename + '_')) + 'multisave'
try:
with open(ctx.save_filename, 'rb') as f:
jsonobj = json.loads(zlib.decompress(f.read()).decode("utf-8"))
rom_names = jsonobj[0]
received_items = {tuple(k): [ReceivedItem(**i) for i in v] for k, v in jsonobj[1]}
if not all([ctx.rom_names[tuple(rom)] == (team, slot) for rom, (team, slot) in rom_names]):
raise Exception('Save file mismatch, will start a new game')
ctx.received_items = received_items
logging.info('Loaded save file with %d received items for %d players' % (sum([len(p) for p in received_items.values()]), len(received_items)))
except FileNotFoundError:
logging.error('No save data found, starting a new game')
except Exception as e:
logging.info(e)
ctx.server = websockets.serve(functools.partial(server,ctx=ctx), ctx.host, ctx.port, ping_timeout=None, ping_interval=None)
await ctx.server
await console(ctx)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
    loop.run_until_complete(asyncio.gather(*asyncio.all_tasks(loop)))
loop.close()
|
import argparse
import asyncio
import functools
import json
import logging
import re
import shlex
import urllib.request
import zlib
import ModuleUpdate
ModuleUpdate.update()
import websockets
import aioconsole
import Items
import Regions
from MultiClient import ReceivedItem, get_item_name_from_id, get_location_name_from_address
class Client:
def __init__(self, socket):
self.socket = socket
self.auth = False
self.name = None
self.team = None
self.slot = None
self.send_index = 0
class Context:
def __init__(self, host, port, password):
self.data_filename = None
self.save_filename = None
self.disable_save = False
self.player_names = {}
self.rom_names = {}
self.remote_items = set()
self.locations = {}
self.host = host
self.port = port
self.password = password
self.server = None
self.countdown_timer = 0
self.clients = []
self.received_items = {}
async def send_msgs(websocket, msgs):
if not websocket or not websocket.open or websocket.closed:
return
try:
await websocket.send(json.dumps(msgs))
except websockets.ConnectionClosed:
pass
def broadcast_all(ctx : Context, msgs):
for client in ctx.clients:
if client.auth:
asyncio.create_task(send_msgs(client.socket, msgs))
def broadcast_team(ctx : Context, team, msgs):
for client in ctx.clients:
if client.auth and client.team == team:
asyncio.create_task(send_msgs(client.socket, msgs))
def notify_all(ctx : Context, text):
logging.info("Notice (all): %s" % text)
broadcast_all(ctx, [['Print', text]])
def notify_team(ctx : Context, team : int, text : str):
logging.info("Notice (Team #%d): %s" % (team+1, text))
broadcast_team(ctx, team, [['Print', text]])
def notify_client(client : Client, text : str):
if not client.auth:
return
logging.info("Notice (Player %s in team %d): %s" % (client.name, client.team+1, text))
asyncio.create_task(send_msgs(client.socket, [['Print', text]]))
async def server(websocket, path, ctx : Context):
client = Client(websocket)
ctx.clients.append(client)
try:
await on_client_connected(ctx, client)
async for data in websocket:
for msg in json.loads(data):
if len(msg) == 1:
cmd = msg
args = None
else:
cmd = msg[0]
args = msg[1]
await process_client_cmd(ctx, client, cmd, args)
except Exception as e:
if not isinstance(e, websockets.WebSocketException):
logging.exception(e)
finally:
await on_client_disconnected(ctx, client)
ctx.clients.remove(client)
async def on_client_connected(ctx : Context, client : Client):
await send_msgs(client.socket, [['RoomInfo', {
'password': ctx.password is not None,
'players': [(client.team, client.slot, client.name) for client in ctx.clients if client.auth]
}]])
async def on_client_disconnected(ctx : Context, client : Client):
if client.auth:
await on_client_left(ctx, client)
async def on_client_joined(ctx : Context, client : Client):
notify_all(ctx, "%s (Team #%d) has joined the game" % (client.name, client.team + 1))
async def on_client_left(ctx : Context, client : Client):
notify_all(ctx, "%s (Team #%d) has left the game" % (client.name, client.team + 1))
async def countdown(ctx : Context, timer):
notify_all(ctx, f'[Server]: Starting countdown of {timer}s')
if ctx.countdown_timer:
ctx.countdown_timer = timer
return
ctx.countdown_timer = timer
while ctx.countdown_timer > 0:
notify_all(ctx, f'[Server]: {ctx.countdown_timer}')
ctx.countdown_timer -= 1
await asyncio.sleep(1)
notify_all(ctx, f'[Server]: GO')
def get_connected_players_string(ctx : Context):
auth_clients = [c for c in ctx.clients if c.auth]
if not auth_clients:
return 'No player connected'
auth_clients.sort(key=lambda c: (c.team, c.slot))
current_team = 0
text = 'Team #1: '
for c in auth_clients:
if c.team != current_team:
text += f':: Team #{c.team + 1}: '
current_team = c.team
text += f'{c.name} '
return 'Connected players: ' + text[:-1]
def get_received_items(ctx : Context, team, player):
return ctx.received_items.setdefault((team, player), [])
def tuplize_received_items(items):
return [(item.item, item.location, item.player) for item in items]
def send_new_items(ctx : Context):
for client in ctx.clients:
if not client.auth:
continue
items = get_received_items(ctx, client.team, client.slot)
if len(items) > client.send_index:
asyncio.create_task(send_msgs(client.socket, [['ReceivedItems', (client.send_index, tuplize_received_items(items)[client.send_index:])]]))
client.send_index = len(items)
def forfeit_player(ctx : Context, team, slot):
all_locations = [values[0] for values in Regions.location_table.values() if type(values[0]) is int]
notify_all(ctx, "%s (Team #%d) has forfeited" % (ctx.player_names[(team, slot)], team + 1))
register_location_checks(ctx, team, slot, all_locations)
def register_location_checks(ctx : Context, team, slot, locations):
found_items = False
for location in locations:
if (location, slot) in ctx.locations:
target_item, target_player = ctx.locations[(location, slot)]
if target_player != slot or slot in ctx.remote_items:
found = False
recvd_items = get_received_items(ctx, team, target_player)
for recvd_item in recvd_items:
if recvd_item.location == location and recvd_item.player == slot:
found = True
break
if not found:
new_item = ReceivedItem(target_item, location, slot)
recvd_items.append(new_item)
if slot != target_player:
broadcast_team(ctx, team, [['ItemSent', (slot, location, target_player, target_item)]])
logging.info('(Team #%d) %s sent %s to %s (%s)' % (team+1, ctx.player_names[(team, slot)], get_item_name_from_id(target_item), ctx.player_names[(team, target_player)], get_location_name_from_address(location)))
found_items = True
send_new_items(ctx)
if found_items and not ctx.disable_save:
try:
with open(ctx.save_filename, "wb") as f:
jsonstr = json.dumps((list(ctx.rom_names.items()),
[(k, [i.__dict__ for i in v]) for k, v in ctx.received_items.items()]))
f.write(zlib.compress(jsonstr.encode("utf-8")))
except Exception as e:
logging.exception(e)
async def process_client_cmd(ctx : Context, client : Client, cmd, args):
if type(cmd) is not str:
await send_msgs(client.socket, [['InvalidCmd']])
return
if cmd == 'Connect':
if not args or type(args) is not dict or \
'password' not in args or type(args['password']) not in [str, type(None)] or \
'rom' not in args or type(args['rom']) is not list:
await send_msgs(client.socket, [['InvalidArguments', 'Connect']])
return
errors = set()
if ctx.password is not None and args['password'] != ctx.password:
errors.add('InvalidPassword')
if tuple(args['rom']) not in ctx.rom_names:
errors.add('InvalidRom')
else:
team, slot = ctx.rom_names[tuple(args['rom'])]
if any([c.slot == slot and c.team == team for c in ctx.clients if c.auth]):
errors.add('SlotAlreadyTaken')
else:
client.name = ctx.player_names[(team, slot)]
client.team = team
client.slot = slot
if errors:
await send_msgs(client.socket, [['ConnectionRefused', list(errors)]])
else:
client.auth = True
reply = [['Connected', [(client.team, client.slot), [(p, n) for (t, p), n in ctx.player_names.items() if t == client.team]]]]
items = get_received_items(ctx, client.team, client.slot)
if items:
reply.append(['ReceivedItems', (0, tuplize_received_items(items))])
client.send_index = len(items)
await send_msgs(client.socket, reply)
await on_client_joined(ctx, client)
if not client.auth:
return
if cmd == 'Sync':
items = get_received_items(ctx, client.team, client.slot)
if items:
client.send_index = len(items)
await send_msgs(client.socket, [['ReceivedItems', (0, tuplize_received_items(items))]])
if cmd == 'LocationChecks':
if type(args) is not list:
await send_msgs(client.socket, [['InvalidArguments', 'LocationChecks']])
return
register_location_checks(ctx, client.team, client.slot, args)
if cmd == 'LocationScouts':
if type(args) is not list:
await send_msgs(client.socket, [['InvalidArguments', 'LocationScouts']])
return
locs = []
for location in args:
            if type(location) is not int or not 0 < location <= len(Regions.location_table):
await send_msgs(client.socket, [['InvalidArguments', 'LocationScouts']])
return
loc_name = list(Regions.location_table.keys())[location - 1]
target_item, target_player = ctx.locations[(Regions.location_table[loc_name][0], client.slot)]
replacements = {'SmallKey': 0xA2, 'BigKey': 0x9D, 'Compass': 0x8D, 'Map': 0x7D}
item_type = [i[2] for i in Items.item_table.values() if type(i[3]) is int and i[3] == target_item]
if item_type:
target_item = replacements.get(item_type[0], target_item)
locs.append([loc_name, location, target_item, target_player])
logging.info(f"{client.name} in team {client.team+1} scouted {', '.join([l[0] for l in locs])}")
await send_msgs(client.socket, [['LocationInfo', [l[1:] for l in locs]]])
if cmd == 'Say':
if type(args) is not str or not args.isprintable():
await send_msgs(client.socket, [['InvalidArguments', 'Say']])
return
notify_all(ctx, client.name + ': ' + args)
if args.startswith('!players'):
notify_all(ctx, get_connected_players_string(ctx))
if args.startswith('!forfeit'):
forfeit_player(ctx, client.team, client.slot)
if args.startswith('!countdown'):
try:
timer = int(args.split()[1])
except (IndexError, ValueError):
timer = 10
asyncio.create_task(countdown(ctx, timer))
def set_password(ctx : Context, password):
ctx.password = password
logging.warning('Password set to ' + password if password is not None else 'Password disabled')
async def console(ctx : Context):
while True:
input = await aioconsole.ainput()
try:
command = shlex.split(input)
if not command:
continue
if command[0] == '/exit':
ctx.server.ws_server.close()
break
if command[0] == '/players':
logging.info(get_connected_players_string(ctx))
if command[0] == '/password':
set_password(ctx, command[1] if len(command) > 1 else None)
if command[0] == '/kick' and len(command) > 1:
team = int(command[2]) - 1 if len(command) > 2 and command[2].isdigit() else None
for client in ctx.clients:
if client.auth and client.name.lower() == command[1].lower() and (team is None or team == client.team):
if client.socket and not client.socket.closed:
await client.socket.close()
if command[0] == '/forfeitslot' and len(command) > 1 and command[1].isdigit():
if len(command) > 2 and command[2].isdigit():
team = int(command[1]) - 1
slot = int(command[2])
else:
team = 0
slot = int(command[1])
forfeit_player(ctx, team, slot)
if command[0] == '/forfeitplayer' and len(command) > 1:
seeked_player = command[1].lower()
for (team, slot), name in ctx.player_names.items():
if name.lower() == seeked_player:
forfeit_player(ctx, team, slot)
if command[0] == '/senditem' and len(command) > 2:
[(player, item)] = re.findall(r'\S* (\S*) (.*)', input)
if item in Items.item_table:
for client in ctx.clients:
if client.auth and client.name.lower() == player.lower():
new_item = ReceivedItem(Items.item_table[item][3], "cheat console", client.slot)
get_received_items(ctx, client.team, client.slot).append(new_item)
notify_all(ctx, 'Cheat console: sending "' + item + '" to ' + client.name)
send_new_items(ctx)
else:
logging.warning("Unknown item: " + item)
if command[0] == '/hint':
for (team,slot), name in ctx.player_names.items():
if len(command) == 1:
print("Use /hint {Playername} {itemname}\nFor example /hint Berserker Lamp")
elif name.lower() == command[1].lower():
item = " ".join(command[2:])
if item in Items.item_table:
seeked_item_id = Items.item_table[item][3]
for check, result in ctx.locations.items():
item_id, receiving_player = result
if receiving_player == slot and item_id == seeked_item_id:
location_id, finding_player = check
name_finder = ctx.player_names[team, finding_player]
hint = f"[Hint]: {name}'s {item} can be found at " \
f"{get_location_name_from_address(location_id)} in {name_finder}'s World"
notify_team(ctx, team, hint)
else:
logging.warning("Unknown item: " + item)
if command[0][0] != '/':
notify_all(ctx, '[Server]: ' + input)
except:
import traceback
traceback.print_exc()
async def main():
parser = argparse.ArgumentParser()
parser.add_argument('--host', default=None)
parser.add_argument('--port', default=38281, type=int)
parser.add_argument('--password', default=None)
parser.add_argument('--multidata', default=None)
parser.add_argument('--savefile', default=None)
parser.add_argument('--disable_save', default=False, action='store_true')
parser.add_argument('--loglevel', default='info', choices=['debug', 'info', 'warning', 'error', 'critical'])
args = parser.parse_args()
logging.basicConfig(format='[%(asctime)s] %(message)s', level=getattr(logging, args.loglevel.upper(), logging.INFO))
ctx = Context(args.host, args.port, args.password)
ctx.data_filename = args.multidata
try:
if not ctx.data_filename:
import tkinter
import tkinter.filedialog
root = tkinter.Tk()
root.withdraw()
ctx.data_filename = tkinter.filedialog.askopenfilename(filetypes=(("Multiworld data","*multidata"),))
with open(ctx.data_filename, 'rb') as f:
jsonobj = json.loads(zlib.decompress(f.read()).decode("utf-8"))
for team, names in enumerate(jsonobj['names']):
for player, name in enumerate(names, 1):
ctx.player_names[(team, player)] = name
ctx.rom_names = {tuple(rom): (team, slot) for slot, team, rom in jsonobj['roms']}
ctx.remote_items = set(jsonobj['remote_items'])
ctx.locations = {tuple(k): tuple(v) for k, v in jsonobj['locations']}
except Exception as e:
logging.error('Failed to read multiworld data (%s)' % e)
return
ip = urllib.request.urlopen('https://v4.ident.me').read().decode('utf8') if not ctx.host else ctx.host
logging.info('Hosting game at %s:%d (%s)' % (ip, ctx.port, 'No password' if not ctx.password else 'Password: %s' % ctx.password))
ctx.disable_save = args.disable_save
if not ctx.disable_save:
if not ctx.save_filename:
ctx.save_filename = (ctx.data_filename[:-9] if ctx.data_filename[-9:] == 'multidata' else (ctx.data_filename + '_')) + 'multisave'
try:
with open(ctx.save_filename, 'rb') as f:
jsonobj = json.loads(zlib.decompress(f.read()).decode("utf-8"))
rom_names = jsonobj[0]
received_items = {tuple(k): [ReceivedItem(**i) for i in v] for k, v in jsonobj[1]}
if not all([ctx.rom_names[tuple(rom)] == (team, slot) for rom, (team, slot) in rom_names]):
raise Exception('Save file mismatch, will start a new game')
ctx.received_items = received_items
logging.info('Loaded save file with %d received items for %d players' % (sum([len(p) for p in received_items.values()]), len(received_items)))
except FileNotFoundError:
logging.error('No save data found, starting a new game')
except Exception as e:
logging.info(e)
ctx.server = websockets.serve(functools.partial(server,ctx=ctx), ctx.host, ctx.port, ping_timeout=None, ping_interval=None)
await ctx.server
await console(ctx)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
    loop.run_until_complete(asyncio.gather(*asyncio.all_tasks(loop)))
loop.close()
|
import os
import json
from flask import Flask, render_template
DATABASE_PATH = "../.contacts-store"
# Read database and build HTML string
file_names = os.listdir(DATABASE_PATH)
file_names.remove(".git")
html = "<table><tr><th>Contact</th><th>Last Name</th><th>Tlf</th><th>Email</th><th>Job</th><th>Province</th></tr>"
for file_name in file_names:
file_path = os.path.join(DATABASE_PATH, file_name)
with open(file_path, 'r') as f:
data = json.load(f)
data['name'] = file_name
        html += f"<tr><td>{data['name']}</td><td>{data['last_name']}</td><td>{data['tlf']}</td><td>{data['email']}</td><td>{data['job']}</td><td>{data['province']}</td></tr>"
html += "</table>"
# Create Flask app
server = Flask(__name__)
@server.route("/")
def contacts_table():
return html
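# Hedged usage sketch: this module never calls `server.run()` itself, so it is
# presumably started via the Flask CLI, e.g.
#   FLASK_APP=<this module> flask run
# or, for a quick local test, by adding something like:
#   if __name__ == "__main__":
#       server.run(host="127.0.0.1", port=5000, debug=True)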
|
import os
import json
from flask import Flask, render_template
DATABASE_PATH = "../.contacts-store"
# Read database and build HTML string
file_names = os.listdir(DATABASE_PATH)
file_names.remove(".git")
html = "<table><tr><th>Contact</th><th>Last Name</th><th>Tlf</th><th>Email</th><th>Job</th><th>Province</th></tr>"
for file_name in file_names:
file_path = os.path.join(DATABASE_PATH, file_name)
with open(file_path, 'r') as f:
data = json.load(f)
data['name'] = file_name
        html += f"<tr><td>{data['name']}</td><td>{data['last_name']}</td><td>{data['tlf']}</td><td>{data['email']}</td><td>{data['job']}</td><td>{data['province']}</td></tr>"
html += "</table>"
# Create Flask app
server = Flask(__name__)
@server.route("/")
def contacts_table():
return html
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
from mmcv.parallel import is_module_wrapper
from mmcv.runner.hooks import HOOKS, Hook
class BaseEMAHook(Hook):
"""Exponential Moving Average Hook.
    Use Exponential Moving Average on all parameters of the model during
    training. Every parameter has an ema backup, which is updated by the
    formula below. EMAHook takes priority over EvalHook and CheckpointHook.
    Note, the original model parameters are actually saved in the ema field
    after training.
Args:
        momentum (float): The momentum used for updating ema parameters.
            Ema parameters are updated with the formula:
            `ema_param = (1-momentum) * ema_param + momentum * cur_param`.
            Defaults to 0.0002.
        skip_buffers (bool): Whether to skip the model buffers, such as
            batchnorm running stats (running_mean, running_var); if skipped,
            the ema operation is not performed on them. Defaults to False.
        interval (int): Update ema parameters every `interval` iterations.
            Defaults to 1.
        resume_from (str, optional): The checkpoint path. Defaults to None.
        momentum_fun (func, optional): The function to change momentum
            during early iterations (i.e. warmup) to help early training.
            If None, `momentum` is used as a constant. Defaults to None.
"""
def __init__(self,
momentum=0.0002,
interval=1,
skip_buffers=False,
resume_from=None,
momentum_fun=None):
assert 0 < momentum < 1
self.momentum = momentum
self.skip_buffers = skip_buffers
self.interval = interval
self.checkpoint = resume_from
self.momentum_fun = momentum_fun
    def before_run(self, runner):
        """Register ema parameters as ``named_buffer`` on the model so that
        the model can be resumed together with its ema parameters.
"""
model = runner.model
if is_module_wrapper(model):
model = model.module
self.param_ema_buffer = {}
if self.skip_buffers:
self.model_parameters = dict(model.named_parameters())
else:
self.model_parameters = model.state_dict()
for name, value in self.model_parameters.items():
# "." is not allowed in module's buffer name
            buffer_name = f"ema_{name.replace('.', '_')}"
self.param_ema_buffer[name] = buffer_name
model.register_buffer(buffer_name, value.data.clone())
self.model_buffers = dict(model.named_buffers())
if self.checkpoint is not None:
runner.resume(self.checkpoint)
def get_momentum(self, runner):
return self.momentum_fun(runner.iter) if self.momentum_fun else \
self.momentum
def after_train_iter(self, runner):
"""Update ema parameter every self.interval iterations."""
if (runner.iter + 1) % self.interval != 0:
return
momentum = self.get_momentum(runner)
for name, parameter in self.model_parameters.items():
# exclude num_tracking
if parameter.dtype.is_floating_point:
buffer_name = self.param_ema_buffer[name]
buffer_parameter = self.model_buffers[buffer_name]
buffer_parameter.mul_(1 - momentum).add_(
parameter.data, alpha=momentum)
def after_train_epoch(self, runner):
"""We load parameter values from ema backup to model before the
EvalHook."""
self._swap_ema_parameters()
def before_train_epoch(self, runner):
"""We recover model's parameter from ema backup after last epoch's
EvalHook."""
self._swap_ema_parameters()
def _swap_ema_parameters(self):
"""Swap the parameter of model with parameter in ema_buffer."""
for name, value in self.model_parameters.items():
temp = value.data.clone()
ema_buffer = self.model_buffers[self.param_ema_buffer[name]]
value.data.copy_(ema_buffer.data)
ema_buffer.data.copy_(temp)
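# Hedged, self-contained sketch (not used by the hooks above): the EMA update
# from the class docstring, `ema_param = (1-momentum) * ema_param +
# momentum * cur_param`, applied to plain floats to make the behaviour concrete.
def _ema_update_example(momentum: float = 0.0002, iters: int = 10000) -> float:
    ema_value = 0.0
    cur_value = 1.0  # pretend the tracked parameter is constant at 1.0
    for _ in range(iters):
        ema_value = (1 - momentum) * ema_value + momentum * cur_value
    # With the defaults this returns roughly 0.86: the ema value drifts toward
    # the current parameter at a rate controlled by `momentum`.
    return ema_value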
@HOOKS.register_module()
class ExpMomentumEMAHook(BaseEMAHook):
"""EMAHook using exponential momentum strategy.
Args:
total_iter (int): The total number of iterations of EMA momentum.
Defaults to 2000.
"""
def __init__(self, total_iter=2000, **kwargs):
super(ExpMomentumEMAHook, self).__init__(**kwargs)
self.momentum_fun = lambda x: (1 - self.momentum) * math.exp(-(
1 + x) / total_iter) + self.momentum
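# Hedged note on the exponential schedule above: with the defaults
# (momentum=0.0002, total_iter=2000) the effective momentum starts near 1.0
# (the ema copies the live parameters almost directly), is roughly 0.37 at
# iteration 2000, and decays below 0.01 after about 10000 iterations,
# approaching the constant 0.0002 in the long run.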
@HOOKS.register_module()
class LinearMomentumEMAHook(BaseEMAHook):
"""EMAHook using linear momentum strategy.
Args:
warm_up (int): During first warm_up steps, we may use smaller decay
to update ema parameters more slowly. Defaults to 100.
"""
def __init__(self, warm_up=100, **kwargs):
super(LinearMomentumEMAHook, self).__init__(**kwargs)
self.momentum_fun = lambda x: min(self.momentum**self.interval,
(1 + x) / (warm_up + x))
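# --- Usage sketch (illustration only, not part of the original hooks) ---
# A scalar walk-through of the update applied every `interval` iterations:
# ema = (1 - m) * ema + m * param, with m taken from the exponential schedule
# used by ExpMomentumEMAHook (relies on the module-level `math` import above).
def _ema_demo(total_iter: int = 2000, base_momentum: float = 0.0002) -> float:
    ema, param = 0.0, 1.0
    for it in range(100):
        m = (1 - base_momentum) * math.exp(-(1 + it) / total_iter) + base_momentum
        ema = (1 - m) * ema + m * param
    return ema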
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
from mmcv.parallel import is_module_wrapper
from mmcv.runner.hooks import HOOKS, Hook
class BaseEMAHook(Hook):
"""Exponential Moving Average Hook.
Use Exponential Moving Average on all parameters of model in training
process. All parameters have a ema backup, which update by the formula
as below. EMAHook takes priority over EvalHook and CheckpointHook. Note,
the original model parameters are actually saved in ema field after train.
Args:
momentum (float): The momentum used for updating ema parameter.
Ema's parameter are updated with the formula:
`ema_param = (1-momentum) * ema_param + momentum * cur_param`.
Defaults to 0.0002.
skip_buffers (bool): Whether to skip the model buffers, such as
batchnorm running stats (running_mean, running_var), it does not
perform the ema operation. Default to False.
interval (int): Update ema parameter every interval iteration.
Defaults to 1.
resume_from (str, optional): The checkpoint path. Defaults to None.
momentum_fun (func, optional): The function to change momentum
during early iteration (also warmup) to help early training.
It uses `momentum` as a constant. Defaults to None.
"""
def __init__(self,
momentum=0.0002,
interval=1,
skip_buffers=False,
resume_from=None,
momentum_fun=None):
assert 0 < momentum < 1
self.momentum = momentum
self.skip_buffers = skip_buffers
self.interval = interval
self.checkpoint = resume_from
self.momentum_fun = momentum_fun
def before_run(self, runner):
"""To resume model with it's ema parameters more friendly.
Register ema parameter as ``named_buffer`` to model.
"""
model = runner.model
if is_module_wrapper(model):
model = model.module
self.param_ema_buffer = {}
if self.skip_buffers:
self.model_parameters = dict(model.named_parameters())
else:
self.model_parameters = model.state_dict()
for name, value in self.model_parameters.items():
# "." is not allowed in module's buffer name
buffer_name = f"ema_{name.replace('.', '_')}"
self.param_ema_buffer[name] = buffer_name
model.register_buffer(buffer_name, value.data.clone())
self.model_buffers = dict(model.named_buffers())
if self.checkpoint is not None:
runner.resume(self.checkpoint)
def get_momentum(self, runner):
return self.momentum_fun(runner.iter) if self.momentum_fun else \
self.momentum
def after_train_iter(self, runner):
"""Update ema parameter every self.interval iterations."""
if (runner.iter + 1) % self.interval != 0:
return
momentum = self.get_momentum(runner)
for name, parameter in self.model_parameters.items():
# exclude num_tracking
if parameter.dtype.is_floating_point:
buffer_name = self.param_ema_buffer[name]
buffer_parameter = self.model_buffers[buffer_name]
buffer_parameter.mul_(1 - momentum).add_(
parameter.data, alpha=momentum)
def after_train_epoch(self, runner):
"""We load parameter values from ema backup to model before the
EvalHook."""
self._swap_ema_parameters()
def before_train_epoch(self, runner):
"""We recover model's parameter from ema backup after last epoch's
EvalHook."""
self._swap_ema_parameters()
def _swap_ema_parameters(self):
"""Swap the parameter of model with parameter in ema_buffer."""
for name, value in self.model_parameters.items():
temp = value.data.clone()
ema_buffer = self.model_buffers[self.param_ema_buffer[name]]
value.data.copy_(ema_buffer.data)
ema_buffer.data.copy_(temp)
@HOOKS.register_module()
class ExpMomentumEMAHook(BaseEMAHook):
"""EMAHook using exponential momentum strategy.
Args:
total_iter (int): The total number of iterations of EMA momentum.
Defaults to 2000.
"""
def __init__(self, total_iter=2000, **kwargs):
super(ExpMomentumEMAHook, self).__init__(**kwargs)
self.momentum_fun = lambda x: (1 - self.momentum) * math.exp(-(
1 + x) / total_iter) + self.momentum
@HOOKS.register_module()
class LinearMomentumEMAHook(BaseEMAHook):
"""EMAHook using linear momentum strategy.
Args:
warm_up (int): During first warm_up steps, we may use smaller decay
to update ema parameters more slowly. Defaults to 100.
"""
def __init__(self, warm_up=100, **kwargs):
super(LinearMomentumEMAHook, self).__init__(**kwargs)
self.momentum_fun = lambda x: min(self.momentum**self.interval,
(1 + x) / (warm_up + x))
|
import threading
import numpy as np
import jesse.helpers as jh
from jesse.models.Candle import Candle
from jesse.models.CompletedTrade import CompletedTrade
from jesse.models.DailyBalance import DailyBalance
from jesse.models.Order import Order
from jesse.models.Orderbook import Orderbook
from jesse.models.Ticker import Ticker
from jesse.models.Trade import Trade
from jesse.services import logger
def store_candle_into_db(exchange: str, symbol: str, candle: np.ndarray) -> None:
d = {
'id': jh.generate_unique_id(),
'symbol': symbol,
'exchange': exchange,
'timestamp': candle[0],
'open': candle[1],
'high': candle[3],
'low': candle[4],
'close': candle[2],
'volume': candle[5]
}
def async_save() -> None:
Candle.insert(**d).on_conflict_ignore().execute()
print(
jh.color(
f"candle: {jh.timestamp_to_time(d["timestamp"])}-{exchange}-{symbol}: {candle}",
'blue'
)
)
# async call
threading.Thread(target=async_save).start()
def store_ticker_into_db(exchange: str, symbol: str, ticker: np.ndarray) -> None:
return
d = {
'id': jh.generate_unique_id(),
'timestamp': ticker[0],
'last_price': ticker[1],
'high_price': ticker[2],
'low_price': ticker[3],
'volume': ticker[4],
'symbol': symbol,
'exchange': exchange,
}
def async_save() -> None:
Ticker.insert(**d).on_conflict_ignore().execute()
print(
jh.color(f'ticker: {jh.timestamp_to_time(d["timestamp"])}-{exchange}-{symbol}: {ticker}', 'yellow')
)
# async call
threading.Thread(target=async_save).start()
def store_completed_trade_into_db(completed_trade: CompletedTrade) -> None:
return
d = {
'id': completed_trade.id,
'strategy_name': completed_trade.strategy_name,
'symbol': completed_trade.symbol,
'exchange': completed_trade.exchange,
'type': completed_trade.type,
'timeframe': completed_trade.timeframe,
'entry_price': completed_trade.entry_price,
'exit_price': completed_trade.exit_price,
'take_profit_at': completed_trade.take_profit_at,
'stop_loss_at': completed_trade.stop_loss_at,
'qty': completed_trade.qty,
'opened_at': completed_trade.opened_at,
'closed_at': completed_trade.closed_at,
'entry_candle_timestamp': completed_trade.entry_candle_timestamp,
'exit_candle_timestamp': completed_trade.exit_candle_timestamp,
'leverage': completed_trade.leverage,
}
def async_save() -> None:
CompletedTrade.insert(**d).execute()
if jh.is_debugging():
logger.info(f'Stored the completed trade record for {completed_trade.exchange}-{completed_trade.symbol}-{completed_trade.strategy_name} into database.')
# async call
threading.Thread(target=async_save).start()
def store_order_into_db(order: Order) -> None:
return
d = {
'id': order.id,
'trade_id': order.trade_id,
'exchange_id': order.exchange_id,
'vars': order.vars,
'symbol': order.symbol,
'exchange': order.exchange,
'side': order.side,
'type': order.type,
'flag': order.flag,
'qty': order.qty,
'price': order.price,
'status': order.status,
'created_at': order.created_at,
'executed_at': order.executed_at,
'canceled_at': order.canceled_at,
'role': order.role,
}
def async_save() -> None:
Order.insert(**d).execute()
if jh.is_debugging():
logger.info(f'Stored the executed order record for {order.exchange}-{order.symbol} into database.')
# async call
threading.Thread(target=async_save).start()
def store_daily_balance_into_db(daily_balance: dict) -> None:
return
def async_save():
DailyBalance.insert(**daily_balance).execute()
if jh.is_debugging():
logger.info(f'Stored daily portfolio balance record into the database: {daily_balance["asset"]} => {jh.format_currency(round(daily_balance["balance"], 2))}'
)
# async call
threading.Thread(target=async_save).start()
def store_trade_into_db(exchange: str, symbol: str, trade: np.ndarray) -> None:
return
d = {
'id': jh.generate_unique_id(),
'timestamp': trade[0],
'price': trade[1],
'buy_qty': trade[2],
'sell_qty': trade[3],
'buy_count': trade[4],
'sell_count': trade[5],
'symbol': symbol,
'exchange': exchange,
}
def async_save() -> None:
Trade.insert(**d).on_conflict_ignore().execute()
print(
jh.color(
f'trade: {jh.timestamp_to_time(d["timestamp"])}-{exchange}-{symbol}: {trade}',
'green'
)
)
# async call
threading.Thread(target=async_save).start()
def store_orderbook_into_db(exchange: str, symbol: str, orderbook: np.ndarray) -> None:
return
d = {
'id': jh.generate_unique_id(),
'timestamp': jh.now_to_timestamp(),
'data': orderbook.dumps(),
'symbol': symbol,
'exchange': exchange,
}
def async_save() -> None:
Orderbook.insert(**d).on_conflict_ignore().execute()
print(
jh.color(
f'orderbook: {jh.timestamp_to_time(d["timestamp"])}-{exchange}-{symbol}: [{orderbook[0][0][0]}, {orderbook[0][0][1]}], [{orderbook[1][0][0]}, {orderbook[1][0][1]}]',
'magenta'
)
)
# async call
threading.Thread(target=async_save).start()
def fetch_candles_from_db(exchange: str, symbol: str, start_date: int, finish_date: int) -> tuple:
candles_tuple = tuple(
Candle.select(
Candle.timestamp, Candle.open, Candle.close, Candle.high, Candle.low,
Candle.volume
).where(
Candle.timestamp.between(start_date, finish_date),
Candle.exchange == exchange,
Candle.symbol == symbol
).order_by(Candle.timestamp.asc()).tuples()
)
return candles_tuple
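# --- Usage sketch (illustration only, not part of jesse) ---
# store_candle_into_db() above assumes jesse's candle column order:
# [timestamp, open, close, high, low, volume]. A synthetic candle built that way:
def _example_candle() -> np.ndarray:
    # made-up values purely for illustration
    return np.array([1609459200000, 29000.0, 29100.0, 29250.0, 28900.0, 123.45])
# store_candle_into_db('Binance', 'BTC-USDT', _example_candle())  # requires a configured database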
|
import threading
import numpy as np
import jesse.helpers as jh
from jesse.models.Candle import Candle
from jesse.models.CompletedTrade import CompletedTrade
from jesse.models.DailyBalance import DailyBalance
from jesse.models.Order import Order
from jesse.models.Orderbook import Orderbook
from jesse.models.Ticker import Ticker
from jesse.models.Trade import Trade
from jesse.services import logger
def store_candle_into_db(exchange: str, symbol: str, candle: np.ndarray) -> None:
d = {
'id': jh.generate_unique_id(),
'symbol': symbol,
'exchange': exchange,
'timestamp': candle[0],
'open': candle[1],
'high': candle[3],
'low': candle[4],
'close': candle[2],
'volume': candle[5]
}
def async_save() -> None:
Candle.insert(**d).on_conflict_ignore().execute()
print(
jh.color(
f"candle: {jh.timestamp_to_time(d['timestamp'])}-{exchange}-{symbol}: {candle}",
'blue'
)
)
# async call
threading.Thread(target=async_save).start()
def store_ticker_into_db(exchange: str, symbol: str, ticker: np.ndarray) -> None:
return
d = {
'id': jh.generate_unique_id(),
'timestamp': ticker[0],
'last_price': ticker[1],
'high_price': ticker[2],
'low_price': ticker[3],
'volume': ticker[4],
'symbol': symbol,
'exchange': exchange,
}
def async_save() -> None:
Ticker.insert(**d).on_conflict_ignore().execute()
print(
jh.color(f'ticker: {jh.timestamp_to_time(d["timestamp"])}-{exchange}-{symbol}: {ticker}', 'yellow')
)
# async call
threading.Thread(target=async_save).start()
def store_completed_trade_into_db(completed_trade: CompletedTrade) -> None:
return
d = {
'id': completed_trade.id,
'strategy_name': completed_trade.strategy_name,
'symbol': completed_trade.symbol,
'exchange': completed_trade.exchange,
'type': completed_trade.type,
'timeframe': completed_trade.timeframe,
'entry_price': completed_trade.entry_price,
'exit_price': completed_trade.exit_price,
'take_profit_at': completed_trade.take_profit_at,
'stop_loss_at': completed_trade.stop_loss_at,
'qty': completed_trade.qty,
'opened_at': completed_trade.opened_at,
'closed_at': completed_trade.closed_at,
'entry_candle_timestamp': completed_trade.entry_candle_timestamp,
'exit_candle_timestamp': completed_trade.exit_candle_timestamp,
'leverage': completed_trade.leverage,
}
def async_save() -> None:
CompletedTrade.insert(**d).execute()
if jh.is_debugging():
logger.info(f'Stored the completed trade record for {completed_trade.exchange}-{completed_trade.symbol}-{completed_trade.strategy_name} into database.')
# async call
threading.Thread(target=async_save).start()
def store_order_into_db(order: Order) -> None:
return
d = {
'id': order.id,
'trade_id': order.trade_id,
'exchange_id': order.exchange_id,
'vars': order.vars,
'symbol': order.symbol,
'exchange': order.exchange,
'side': order.side,
'type': order.type,
'flag': order.flag,
'qty': order.qty,
'price': order.price,
'status': order.status,
'created_at': order.created_at,
'executed_at': order.executed_at,
'canceled_at': order.canceled_at,
'role': order.role,
}
def async_save() -> None:
Order.insert(**d).execute()
if jh.is_debugging():
logger.info(f'Stored the executed order record for {order.exchange}-{order.symbol} into database.')
# async call
threading.Thread(target=async_save).start()
def store_daily_balance_into_db(daily_balance: dict) -> None:
return
def async_save():
DailyBalance.insert(**daily_balance).execute()
if jh.is_debugging():
logger.info(f'Stored daily portfolio balance record into the database: {daily_balance["asset"]} => {jh.format_currency(round(daily_balance["balance"], 2))}'
)
# async call
threading.Thread(target=async_save).start()
def store_trade_into_db(exchange: str, symbol: str, trade: np.ndarray) -> None:
return
d = {
'id': jh.generate_unique_id(),
'timestamp': trade[0],
'price': trade[1],
'buy_qty': trade[2],
'sell_qty': trade[3],
'buy_count': trade[4],
'sell_count': trade[5],
'symbol': symbol,
'exchange': exchange,
}
def async_save() -> None:
Trade.insert(**d).on_conflict_ignore().execute()
print(
jh.color(
f'trade: {jh.timestamp_to_time(d["timestamp"])}-{exchange}-{symbol}: {trade}',
'green'
)
)
# async call
threading.Thread(target=async_save).start()
def store_orderbook_into_db(exchange: str, symbol: str, orderbook: np.ndarray) -> None:
return
d = {
'id': jh.generate_unique_id(),
'timestamp': jh.now_to_timestamp(),
'data': orderbook.dumps(),
'symbol': symbol,
'exchange': exchange,
}
def async_save() -> None:
Orderbook.insert(**d).on_conflict_ignore().execute()
print(
jh.color(
f'orderbook: {jh.timestamp_to_time(d["timestamp"])}-{exchange}-{symbol}: [{orderbook[0][0][0]}, {orderbook[0][0][1]}], [{orderbook[1][0][0]}, {orderbook[1][0][1]}]',
'magenta'
)
)
# async call
threading.Thread(target=async_save).start()
def fetch_candles_from_db(exchange: str, symbol: str, start_date: int, finish_date: int) -> tuple:
candles_tuple = tuple(
Candle.select(
Candle.timestamp, Candle.open, Candle.close, Candle.high, Candle.low,
Candle.volume
).where(
Candle.timestamp.between(start_date, finish_date),
Candle.exchange == exchange,
Candle.symbol == symbol
).order_by(Candle.timestamp.asc()).tuples()
)
return candles_tuple
|
import copy
from typing import Optional
from vyper import ast as vy_ast
from vyper.ast.validation import validate_call_args
from vyper.exceptions import (
ExceptionList,
FunctionDeclarationException,
ImmutableViolation,
InvalidLiteral,
InvalidOperation,
InvalidType,
IteratorException,
NonPayableViolation,
StateAccessViolation,
StructureException,
TypeMismatch,
VariableDeclarationException,
VyperException,
)
# TODO consolidate some of these imports
from vyper.semantics.environment import CONSTANT_ENVIRONMENT_VARS, MUTABLE_ENVIRONMENT_VARS
from vyper.semantics.namespace import get_namespace
from vyper.semantics.types.abstract import IntegerAbstractType
from vyper.semantics.types.bases import DataLocation
from vyper.semantics.types.function import (
ContractFunction,
MemberFunctionDefinition,
StateMutability,
)
from vyper.semantics.types.indexable.mapping import MappingDefinition
from vyper.semantics.types.indexable.sequence import (
ArrayDefinition,
DynamicArrayDefinition,
TupleDefinition,
)
from vyper.semantics.types.user.event import Event
from vyper.semantics.types.utils import get_type_from_annotation
from vyper.semantics.types.value.address import AddressDefinition
from vyper.semantics.types.value.array_value import StringDefinition
from vyper.semantics.types.value.boolean import BoolDefinition
from vyper.semantics.validation.annotation import StatementAnnotationVisitor
from vyper.semantics.validation.base import VyperNodeVisitorBase
from vyper.semantics.validation.utils import (
get_common_types,
get_exact_type_from_node,
get_possible_types_from_node,
validate_expected_type,
)
def validate_functions(vy_module: vy_ast.Module) -> None:
"""Analyzes a vyper ast and validates the function-level namespaces."""
err_list = ExceptionList()
namespace = get_namespace()
for node in vy_module.get_children(vy_ast.FunctionDef):
with namespace.enter_scope():
try:
FunctionNodeVisitor(vy_module, node, namespace)
except VyperException as e:
err_list.append(e)
err_list.raise_if_not_empty()
def _is_terminus_node(node: vy_ast.VyperNode) -> bool:
if getattr(node, "_is_terminus", None):
return True
if isinstance(node, vy_ast.Expr) and isinstance(node.value, vy_ast.Call):
func = get_exact_type_from_node(node.value.func)
if getattr(func, "_is_terminus", None):
return True
return False
def check_for_terminus(node_list: list) -> bool:
if next((i for i in node_list if _is_terminus_node(i)), None):
return True
for node in [i for i in node_list if isinstance(i, vy_ast.If)][::-1]:
if not node.orelse or not check_for_terminus(node.orelse):
continue
if not check_for_terminus(node.body):
continue
return True
return False
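# Illustrative note (Vyper source shown only inside this comment): a body like
#
#     if x:
#         return 1
#     return 2
#
# passes check_for_terminus() because the trailing `return 2` is itself a terminus node;
# a body whose only returns sit inside an `if` without a terminating `else` (and with no
# statement after it) returns False and later triggers the
# "Missing or unmatched return statements" error raised in FunctionNodeVisitor.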
def _check_iterator_modification(
target_node: vy_ast.VyperNode, search_node: vy_ast.VyperNode
) -> Optional[vy_ast.VyperNode]:
similar_nodes = [
n
for n in search_node.get_descendants(type(target_node))
if vy_ast.compare_nodes(target_node, n)
]
for node in similar_nodes:
# raise if the node is the target of an assignment statement
assign_node = node.get_ancestor((vy_ast.Assign, vy_ast.AugAssign))
# note the use of get_descendants() blocks statements like
# self.my_array[i] = x
if assign_node and node in assign_node.target.get_descendants(include_self=True):
return node
attr_node = node.get_ancestor(vy_ast.Attribute)
# note the use of get_descendants() blocks statements like
# self.my_array[i].append(x)
if (
attr_node is not None
and node in attr_node.value.get_descendants(include_self=True)
and attr_node.attr in ("append", "pop", "extend")
):
return node
return None
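# Illustrative note: inside `for i in self.my_array: ...`, an assignment such as
# `self.my_array[0] = i` is caught by the Assign/AugAssign branch above, while
# `self.my_array.append(i)` is caught by the Attribute branch ("append"/"pop"/"extend"),
# since both modify the value being iterated over; merely reading `self.my_array[0]`
# is not flagged.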
def _validate_revert_reason(msg_node: vy_ast.VyperNode) -> None:
if msg_node:
if isinstance(msg_node, vy_ast.Str):
if not msg_node.value.strip():
raise StructureException("Reason string cannot be empty", msg_node)
elif not (isinstance(msg_node, vy_ast.Name) and msg_node.id == "UNREACHABLE"):
try:
validate_expected_type(msg_node, StringDefinition(1024))
except TypeMismatch as e:
raise InvalidType("revert reason must fit within String[1024]") from e
def _validate_address_code_attribute(node: vy_ast.Attribute) -> None:
value_type = get_exact_type_from_node(node.value)
if isinstance(value_type, AddressDefinition) and node.attr == "code":
# Validate `slice(<address>.code, start, length)` where `length` is constant
parent = node.get_ancestor()
if isinstance(parent, vy_ast.Call):
ok_func = isinstance(parent.func, vy_ast.Name) and parent.func.id == "slice"
ok_args = len(parent.args) == 3 and isinstance(parent.args[2], vy_ast.Int)
if ok_func and ok_args:
return
raise StructureException(
"(address).code is only allowed inside of a slice function with a constant length",
node,
)
def _validate_msg_data_attribute(node: vy_ast.Attribute) -> None:
if isinstance(node.value, vy_ast.Name) and node.value.id == "msg" and node.attr == "data":
parent = node.get_ancestor()
if not isinstance(parent, vy_ast.Call) or parent.get("func.id") not in ("slice", "len"):
raise StructureException(
"msg.data is only allowed inside of the slice or len functions",
node,
)
if parent.get("func.id") == "slice":
ok_args = len(parent.args) == 3 and isinstance(parent.args[2], vy_ast.Int)
if not ok_args:
raise StructureException(
"slice(msg.data) must use a compile-time constant for length argument",
parent,
)
class FunctionNodeVisitor(VyperNodeVisitorBase):
ignored_types = (
vy_ast.Break,
vy_ast.Constant,
vy_ast.Pass,
)
scope_name = "function"
def __init__(
self, vyper_module: vy_ast.Module, fn_node: vy_ast.FunctionDef, namespace: dict
) -> None:
self.vyper_module = vyper_module
self.fn_node = fn_node
self.namespace = namespace
self.func = fn_node._metadata["type"]
self.annotation_visitor = StatementAnnotationVisitor(fn_node, namespace)
self.expr_visitor = _LocalExpressionVisitor()
namespace.update(self.func.arguments)
for node in fn_node.body:
self.visit(node)
if self.func.return_type:
if not check_for_terminus(fn_node.body):
raise FunctionDeclarationException(
f"Missing or unmatched return statements in function '{fn_node.name}'",
fn_node,
)
if self.func.mutability == StateMutability.PURE:
node_list = fn_node.get_descendants(
vy_ast.Attribute,
{
"value.id": set(CONSTANT_ENVIRONMENT_VARS.keys()).union(
set(MUTABLE_ENVIRONMENT_VARS.keys())
)
},
)
for node in node_list:
t = node._metadata.get("type")
if isinstance(t, ContractFunction) and t.mutability == StateMutability.PURE:
# allowed
continue
raise StateAccessViolation(
"not allowed to query contract or environment variables in pure functions",
node_list[0],
)
if self.func.mutability is not StateMutability.PAYABLE:
node_list = fn_node.get_descendants(
vy_ast.Attribute, {"value.id": "msg", "attr": "value"}
)
if node_list:
raise NonPayableViolation(
"msg.value is not allowed in non-payable functions", node_list[0]
)
def visit(self, node):
super().visit(node)
self.annotation_visitor.visit(node)
def visit_AnnAssign(self, node):
name = node.get("target.id")
if name is None:
raise VariableDeclarationException("Invalid assignment", node)
if not node.value:
raise VariableDeclarationException(
"Memory variables must be declared with an initial value", node
)
type_definition = get_type_from_annotation(node.annotation, DataLocation.MEMORY)
validate_expected_type(node.value, type_definition)
try:
self.namespace[name] = type_definition
except VyperException as exc:
raise exc.with_annotation(node) from None
self.expr_visitor.visit(node.value)
def visit_Assign(self, node):
if isinstance(node.value, vy_ast.Tuple):
raise StructureException("Right-hand side of assignment cannot be a tuple", node.value)
target = get_exact_type_from_node(node.target)
if isinstance(target, MappingDefinition):
raise StructureException(
"Left-hand side of assignment cannot be a HashMap without a key", node
)
validate_expected_type(node.value, target)
target.validate_modification(node, self.func.mutability)
self.expr_visitor.visit(node.value)
self.expr_visitor.visit(node.target)
def visit_AugAssign(self, node):
if isinstance(node.value, vy_ast.Tuple):
raise StructureException("Right-hand side of assignment cannot be a tuple", node.value)
target = get_exact_type_from_node(node.target)
validate_expected_type(node.value, target)
target.validate_modification(node, self.func.mutability)
self.expr_visitor.visit(node.value)
def visit_Raise(self, node):
if node.exc:
_validate_revert_reason(node.exc)
self.expr_visitor.visit(node.exc)
def visit_Assert(self, node):
if node.msg:
_validate_revert_reason(node.msg)
self.expr_visitor.visit(node.msg)
try:
validate_expected_type(node.test, BoolDefinition())
except InvalidType:
raise InvalidType("Assertion test value must be a boolean", node.test)
self.expr_visitor.visit(node.test)
def visit_Continue(self, node):
for_node = node.get_ancestor(vy_ast.For)
if for_node is None:
raise StructureException("`continue` must be enclosed in a `for` loop", node)
def visit_Return(self, node):
values = node.value
if values is None:
if self.func.return_type:
raise FunctionDeclarationException("Return statement is missing a value", node)
return
elif self.func.return_type is None:
raise FunctionDeclarationException("Function does not return any values", node)
if isinstance(values, vy_ast.Tuple):
values = values.elements
if not isinstance(self.func.return_type, TupleDefinition):
raise FunctionDeclarationException("Function only returns a single value", node)
if self.func.return_type.length != len(values):
raise FunctionDeclarationException(
f"Incorrect number of return values: "
f"expected {self.func.return_type.length}, got {len(values)}",
node,
)
for given, expected in zip(values, self.func.return_type.value_type):
validate_expected_type(given, expected)
else:
validate_expected_type(values, self.func.return_type)
self.expr_visitor.visit(node.value)
def visit_If(self, node):
validate_expected_type(node.test, BoolDefinition())
self.expr_visitor.visit(node.test)
with self.namespace.enter_scope():
for n in node.body:
self.visit(n)
with self.namespace.enter_scope():
for n in node.orelse:
self.visit(n)
def visit_For(self, node):
if isinstance(node.iter, vy_ast.Subscript):
raise StructureException("Cannot iterate over a nested list", node.iter)
if isinstance(node.iter, vy_ast.Call):
# iteration via range()
if node.iter.get("func.id") != "range":
raise IteratorException(
"Cannot iterate over the result of a function call", node.iter
)
validate_call_args(node.iter, (1, 2))
args = node.iter.args
if len(args) == 1:
# range(CONSTANT)
if not isinstance(args[0], vy_ast.Num):
raise StateAccessViolation("Value must be a literal", node)
if args[0].value <= 0:
raise StructureException("For loop must have at least 1 iteration", args[0])
validate_expected_type(args[0], IntegerAbstractType())
type_list = get_possible_types_from_node(args[0])
else:
validate_expected_type(args[0], IntegerAbstractType())
type_list = get_common_types(*args)
if not isinstance(args[0], vy_ast.Constant):
# range(x, x + CONSTANT)
if not isinstance(args[1], vy_ast.BinOp) or not isinstance(
args[1].op, vy_ast.Add
):
raise StructureException(
"Second element must be the first element plus a literal value",
args[0],
)
if not vy_ast.compare_nodes(args[0], args[1].left):
raise StructureException(
"First and second variable must be the same", args[1].left
)
if not isinstance(args[1].right, vy_ast.Int):
raise InvalidLiteral("Literal must be an integer", args[1].right)
if args[1].right.value < 1:
raise StructureException(
f"For loop has invalid number of iterations ({args[1].right.value}),"
" the value must be greater than zero",
args[1].right,
)
else:
# range(CONSTANT, CONSTANT)
if not isinstance(args[1], vy_ast.Int):
raise InvalidType("Value must be a literal integer", args[1])
validate_expected_type(args[1], IntegerAbstractType())
if args[0].value >= args[1].value:
raise StructureException("Second value must be > first value", args[1])
else:
# iteration over a variable or literal list
type_list = [
i.value_type
for i in get_possible_types_from_node(node.iter)
if isinstance(i, (DynamicArrayDefinition, ArrayDefinition))
]
if not type_list:
raise InvalidType("Not an iterable type", node.iter)
if isinstance(node.iter, (vy_ast.Name, vy_ast.Attribute)):
# check for references to the iterated value within the body of the loop
assign = _check_iterator_modification(node.iter, node)
if assign:
raise ImmutableViolation("Cannot modify array during iteration", assign)
# Check if `iter` is a storage variable. `get_descendants()` is used to check for
# nested `self` (e.g. structs)
iter_is_storage_var = (
isinstance(node.iter, vy_ast.Attribute)
and len(node.iter.get_descendants(vy_ast.Name, {"id": "self"})) > 0
)
if iter_is_storage_var:
# check if iterated value may be modified by function calls inside the loop
iter_name = node.iter.attr
for call_node in node.get_descendants(vy_ast.Call, {"func.value.id": "self"}):
fn_name = call_node.func.attr
fn_node = self.vyper_module.get_children(vy_ast.FunctionDef, {"name": fn_name})[0]
if _check_iterator_modification(node.iter, fn_node):
# check for direct modification
raise ImmutableViolation(
f"Cannot call '{fn_name}' inside for loop, it potentially "
f"modifies iterated storage variable '{iter_name}'",
call_node,
)
for name in self.namespace["self"].members[fn_name].recursive_calls:
# check for indirect modification
fn_node = self.vyper_module.get_children(vy_ast.FunctionDef, {"name": name})[0]
if _check_iterator_modification(node.iter, fn_node):
raise ImmutableViolation(
f"Cannot call '{fn_name}' inside for loop, it may call to '{name}' "
f"which potentially modifies iterated storage variable '{iter_name}'",
call_node,
)
self.expr_visitor.visit(node.iter)
for_loop_exceptions = []
iter_name = node.target.id
for type_ in type_list:
# type check the for loop body using each possible type for iterator value
type_ = copy.deepcopy(type_)
type_.is_constant = True
with self.namespace.enter_scope():
try:
self.namespace[iter_name] = type_
except VyperException as exc:
raise exc.with_annotation(node) from None
try:
for n in node.body:
self.visit(n)
# type information is applied directly because the scope is
# closed prior to the call to `StatementAnnotationVisitor`
node.target._metadata["type"] = type_
return
except (TypeMismatch, InvalidOperation) as exc:
for_loop_exceptions.append(exc)
if len(set(str(i) for i in for_loop_exceptions)) == 1:
# if every attempt at type checking raised the same exception
raise for_loop_exceptions[0]
# return an aggregate TypeMismatch that shows all possible exceptions
# depending on which type is used
types_str = [str(i) for i in type_list]
given_str = f"{", ".join(types_str[:1])} or {types_str[-1]}"
raise TypeMismatch(
f"Iterator value '{iter_name}' may be cast as {given_str}, "
"but type checking fails with all possible types:",
node,
*(
(f"Casting '{iter_name}' as {type_}: {exc.message}", exc.annotations[0])
for type_, exc in zip(type_list, for_loop_exceptions)
),
)
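# Illustrative note: `for i in range(10)`, `for i in range(1, 5)` and
# `for i in range(x, x + 4)` all satisfy the range() checks above, while
# `for i in range(x, y)` is rejected because the second bound must be the
# first bound plus a literal integer.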
def visit_Expr(self, node):
if not isinstance(node.value, vy_ast.Call):
raise StructureException("Expressions without assignment are disallowed", node)
fn_type = get_exact_type_from_node(node.value.func)
if isinstance(fn_type, Event):
raise StructureException("To call an event you must use the `log` statement", node)
if isinstance(fn_type, ContractFunction):
if (
fn_type.mutability > StateMutability.VIEW
and self.func.mutability <= StateMutability.VIEW
):
raise StateAccessViolation(
f"Cannot call a mutating function from a {self.func.mutability.value} function",
node,
)
if (
self.func.mutability == StateMutability.PURE
and fn_type.mutability != StateMutability.PURE
):
raise StateAccessViolation(
"Cannot call non-pure function from a pure function", node
)
if isinstance(fn_type, MemberFunctionDefinition) and fn_type.is_modifying:
fn_type.underlying_type.validate_modification(node, self.func.mutability)
# NOTE: fetch_call_return validates call args.
return_value = fn_type.fetch_call_return(node.value)
if (
return_value
and not isinstance(fn_type, MemberFunctionDefinition)
and not isinstance(fn_type, ContractFunction)
):
raise StructureException(
f"Function '{fn_type._id}' cannot be called without assigning the result", node
)
self.expr_visitor.visit(node.value)
def visit_Log(self, node):
if not isinstance(node.value, vy_ast.Call):
raise StructureException("Log must call an event", node)
event = get_exact_type_from_node(node.value.func)
if not isinstance(event, Event):
raise StructureException("Value is not an event", node.value)
event.fetch_call_return(node.value)
self.expr_visitor.visit(node.value)
class _LocalExpressionVisitor(VyperNodeVisitorBase):
ignored_types = (vy_ast.Constant, vy_ast.Name)
scope_name = "function"
def visit_Attribute(self, node: vy_ast.Attribute) -> None:
self.visit(node.value)
_validate_msg_data_attribute(node)
_validate_address_code_attribute(node)
def visit_BinOp(self, node: vy_ast.BinOp) -> None:
self.visit(node.left)
self.visit(node.right)
def visit_BoolOp(self, node: vy_ast.BoolOp) -> None:
for value in node.values: # type: ignore[attr-defined]
self.visit(value)
def visit_Call(self, node: vy_ast.Call) -> None:
self.visit(node.func)
for arg in node.args:
self.visit(arg)
for kwarg in node.keywords:
self.visit(kwarg.value)
def visit_Compare(self, node: vy_ast.Compare) -> None:
self.visit(node.left) # type: ignore[attr-defined]
self.visit(node.right) # type: ignore[attr-defined]
def visit_Dict(self, node: vy_ast.Dict) -> None:
for key in node.keys:
self.visit(key)
for value in node.values:
self.visit(value)
def visit_Index(self, node: vy_ast.Index) -> None:
self.visit(node.value)
def visit_List(self, node: vy_ast.List) -> None:
for element in node.elements:
self.visit(element)
def visit_Subscript(self, node: vy_ast.Subscript) -> None:
self.visit(node.value)
self.visit(node.slice)
def visit_Tuple(self, node: vy_ast.Tuple) -> None:
for element in node.elements:
self.visit(element)
def visit_UnaryOp(self, node: vy_ast.UnaryOp) -> None:
self.visit(node.operand) # type: ignore[attr-defined]
|
import copy
from typing import Optional
from vyper import ast as vy_ast
from vyper.ast.validation import validate_call_args
from vyper.exceptions import (
ExceptionList,
FunctionDeclarationException,
ImmutableViolation,
InvalidLiteral,
InvalidOperation,
InvalidType,
IteratorException,
NonPayableViolation,
StateAccessViolation,
StructureException,
TypeMismatch,
VariableDeclarationException,
VyperException,
)
# TODO consolidate some of these imports
from vyper.semantics.environment import CONSTANT_ENVIRONMENT_VARS, MUTABLE_ENVIRONMENT_VARS
from vyper.semantics.namespace import get_namespace
from vyper.semantics.types.abstract import IntegerAbstractType
from vyper.semantics.types.bases import DataLocation
from vyper.semantics.types.function import (
ContractFunction,
MemberFunctionDefinition,
StateMutability,
)
from vyper.semantics.types.indexable.mapping import MappingDefinition
from vyper.semantics.types.indexable.sequence import (
ArrayDefinition,
DynamicArrayDefinition,
TupleDefinition,
)
from vyper.semantics.types.user.event import Event
from vyper.semantics.types.utils import get_type_from_annotation
from vyper.semantics.types.value.address import AddressDefinition
from vyper.semantics.types.value.array_value import StringDefinition
from vyper.semantics.types.value.boolean import BoolDefinition
from vyper.semantics.validation.annotation import StatementAnnotationVisitor
from vyper.semantics.validation.base import VyperNodeVisitorBase
from vyper.semantics.validation.utils import (
get_common_types,
get_exact_type_from_node,
get_possible_types_from_node,
validate_expected_type,
)
def validate_functions(vy_module: vy_ast.Module) -> None:
"""Analyzes a vyper ast and validates the function-level namespaces."""
err_list = ExceptionList()
namespace = get_namespace()
for node in vy_module.get_children(vy_ast.FunctionDef):
with namespace.enter_scope():
try:
FunctionNodeVisitor(vy_module, node, namespace)
except VyperException as e:
err_list.append(e)
err_list.raise_if_not_empty()
def _is_terminus_node(node: vy_ast.VyperNode) -> bool:
if getattr(node, "_is_terminus", None):
return True
if isinstance(node, vy_ast.Expr) and isinstance(node.value, vy_ast.Call):
func = get_exact_type_from_node(node.value.func)
if getattr(func, "_is_terminus", None):
return True
return False
def check_for_terminus(node_list: list) -> bool:
if next((i for i in node_list if _is_terminus_node(i)), None):
return True
for node in [i for i in node_list if isinstance(i, vy_ast.If)][::-1]:
if not node.orelse or not check_for_terminus(node.orelse):
continue
if not check_for_terminus(node.body):
continue
return True
return False
def _check_iterator_modification(
target_node: vy_ast.VyperNode, search_node: vy_ast.VyperNode
) -> Optional[vy_ast.VyperNode]:
similar_nodes = [
n
for n in search_node.get_descendants(type(target_node))
if vy_ast.compare_nodes(target_node, n)
]
for node in similar_nodes:
# raise if the node is the target of an assignment statement
assign_node = node.get_ancestor((vy_ast.Assign, vy_ast.AugAssign))
# note the use of get_descendants() blocks statements like
# self.my_array[i] = x
if assign_node and node in assign_node.target.get_descendants(include_self=True):
return node
attr_node = node.get_ancestor(vy_ast.Attribute)
# note the use of get_descendants() blocks statements like
# self.my_array[i].append(x)
if (
attr_node is not None
and node in attr_node.value.get_descendants(include_self=True)
and attr_node.attr in ("append", "pop", "extend")
):
return node
return None
def _validate_revert_reason(msg_node: vy_ast.VyperNode) -> None:
if msg_node:
if isinstance(msg_node, vy_ast.Str):
if not msg_node.value.strip():
raise StructureException("Reason string cannot be empty", msg_node)
elif not (isinstance(msg_node, vy_ast.Name) and msg_node.id == "UNREACHABLE"):
try:
validate_expected_type(msg_node, StringDefinition(1024))
except TypeMismatch as e:
raise InvalidType("revert reason must fit within String[1024]") from e
def _validate_address_code_attribute(node: vy_ast.Attribute) -> None:
value_type = get_exact_type_from_node(node.value)
if isinstance(value_type, AddressDefinition) and node.attr == "code":
# Validate `slice(<address>.code, start, length)` where `length` is constant
parent = node.get_ancestor()
if isinstance(parent, vy_ast.Call):
ok_func = isinstance(parent.func, vy_ast.Name) and parent.func.id == "slice"
ok_args = len(parent.args) == 3 and isinstance(parent.args[2], vy_ast.Int)
if ok_func and ok_args:
return
raise StructureException(
"(address).code is only allowed inside of a slice function with a constant length",
node,
)
def _validate_msg_data_attribute(node: vy_ast.Attribute) -> None:
if isinstance(node.value, vy_ast.Name) and node.value.id == "msg" and node.attr == "data":
parent = node.get_ancestor()
if not isinstance(parent, vy_ast.Call) or parent.get("func.id") not in ("slice", "len"):
raise StructureException(
"msg.data is only allowed inside of the slice or len functions",
node,
)
if parent.get("func.id") == "slice":
ok_args = len(parent.args) == 3 and isinstance(parent.args[2], vy_ast.Int)
if not ok_args:
raise StructureException(
"slice(msg.data) must use a compile-time constant for length argument",
parent,
)
class FunctionNodeVisitor(VyperNodeVisitorBase):
ignored_types = (
vy_ast.Break,
vy_ast.Constant,
vy_ast.Pass,
)
scope_name = "function"
def __init__(
self, vyper_module: vy_ast.Module, fn_node: vy_ast.FunctionDef, namespace: dict
) -> None:
self.vyper_module = vyper_module
self.fn_node = fn_node
self.namespace = namespace
self.func = fn_node._metadata["type"]
self.annotation_visitor = StatementAnnotationVisitor(fn_node, namespace)
self.expr_visitor = _LocalExpressionVisitor()
namespace.update(self.func.arguments)
for node in fn_node.body:
self.visit(node)
if self.func.return_type:
if not check_for_terminus(fn_node.body):
raise FunctionDeclarationException(
f"Missing or unmatched return statements in function '{fn_node.name}'",
fn_node,
)
if self.func.mutability == StateMutability.PURE:
node_list = fn_node.get_descendants(
vy_ast.Attribute,
{
"value.id": set(CONSTANT_ENVIRONMENT_VARS.keys()).union(
set(MUTABLE_ENVIRONMENT_VARS.keys())
)
},
)
for node in node_list:
t = node._metadata.get("type")
if isinstance(t, ContractFunction) and t.mutability == StateMutability.PURE:
# allowed
continue
raise StateAccessViolation(
"not allowed to query contract or environment variables in pure functions",
node_list[0],
)
if self.func.mutability is not StateMutability.PAYABLE:
node_list = fn_node.get_descendants(
vy_ast.Attribute, {"value.id": "msg", "attr": "value"}
)
if node_list:
raise NonPayableViolation(
"msg.value is not allowed in non-payable functions", node_list[0]
)
def visit(self, node):
super().visit(node)
self.annotation_visitor.visit(node)
def visit_AnnAssign(self, node):
name = node.get("target.id")
if name is None:
raise VariableDeclarationException("Invalid assignment", node)
if not node.value:
raise VariableDeclarationException(
"Memory variables must be declared with an initial value", node
)
type_definition = get_type_from_annotation(node.annotation, DataLocation.MEMORY)
validate_expected_type(node.value, type_definition)
try:
self.namespace[name] = type_definition
except VyperException as exc:
raise exc.with_annotation(node) from None
self.expr_visitor.visit(node.value)
def visit_Assign(self, node):
if isinstance(node.value, vy_ast.Tuple):
raise StructureException("Right-hand side of assignment cannot be a tuple", node.value)
target = get_exact_type_from_node(node.target)
if isinstance(target, MappingDefinition):
raise StructureException(
"Left-hand side of assignment cannot be a HashMap without a key", node
)
validate_expected_type(node.value, target)
target.validate_modification(node, self.func.mutability)
self.expr_visitor.visit(node.value)
self.expr_visitor.visit(node.target)
def visit_AugAssign(self, node):
if isinstance(node.value, vy_ast.Tuple):
raise StructureException("Right-hand side of assignment cannot be a tuple", node.value)
target = get_exact_type_from_node(node.target)
validate_expected_type(node.value, target)
target.validate_modification(node, self.func.mutability)
self.expr_visitor.visit(node.value)
def visit_Raise(self, node):
if node.exc:
_validate_revert_reason(node.exc)
self.expr_visitor.visit(node.exc)
def visit_Assert(self, node):
if node.msg:
_validate_revert_reason(node.msg)
self.expr_visitor.visit(node.msg)
try:
validate_expected_type(node.test, BoolDefinition())
except InvalidType:
raise InvalidType("Assertion test value must be a boolean", node.test)
self.expr_visitor.visit(node.test)
def visit_Continue(self, node):
for_node = node.get_ancestor(vy_ast.For)
if for_node is None:
raise StructureException("`continue` must be enclosed in a `for` loop", node)
def visit_Return(self, node):
values = node.value
if values is None:
if self.func.return_type:
raise FunctionDeclarationException("Return statement is missing a value", node)
return
elif self.func.return_type is None:
raise FunctionDeclarationException("Function does not return any values", node)
if isinstance(values, vy_ast.Tuple):
values = values.elements
if not isinstance(self.func.return_type, TupleDefinition):
raise FunctionDeclarationException("Function only returns a single value", node)
if self.func.return_type.length != len(values):
raise FunctionDeclarationException(
f"Incorrect number of return values: "
f"expected {self.func.return_type.length}, got {len(values)}",
node,
)
for given, expected in zip(values, self.func.return_type.value_type):
validate_expected_type(given, expected)
else:
validate_expected_type(values, self.func.return_type)
self.expr_visitor.visit(node.value)
def visit_If(self, node):
validate_expected_type(node.test, BoolDefinition())
self.expr_visitor.visit(node.test)
with self.namespace.enter_scope():
for n in node.body:
self.visit(n)
with self.namespace.enter_scope():
for n in node.orelse:
self.visit(n)
def visit_For(self, node):
if isinstance(node.iter, vy_ast.Subscript):
raise StructureException("Cannot iterate over a nested list", node.iter)
if isinstance(node.iter, vy_ast.Call):
# iteration via range()
if node.iter.get("func.id") != "range":
raise IteratorException(
"Cannot iterate over the result of a function call", node.iter
)
validate_call_args(node.iter, (1, 2))
args = node.iter.args
if len(args) == 1:
# range(CONSTANT)
if not isinstance(args[0], vy_ast.Num):
raise StateAccessViolation("Value must be a literal", node)
if args[0].value <= 0:
raise StructureException("For loop must have at least 1 iteration", args[0])
validate_expected_type(args[0], IntegerAbstractType())
type_list = get_possible_types_from_node(args[0])
else:
validate_expected_type(args[0], IntegerAbstractType())
type_list = get_common_types(*args)
if not isinstance(args[0], vy_ast.Constant):
# range(x, x + CONSTANT)
if not isinstance(args[1], vy_ast.BinOp) or not isinstance(
args[1].op, vy_ast.Add
):
raise StructureException(
"Second element must be the first element plus a literal value",
args[0],
)
if not vy_ast.compare_nodes(args[0], args[1].left):
raise StructureException(
"First and second variable must be the same", args[1].left
)
if not isinstance(args[1].right, vy_ast.Int):
raise InvalidLiteral("Literal must be an integer", args[1].right)
if args[1].right.value < 1:
raise StructureException(
f"For loop has invalid number of iterations ({args[1].right.value}),"
" the value must be greater than zero",
args[1].right,
)
else:
# range(CONSTANT, CONSTANT)
if not isinstance(args[1], vy_ast.Int):
raise InvalidType("Value must be a literal integer", args[1])
validate_expected_type(args[1], IntegerAbstractType())
if args[0].value >= args[1].value:
raise StructureException("Second value must be > first value", args[1])
else:
# iteration over a variable or literal list
type_list = [
i.value_type
for i in get_possible_types_from_node(node.iter)
if isinstance(i, (DynamicArrayDefinition, ArrayDefinition))
]
if not type_list:
raise InvalidType("Not an iterable type", node.iter)
if isinstance(node.iter, (vy_ast.Name, vy_ast.Attribute)):
# check for references to the iterated value within the body of the loop
assign = _check_iterator_modification(node.iter, node)
if assign:
raise ImmutableViolation("Cannot modify array during iteration", assign)
# Check if `iter` is a storage variable. get_descendants` is used to check for
# nested `self` (e.g. structs)
iter_is_storage_var = (
isinstance(node.iter, vy_ast.Attribute)
and len(node.iter.get_descendants(vy_ast.Name, {"id": "self"})) > 0
)
if iter_is_storage_var:
# check if iterated value may be modified by function calls inside the loop
iter_name = node.iter.attr
for call_node in node.get_descendants(vy_ast.Call, {"func.value.id": "self"}):
fn_name = call_node.func.attr
fn_node = self.vyper_module.get_children(vy_ast.FunctionDef, {"name": fn_name})[0]
if _check_iterator_modification(node.iter, fn_node):
# check for direct modification
raise ImmutableViolation(
f"Cannot call '{fn_name}' inside for loop, it potentially "
f"modifies iterated storage variable '{iter_name}'",
call_node,
)
for name in self.namespace["self"].members[fn_name].recursive_calls:
# check for indirect modification
fn_node = self.vyper_module.get_children(vy_ast.FunctionDef, {"name": name})[0]
if _check_iterator_modification(node.iter, fn_node):
raise ImmutableViolation(
f"Cannot call '{fn_name}' inside for loop, it may call to '{name}' "
f"which potentially modifies iterated storage variable '{iter_name}'",
call_node,
)
self.expr_visitor.visit(node.iter)
for_loop_exceptions = []
iter_name = node.target.id
for type_ in type_list:
# type check the for loop body using each possible type for iterator value
type_ = copy.deepcopy(type_)
type_.is_constant = True
with self.namespace.enter_scope():
try:
self.namespace[iter_name] = type_
except VyperException as exc:
raise exc.with_annotation(node) from None
try:
for n in node.body:
self.visit(n)
# type information is applied directly because the scope is
# closed prior to the call to `StatementAnnotationVisitor`
node.target._metadata["type"] = type_
return
except (TypeMismatch, InvalidOperation) as exc:
for_loop_exceptions.append(exc)
if len(set(str(i) for i in for_loop_exceptions)) == 1:
# if every attempt at type checking raised the same exception
raise for_loop_exceptions[0]
# return an aggregate TypeMismatch that shows all possible exceptions
# depending on which type is used
types_str = [str(i) for i in type_list]
given_str = f"{', '.join(types_str[:1])} or {types_str[-1]}"
raise TypeMismatch(
f"Iterator value '{iter_name}' may be cast as {given_str}, "
"but type checking fails with all possible types:",
node,
*(
(f"Casting '{iter_name}' as {type_}: {exc.message}", exc.annotations[0])
for type_, exc in zip(type_list, for_loop_exceptions)
),
)
def visit_Expr(self, node):
if not isinstance(node.value, vy_ast.Call):
raise StructureException("Expressions without assignment are disallowed", node)
fn_type = get_exact_type_from_node(node.value.func)
if isinstance(fn_type, Event):
raise StructureException("To call an event you must use the `log` statement", node)
if isinstance(fn_type, ContractFunction):
if (
fn_type.mutability > StateMutability.VIEW
and self.func.mutability <= StateMutability.VIEW
):
raise StateAccessViolation(
f"Cannot call a mutating function from a {self.func.mutability.value} function",
node,
)
if (
self.func.mutability == StateMutability.PURE
and fn_type.mutability != StateMutability.PURE
):
raise StateAccessViolation(
"Cannot call non-pure function from a pure function", node
)
if isinstance(fn_type, MemberFunctionDefinition) and fn_type.is_modifying:
fn_type.underlying_type.validate_modification(node, self.func.mutability)
# NOTE: fetch_call_return validates call args.
return_value = fn_type.fetch_call_return(node.value)
if (
return_value
and not isinstance(fn_type, MemberFunctionDefinition)
and not isinstance(fn_type, ContractFunction)
):
raise StructureException(
f"Function '{fn_type._id}' cannot be called without assigning the result", node
)
self.expr_visitor.visit(node.value)
def visit_Log(self, node):
if not isinstance(node.value, vy_ast.Call):
raise StructureException("Log must call an event", node)
event = get_exact_type_from_node(node.value.func)
if not isinstance(event, Event):
raise StructureException("Value is not an event", node.value)
event.fetch_call_return(node.value)
self.expr_visitor.visit(node.value)
class _LocalExpressionVisitor(VyperNodeVisitorBase):
ignored_types = (vy_ast.Constant, vy_ast.Name)
scope_name = "function"
def visit_Attribute(self, node: vy_ast.Attribute) -> None:
self.visit(node.value)
_validate_msg_data_attribute(node)
_validate_address_code_attribute(node)
def visit_BinOp(self, node: vy_ast.BinOp) -> None:
self.visit(node.left)
self.visit(node.right)
def visit_BoolOp(self, node: vy_ast.BoolOp) -> None:
for value in node.values: # type: ignore[attr-defined]
self.visit(value)
def visit_Call(self, node: vy_ast.Call) -> None:
self.visit(node.func)
for arg in node.args:
self.visit(arg)
for kwarg in node.keywords:
self.visit(kwarg.value)
def visit_Compare(self, node: vy_ast.Compare) -> None:
self.visit(node.left) # type: ignore[attr-defined]
self.visit(node.right) # type: ignore[attr-defined]
def visit_Dict(self, node: vy_ast.Dict) -> None:
for key in node.keys:
self.visit(key)
for value in node.values:
self.visit(value)
def visit_Index(self, node: vy_ast.Index) -> None:
self.visit(node.value)
def visit_List(self, node: vy_ast.List) -> None:
for element in node.elements:
self.visit(element)
def visit_Subscript(self, node: vy_ast.Subscript) -> None:
self.visit(node.value)
self.visit(node.slice)
def visit_Tuple(self, node: vy_ast.Tuple) -> None:
for element in node.elements:
self.visit(element)
def visit_UnaryOp(self, node: vy_ast.UnaryOp) -> None:
self.visit(node.operand) # type: ignore[attr-defined]
|
# Daisyxmusic (Telegram bot project )
# Copyright (C) 2021 Inukaasith
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import asyncio
import math
import os
import time
from random import randint
from urllib.parse import urlparse
import aiofiles
import aiohttp
import requests
import wget
import youtube_dl
from pyrogram import Client, filters
from pyrogram.errors import FloodWait, MessageNotModified
from pyrogram.types import Message
from youtube_search import YoutubeSearch
from youtubesearchpython import SearchVideos
from DaisyXMusic.config import DURATION_LIMIT
from DaisyXMusic.modules.play import arq
@Client.on_message(filters.command("song") & ~filters.channel)
def song(client, message):
user_id = message.from_user.id
user_name = message.from_user.first_name
rpk = "[" + user_name + "](tg://user?id=" + str(user_id) + ")"
query = ""
for i in message.command[1:]:
query += " " + str(i)
print(query)
m = message.reply("🔎 Finding the song...")
ydl_opts = {"format": "bestaudio[ext=m4a]"}
try:
results = YoutubeSearch(query, max_results=1).to_dict()
link = f"https://youtube.com{results[0]["url_suffix"]}"
# print(results)
title = results[0]["title"][:40]
thumbnail = results[0]["thumbnails"][0]
thumb_name = f"thumb{title}.jpg"
thumb = requests.get(thumbnail, allow_redirects=True)
open(thumb_name, "wb").write(thumb.content)
duration = results[0]["duration"]
results[0]["url_suffix"]
results[0]["views"]
except Exception as e:
m.edit("❌ Found Nothing.\n\nTry another keywork or maybe spell it properly.")
print(str(e))
return
m.edit("Downloading the song ")
try:
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
info_dict = ydl.extract_info(link, download=False)
audio_file = ydl.prepare_filename(info_dict)
ydl.process_info(info_dict)
rep = "**🎵 Uploaded by DaisyXMusic**"
secmul, dur, dur_arr = 1, 0, duration.split(":")
for i in range(len(dur_arr) - 1, -1, -1):
dur += int(dur_arr[i]) * secmul
secmul *= 60
message.reply_audio(
audio_file,
caption=rep,
thumb=thumb_name,
parse_mode="md",
title=title,
duration=dur,
)
m.delete()
except Exception as e:
m.edit("❌ Error")
print(e)
try:
os.remove(audio_file)
os.remove(thumb_name)
except Exception as e:
print(e)
def get_text(message: Message) -> [None, str]:
text_to_return = message.text
if message.text is None:
return None
if " " in text_to_return:
try:
return message.text.split(None, 1)[1]
except IndexError:
return None
else:
return None
def humanbytes(size):
if not size:
return ""
power = 2 ** 10
raised_to_pow = 0
dict_power_n = {0: "", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti"}
while size > power:
size /= power
raised_to_pow += 1
return str(round(size, 2)) + " " + dict_power_n[raised_to_pow] + "B"
async def progress(current, total, message, start, type_of_ps, file_name=None):
now = time.time()
diff = now - start
if round(diff % 10.00) == 0 or current == total:
percentage = current * 100 / total
speed = current / diff
elapsed_time = round(diff) * 1000
if elapsed_time == 0:
return
time_to_completion = round((total - current) / speed) * 1000
estimated_total_time = elapsed_time + time_to_completion
progress_str = "{0}{1} {2}%\n".format(
"".join(["🔴" for i in range(math.floor(percentage / 10))]),
"".join(["🔘" for i in range(10 - math.floor(percentage / 10))]),
round(percentage, 2),
)
tmp = progress_str + "{0} of {1}\nETA: {2}".format(
humanbytes(current), humanbytes(total), time_formatter(estimated_total_time)
)
if file_name:
try:
await message.edit(
"{}\n**File Name:** `{}`\n{}".format(type_of_ps, file_name, tmp)
)
except FloodWait as e:
await asyncio.sleep(e.x)
except MessageNotModified:
pass
else:
try:
await message.edit("{}\n{}".format(type_of_ps, tmp))
except FloodWait as e:
await asyncio.sleep(e.x)
except MessageNotModified:
pass
def get_user(message: Message, text: str) -> [int, str, None]:
if text is None:
asplit = None
else:
asplit = text.split(" ", 1)
user_s = None
reason_ = None
if message.reply_to_message:
user_s = message.reply_to_message.from_user.id
reason_ = text if text else None
elif asplit is None:
return None, None
elif len(asplit[0]) > 0:
user_s = int(asplit[0]) if asplit[0].isdigit() else asplit[0]
if len(asplit) == 2:
reason_ = asplit[1]
return user_s, reason_
def get_readable_time(seconds: int) -> str:
count = 0
ping_time = ""
time_list = []
time_suffix_list = ["s", "m", "h", "days"]
while count < 4:
count += 1
if count < 3:
remainder, result = divmod(seconds, 60)
else:
remainder, result = divmod(seconds, 24)
if seconds == 0 and remainder == 0:
break
time_list.append(int(result))
seconds = int(remainder)
for x in range(len(time_list)):
time_list[x] = str(time_list[x]) + time_suffix_list[x]
if len(time_list) == 4:
ping_time += time_list.pop() + ", "
time_list.reverse()
ping_time += ":".join(time_list)
return ping_time
def time_formatter(milliseconds: int) -> str:
seconds, milliseconds = divmod(int(milliseconds), 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
tmp = (
((str(days) + " day(s), ") if days else "")
+ ((str(hours) + " hour(s), ") if hours else "")
+ ((str(minutes) + " minute(s), ") if minutes else "")
+ ((str(seconds) + " second(s), ") if seconds else "")
+ ((str(milliseconds) + " millisecond(s), ") if milliseconds else "")
)
return tmp[:-2]
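# Quick sanity values for the helpers above (arbitrary inputs; outputs follow from the
# implementations as written):
#   humanbytes(1536)          -> "1.5 KiB"
#   time_formatter(3_661_000) -> "1 hour(s), 1 minute(s), 1 second(s)"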
ydl_opts = {
"format": "bestaudio/best",
"writethumbnail": True,
"postprocessors": [
{
"key": "FFmpegExtractAudio",
"preferredcodec": "mp3",
"preferredquality": "192",
}
],
}
def get_file_extension_from_url(url):
url_path = urlparse(url).path
basename = os.path.basename(url_path)
return basename.split(".")[-1]
# Function To Download Song
async def download_song(url):
song_name = f"{randint(6969, 6999)}.mp3"
async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
if resp.status == 200:
f = await aiofiles.open(song_name, mode="wb")
await f.write(await resp.read())
await f.close()
return song_name
is_downloading = False
def time_to_seconds(time):
stringt = str(time)
return sum(int(x) * 60 ** i for i, x in enumerate(reversed(stringt.split(":"))))
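# e.g. time_to_seconds("3:25") == 205 and time_to_seconds("1:02:03") == 3723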
@Client.on_message(filters.command("saavn") & ~filters.edited)
async def jssong(_, message):
global is_downloading
if len(message.command) < 2:
await message.reply_text("/saavn requires an argument.")
return
if is_downloading:
await message.reply_text(
"Another download is in progress, try again after sometime."
)
return
is_downloading = True
text = message.text.split(None, 1)[1]
query = text.replace(" ", "%20")
m = await message.reply_text("Searching...")
try:
songs = await arq.saavn(query)
        if not songs.ok:
            await message.reply_text(songs.result)
            is_downloading = False
            return
sname = songs.result[0].song
slink = songs.result[0].media_url
ssingers = songs.result[0].singers
await m.edit("Downloading")
song = await download_song(slink)
await m.edit("Uploading")
await message.reply_audio(audio=song, title=sname, performer=ssingers)
os.remove(song)
await m.delete()
except Exception as e:
is_downloading = False
await m.edit(str(e))
return
is_downloading = False
# Deezer Music
@Client.on_message(filters.command("deezer") & ~filters.edited)
async def deezsong(_, message):
global is_downloading
if len(message.command) < 2:
await message.reply_text("/deezer requires an argument.")
return
if is_downloading:
await message.reply_text(
"Another download is in progress, try again after sometime."
)
return
is_downloading = True
text = message.text.split(None, 1)[1]
query = text.replace(" ", "%20")
m = await message.reply_text("Searching...")
try:
songs = await arq.deezer(query, 1)
        if not songs.ok:
            await message.reply_text(songs.result)
            is_downloading = False
            return
title = songs.result[0].title
url = songs.result[0].url
artist = songs.result[0].artist
await m.edit("Downloading")
song = await download_song(url)
await m.edit("Uploading")
await message.reply_audio(audio=song, title=title, performer=artist)
os.remove(song)
await m.delete()
except Exception as e:
is_downloading = False
await m.edit(str(e))
return
is_downloading = False
@Client.on_message(filters.command(["vsong", "video"]))
async def ytmusic(client, message: Message):
global is_downloading
if is_downloading:
await message.reply_text(
"Another download is in progress, try again after sometime."
)
return
urlissed = get_text(message)
pablo = await client.send_message(
message.chat.id, f"`Getting {urlissed} From Youtube Servers. Please Wait.`"
)
if not urlissed:
await pablo.edit("Invalid Command Syntax, Please Check Help Menu To Know More!")
return
search = SearchVideos(f"{urlissed}", offset=1, mode="dict", max_results=1)
mi = search.result()
mio = mi["search_result"]
mo = mio[0]["link"]
thum = mio[0]["title"]
fridayz = mio[0]["id"]
thums = mio[0]["channel"]
kekme = f"https://img.youtube.com/vi/{fridayz}/hqdefault.jpg"
await asyncio.sleep(0.6)
url = mo
sedlyf = wget.download(kekme)
opts = {
"format": "best",
"addmetadata": True,
"key": "FFmpegMetadata",
"prefer_ffmpeg": True,
"geo_bypass": True,
"nocheckcertificate": True,
"postprocessors": [{"key": "FFmpegVideoConvertor", "preferedformat": "mp4"}],
"outtmpl": "%(id)s.mp4",
"logtostderr": False,
"quiet": True,
}
try:
is_downloading = True
with youtube_dl.YoutubeDL(opts) as ytdl:
infoo = ytdl.extract_info(url, False)
duration = round(infoo["duration"] / 60)
if duration > DURATION_LIMIT:
await pablo.edit(
f"❌ Videos longer than {DURATION_LIMIT} minute(s) aren't allowed, the provided video is {duration} minute(s)"
)
is_downloading = False
return
ytdl_data = ytdl.extract_info(url, download=True)
except Exception:
# await pablo.edit(event, f"**Failed To Download** \n**Error :** `{str(e)}`")
is_downloading = False
return
c_time = time.time()
    file_stark = f"{ytdl_data['id']}.mp4"
capy = f"**Video Name ➠** `{thum}` \n**Requested For :** `{urlissed}` \n**Channel :** `{thums}` \n**Link :** `{mo}`"
await client.send_video(
message.chat.id,
video=open(file_stark, "rb"),
duration=int(ytdl_data["duration"]),
file_name=str(ytdl_data["title"]),
thumb=sedlyf,
caption=capy,
supports_streaming=True,
progress=progress,
progress_args=(
pablo,
c_time,
f"`Uploading {urlissed} Song From YouTube Music!`",
file_stark,
),
)
await pablo.delete()
is_downloading = False
for files in (sedlyf, file_stark):
if files and os.path.exists(files):
os.remove(files)
|
# Daisyxmusic (Telegram bot project )
# Copyright (C) 2021 Inukaasith
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import asyncio
import math
import os
import time
from random import randint
from urllib.parse import urlparse
import aiofiles
import aiohttp
import requests
import wget
import youtube_dl
from pyrogram import Client, filters
from pyrogram.errors import FloodWait, MessageNotModified
from pyrogram.types import Message
from youtube_search import YoutubeSearch
from youtubesearchpython import SearchVideos
from DaisyXMusic.config import DURATION_LIMIT
from DaisyXMusic.modules.play import arq
@Client.on_message(filters.command("song") & ~filters.channel)
def song(client, message):
user_id = message.from_user.id
user_name = message.from_user.first_name
rpk = "[" + user_name + "](tg://user?id=" + str(user_id) + ")"
query = ""
for i in message.command[1:]:
query += " " + str(i)
print(query)
m = message.reply("🔎 Finding the song...")
ydl_opts = {"format": "bestaudio[ext=m4a]"}
try:
results = YoutubeSearch(query, max_results=1).to_dict()
link = f"https://youtube.com{results[0]['url_suffix']}"
# print(results)
title = results[0]["title"][:40]
thumbnail = results[0]["thumbnails"][0]
thumb_name = f"thumb{title}.jpg"
thumb = requests.get(thumbnail, allow_redirects=True)
open(thumb_name, "wb").write(thumb.content)
duration = results[0]["duration"]
results[0]["url_suffix"]
results[0]["views"]
except Exception as e:
m.edit("❌ Found Nothing.\n\nTry another keywork or maybe spell it properly.")
print(str(e))
return
m.edit("Downloading the song ")
try:
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
info_dict = ydl.extract_info(link, download=False)
audio_file = ydl.prepare_filename(info_dict)
ydl.process_info(info_dict)
rep = "**🎵 Uploaded by DaisyXMusic**"
secmul, dur, dur_arr = 1, 0, duration.split(":")
for i in range(len(dur_arr) - 1, -1, -1):
dur += int(dur_arr[i]) * secmul
secmul *= 60
message.reply_audio(
audio_file,
caption=rep,
thumb=thumb_name,
parse_mode="md",
title=title,
duration=dur,
)
m.delete()
except Exception as e:
m.edit("❌ Error")
print(e)
try:
os.remove(audio_file)
os.remove(thumb_name)
except Exception as e:
print(e)
def get_text(message: Message) -> [None, str]:
text_to_return = message.text
if message.text is None:
return None
if " " in text_to_return:
try:
return message.text.split(None, 1)[1]
except IndexError:
return None
else:
return None
def humanbytes(size):
if not size:
return ""
power = 2 ** 10
raised_to_pow = 0
dict_power_n = {0: "", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti"}
while size > power:
size /= power
raised_to_pow += 1
return str(round(size, 2)) + " " + dict_power_n[raised_to_pow] + "B"
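# Illustrative usage (editor's sketch, not from the original source): humanbytes()
# renders a byte count with binary prefixes, and returns "" for a falsy size.
#     >>> humanbytes(1536)
#     '1.5 KiB'
#     >>> humanbytes(0)
#     ''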
async def progress(current, total, message, start, type_of_ps, file_name=None):
now = time.time()
diff = now - start
if round(diff % 10.00) == 0 or current == total:
percentage = current * 100 / total
speed = current / diff
elapsed_time = round(diff) * 1000
if elapsed_time == 0:
return
time_to_completion = round((total - current) / speed) * 1000
estimated_total_time = elapsed_time + time_to_completion
progress_str = "{0}{1} {2}%\n".format(
"".join(["🔴" for i in range(math.floor(percentage / 10))]),
"".join(["🔘" for i in range(10 - math.floor(percentage / 10))]),
round(percentage, 2),
)
tmp = progress_str + "{0} of {1}\nETA: {2}".format(
humanbytes(current), humanbytes(total), time_formatter(estimated_total_time)
)
if file_name:
try:
await message.edit(
"{}\n**File Name:** `{}`\n{}".format(type_of_ps, file_name, tmp)
)
except FloodWait as e:
await asyncio.sleep(e.x)
except MessageNotModified:
pass
else:
try:
await message.edit("{}\n{}".format(type_of_ps, tmp))
except FloodWait as e:
await asyncio.sleep(e.x)
except MessageNotModified:
pass
def get_user(message: Message, text: str) -> [int, str, None]:
if text is None:
asplit = None
else:
asplit = text.split(" ", 1)
user_s = None
reason_ = None
if message.reply_to_message:
user_s = message.reply_to_message.from_user.id
reason_ = text if text else None
elif asplit is None:
return None, None
elif len(asplit[0]) > 0:
user_s = int(asplit[0]) if asplit[0].isdigit() else asplit[0]
if len(asplit) == 2:
reason_ = asplit[1]
return user_s, reason_
def get_readable_time(seconds: int) -> str:
count = 0
ping_time = ""
time_list = []
time_suffix_list = ["s", "m", "h", "days"]
while count < 4:
count += 1
if count < 3:
remainder, result = divmod(seconds, 60)
else:
remainder, result = divmod(seconds, 24)
if seconds == 0 and remainder == 0:
break
time_list.append(int(result))
seconds = int(remainder)
for x in range(len(time_list)):
time_list[x] = str(time_list[x]) + time_suffix_list[x]
if len(time_list) == 4:
ping_time += time_list.pop() + ", "
time_list.reverse()
ping_time += ":".join(time_list)
return ping_time
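# Illustrative usage (editor's sketch, not from the original source):
#     >>> get_readable_time(3661)
#     '1h:1m:1s'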
def time_formatter(milliseconds: int) -> str:
seconds, milliseconds = divmod(int(milliseconds), 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
tmp = (
((str(days) + " day(s), ") if days else "")
+ ((str(hours) + " hour(s), ") if hours else "")
+ ((str(minutes) + " minute(s), ") if minutes else "")
+ ((str(seconds) + " second(s), ") if seconds else "")
+ ((str(milliseconds) + " millisecond(s), ") if milliseconds else "")
)
return tmp[:-2]
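# Illustrative usage (editor's sketch, not from the original source): the argument is
# in milliseconds and zero-valued units are skipped.
#     >>> time_formatter(90061000)
#     '1 day(s), 1 hour(s), 1 minute(s), 1 second(s)'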
ydl_opts = {
"format": "bestaudio/best",
"writethumbnail": True,
"postprocessors": [
{
"key": "FFmpegExtractAudio",
"preferredcodec": "mp3",
"preferredquality": "192",
}
],
}
def get_file_extension_from_url(url):
url_path = urlparse(url).path
basename = os.path.basename(url_path)
return basename.split(".")[-1]
# Function To Download Song
async def download_song(url):
song_name = f"{randint(6969, 6999)}.mp3"
async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
if resp.status == 200:
f = await aiofiles.open(song_name, mode="wb")
await f.write(await resp.read())
await f.close()
return song_name
is_downloading = False
def time_to_seconds(time):
stringt = str(time)
return sum(int(x) * 60 ** i for i, x in enumerate(reversed(stringt.split(":"))))
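# Illustrative usage (editor's sketch): accepts "MM:SS" or "HH:MM:SS" style strings.
#     >>> time_to_seconds("1:02:03")
#     3723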
@Client.on_message(filters.command("saavn") & ~filters.edited)
async def jssong(_, message):
global is_downloading
if len(message.command) < 2:
await message.reply_text("/saavn requires an argument.")
return
if is_downloading:
await message.reply_text(
"Another download is in progress, try again after sometime."
)
return
is_downloading = True
text = message.text.split(None, 1)[1]
query = text.replace(" ", "%20")
m = await message.reply_text("Searching...")
try:
songs = await arq.saavn(query)
        if not songs.ok:
            await message.reply_text(songs.result)
            is_downloading = False
            return
sname = songs.result[0].song
slink = songs.result[0].media_url
ssingers = songs.result[0].singers
await m.edit("Downloading")
song = await download_song(slink)
await m.edit("Uploading")
await message.reply_audio(audio=song, title=sname, performer=ssingers)
os.remove(song)
await m.delete()
except Exception as e:
is_downloading = False
await m.edit(str(e))
return
is_downloading = False
# Deezer Music
@Client.on_message(filters.command("deezer") & ~filters.edited)
async def deezsong(_, message):
global is_downloading
if len(message.command) < 2:
await message.reply_text("/deezer requires an argument.")
return
if is_downloading:
await message.reply_text(
"Another download is in progress, try again after sometime."
)
return
is_downloading = True
text = message.text.split(None, 1)[1]
query = text.replace(" ", "%20")
m = await message.reply_text("Searching...")
try:
songs = await arq.deezer(query, 1)
        if not songs.ok:
            await message.reply_text(songs.result)
            is_downloading = False
            return
title = songs.result[0].title
url = songs.result[0].url
artist = songs.result[0].artist
await m.edit("Downloading")
song = await download_song(url)
await m.edit("Uploading")
await message.reply_audio(audio=song, title=title, performer=artist)
os.remove(song)
await m.delete()
except Exception as e:
is_downloading = False
await m.edit(str(e))
return
is_downloading = False
@Client.on_message(filters.command(["vsong", "video"]))
async def ytmusic(client, message: Message):
global is_downloading
if is_downloading:
await message.reply_text(
"Another download is in progress, try again after sometime."
)
return
urlissed = get_text(message)
pablo = await client.send_message(
message.chat.id, f"`Getting {urlissed} From Youtube Servers. Please Wait.`"
)
if not urlissed:
await pablo.edit("Invalid Command Syntax, Please Check Help Menu To Know More!")
return
search = SearchVideos(f"{urlissed}", offset=1, mode="dict", max_results=1)
mi = search.result()
mio = mi["search_result"]
mo = mio[0]["link"]
thum = mio[0]["title"]
fridayz = mio[0]["id"]
thums = mio[0]["channel"]
kekme = f"https://img.youtube.com/vi/{fridayz}/hqdefault.jpg"
await asyncio.sleep(0.6)
url = mo
sedlyf = wget.download(kekme)
opts = {
"format": "best",
"addmetadata": True,
"key": "FFmpegMetadata",
"prefer_ffmpeg": True,
"geo_bypass": True,
"nocheckcertificate": True,
"postprocessors": [{"key": "FFmpegVideoConvertor", "preferedformat": "mp4"}],
"outtmpl": "%(id)s.mp4",
"logtostderr": False,
"quiet": True,
}
try:
is_downloading = True
with youtube_dl.YoutubeDL(opts) as ytdl:
infoo = ytdl.extract_info(url, False)
duration = round(infoo["duration"] / 60)
if duration > DURATION_LIMIT:
await pablo.edit(
f"❌ Videos longer than {DURATION_LIMIT} minute(s) aren't allowed, the provided video is {duration} minute(s)"
)
is_downloading = False
return
ytdl_data = ytdl.extract_info(url, download=True)
except Exception:
# await pablo.edit(event, f"**Failed To Download** \n**Error :** `{str(e)}`")
is_downloading = False
return
c_time = time.time()
file_stark = f"{ytdl_data['id']}.mp4"
capy = f"**Video Name ➠** `{thum}` \n**Requested For :** `{urlissed}` \n**Channel :** `{thums}` \n**Link :** `{mo}`"
await client.send_video(
message.chat.id,
video=open(file_stark, "rb"),
duration=int(ytdl_data["duration"]),
file_name=str(ytdl_data["title"]),
thumb=sedlyf,
caption=capy,
supports_streaming=True,
progress=progress,
progress_args=(
pablo,
c_time,
f"`Uploading {urlissed} Song From YouTube Music!`",
file_stark,
),
)
await pablo.delete()
is_downloading = False
for files in (sedlyf, file_stark):
if files and os.path.exists(files):
os.remove(files)
|
#!/usr/bin/env python3
import sys
import time
import datetime
import os
import psutil
def main():
CurrentTime = datetime.datetime.now()
with open(r"/sys/class/thermal/thermal_zone0/temp") as f:
CurrentTemp0 = f.readline()
with open(r"/sys/class/thermal/thermal_zone1/temp") as f:
CurrentTemp1 = f.readline()
freq = []
for i in range(4):
with open(f"/sys/devices/system/cpu/cpu{i}/cpufreq/cpuinfo_cur_freq") as f:
freq.append(f.readline())
with open(r"/sys/devices/system/cpu/cpu0/cpufreq/stats/time_in_state") as f:
time_in_state = f.read()
print(f"\n{CurrentTime.strftime("%H:%M:%S")}\t CPU0-1: {float(CurrentTemp0) / 1000} ℃\t\tCPU2-3: {float(CurrentTemp1) / 1000} ℃")
cpu = psutil.cpu_times_percent(percpu=True)
time.sleep(1)
cpu = psutil.cpu_times_percent(percpu=True)
print(f"\nCPU busy (%) (1-4) : {100-cpu[0].idle:.2f} {100-cpu[1].idle:.2f} {100-cpu[2].idle:.2f} {100-cpu[3].idle:.2f}")
print(f"\nCPU freq (kHz) (1-4) : {int(freq[0])/1000} {int(freq[1])/1000} {int(freq[2])/1000} {int(freq[3])/1000}")
print("\nTIME IN STATE\n-------------\nkHz Percent\n-------------")
total = 0
for t in time_in_state.split('\n'):
if t:
freq, per = t.split()
total += int(per)
for t in time_in_state.split('\n'):
if t:
freq, per = t.split()
freq = int(int(freq)/1000)
per = int(int(per) / total * 100)
print(f"{freq} {per}")
print("\nOSP Status")
os.system('ps -T -p `pgrep OSP` -o cpuid,cls,pri,pcpu,lwp,comm')
diskfree = psutil.disk_usage('/').percent
print(f"\nDiskfree: {diskfree}%")
print("\nCharge Log\n----------")
with open(r"/var/log/charge.log") as f:
print(f.read())
if __name__ == '__main__':
sys.exit(main())
|
#!/usr/bin/env python3
import sys
import time
import datetime
import os
import psutil
def main():
CurrentTime = datetime.datetime.now()
with open(r"/sys/class/thermal/thermal_zone0/temp") as f:
CurrentTemp0 = f.readline()
with open(r"/sys/class/thermal/thermal_zone1/temp") as f:
CurrentTemp1 = f.readline()
freq = []
for i in range(4):
with open(f"/sys/devices/system/cpu/cpu{i}/cpufreq/cpuinfo_cur_freq") as f:
freq.append(f.readline())
with open(r"/sys/devices/system/cpu/cpu0/cpufreq/stats/time_in_state") as f:
time_in_state = f.read()
print(f"\n{CurrentTime.strftime('%H:%M:%S')}\t CPU0-1: {float(CurrentTemp0) / 1000} ℃\t\tCPU2-3: {float(CurrentTemp1) / 1000} ℃")
cpu = psutil.cpu_times_percent(percpu=True)
time.sleep(1)
cpu = psutil.cpu_times_percent(percpu=True)
print(f"\nCPU busy (%) (1-4) : {100-cpu[0].idle:.2f} {100-cpu[1].idle:.2f} {100-cpu[2].idle:.2f} {100-cpu[3].idle:.2f}")
print(f"\nCPU freq (kHz) (1-4) : {int(freq[0])/1000} {int(freq[1])/1000} {int(freq[2])/1000} {int(freq[3])/1000}")
print("\nTIME IN STATE\n-------------\nkHz Percent\n-------------")
total = 0
for t in time_in_state.split('\n'):
if t:
freq, per = t.split()
total += int(per)
for t in time_in_state.split('\n'):
if t:
freq, per = t.split()
freq = int(int(freq)/1000)
per = int(int(per) / total * 100)
print(f"{freq} {per}")
print("\nOSP Status")
os.system('ps -T -p `pgrep OSP` -o cpuid,cls,pri,pcpu,lwp,comm')
diskfree = psutil.disk_usage('/').percent
print(f"\nDiskfree: {diskfree}%")
print("\nCharge Log\n----------")
with open(r"/var/log/charge.log") as f:
print(f.read())
if __name__ == '__main__':
sys.exit(main())
|
# YOLOv5 YOLO-specific modules
import argparse
import logging
import sys
from copy import deepcopy
sys.path.append('./') # to run '$ python *.py' files in subdirectories
logger = logging.getLogger(__name__)
from models.common import *
from models.experimental import *
from utils.autoanchor import check_anchor_order
from utils.general import make_divisible, check_file, set_logging
from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
select_device, copy_attr
try:
import thop # for FLOPS computation
except ImportError:
thop = None
class Detect(nn.Module):
stride = None # strides computed during build
export = False # onnx export
def __init__(self, nc=80, anchors=(), ch=()): # detection layer
super(Detect, self).__init__()
self.nc = nc # number of classes
self.no = nc + 5 # number of outputs per anchor
self.nl = len(anchors) # number of detection layers
self.na = len(anchors[0]) // 2 # number of anchors
self.grid = [torch.zeros(1)] * self.nl # init grid
a = torch.tensor(anchors).float().view(self.nl, -1, 2)
self.register_buffer('anchors', a) # shape(nl,na,2)
self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2)
self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv
def forward(self, x):
# x = x.copy() # for profiling
z = [] # inference output
self.training |= self.export
for i in range(self.nl):
x[i] = self.m[i](x[i]) # conv
bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85)
x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
if not self.training: # inference
if self.grid[i].shape[2:4] != x[i].shape[2:4]:
self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
y = x[i].sigmoid()
y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy
y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh
z.append(y.view(bs, -1, self.no))
return x if self.training else (torch.cat(z, 1), x)
@staticmethod
def _make_grid(nx=20, ny=20):
yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
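# Editor's note (illustrative, not part of the original file): Detect._make_grid(nx, ny)
# builds the per-cell (x, y) offsets used when decoding predictions at inference time;
# for example Detect._make_grid(3, 2) returns a float tensor of shape (1, 1, 2, 3, 2).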
class Model(nn.Module):
def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes
super(Model, self).__init__()
if isinstance(cfg, dict):
self.yaml = cfg # model dict
else: # is *.yaml
import yaml # for torch hub
self.yaml_file = Path(cfg).name
with open(cfg) as f:
self.yaml = yaml.load(f, Loader=yaml.SafeLoader) # model dict
# Define model
ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels
if nc and nc != self.yaml['nc']:
logger.info(f"Overriding model.yaml nc={self.yaml["nc"]} with nc={nc}")
self.yaml['nc'] = nc # override yaml value
if anchors:
logger.info(f'Overriding model.yaml anchors with anchors={anchors}')
self.yaml['anchors'] = round(anchors) # override yaml value
self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist
self.names = [str(i) for i in range(self.yaml['nc'])] # default names
# print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])
# Build strides, anchors
m = self.model[-1] # Detect()
if isinstance(m, Detect):
s = 256 # 2x min stride
m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward
m.anchors /= m.stride.view(-1, 1, 1)
check_anchor_order(m)
self.stride = m.stride
self._initialize_biases() # only run once
# print('Strides: %s' % m.stride.tolist())
# Init weights, biases
initialize_weights(self)
self.info()
logger.info('')
def forward(self, x, augment=False, profile=False):
if augment:
img_size = x.shape[-2:] # height, width
s = [1, 0.83, 0.67] # scales
f = [None, 3, None] # flips (2-ud, 3-lr)
y = [] # outputs
for si, fi in zip(s, f):
xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
yi = self.forward_once(xi)[0] # forward
# cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save
yi[..., :4] /= si # de-scale
if fi == 2:
yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud
elif fi == 3:
yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr
y.append(yi)
return torch.cat(y, 1), None # augmented inference, train
else:
return self.forward_once(x, profile) # single-scale inference, train
def forward_once(self, x, profile=False):
y, dt = [], [] # outputs
for m in self.model:
if m.f != -1: # if not from previous layer
x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
if profile:
o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPS
t = time_synchronized()
for _ in range(10):
_ = m(x)
dt.append((time_synchronized() - t) * 100)
print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))
x = m(x) # run
y.append(x if m.i in self.save else None) # save output
if profile:
print('%.1fms total' % sum(dt))
return x
def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency
# https://arxiv.org/abs/1708.02002 section 3.3
# cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
m = self.model[-1] # Detect() module
for mi, s in zip(m.m, m.stride): # from
b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)
b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)
b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls
mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
def _print_biases(self):
m = self.model[-1] # Detect() module
for mi in m.m: # from
b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85)
print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))
# def _print_weights(self):
# for m in self.model.modules():
# if type(m) is Bottleneck:
# print('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights
def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers
print('Fusing layers... ')
for m in self.model.modules():
if type(m) is Conv and hasattr(m, 'bn'):
m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv
delattr(m, 'bn') # remove batchnorm
m.forward = m.fuseforward # update forward
self.info()
return self
def nms(self, mode=True): # add or remove NMS module
present = type(self.model[-1]) is NMS # last layer is NMS
if mode and not present:
print('Adding NMS... ')
m = NMS() # module
m.f = -1 # from
m.i = self.model[-1].i + 1 # index
self.model.add_module(name='%s' % m.i, module=m) # add
self.eval()
elif not mode and present:
print('Removing NMS... ')
self.model = self.model[:-1] # remove
return self
def autoshape(self): # add autoShape module
print('Adding autoShape... ')
m = autoShape(self) # wrap model
copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes
return m
def info(self, verbose=False, img_size=640): # print model information
model_info(self, verbose, img_size)
def parse_model(d, ch): # model_dict, input_channels(3)
logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args
m = eval(m) if isinstance(m, str) else m # eval strings
for j, a in enumerate(args):
try:
args[j] = eval(a) if isinstance(a, str) else a # eval strings
except:
pass
n = max(round(n * gd), 1) if n > 1 else n # depth gain
if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP,
C3, C3TR]:
c1, c2 = ch[f], args[0]
if c2 != no: # if not output
c2 = make_divisible(c2 * gw, 8)
args = [c1, c2, *args[1:]]
if m in [BottleneckCSP, C3, C3TR]:
args.insert(2, n) # number of repeats
n = 1
elif m is nn.BatchNorm2d:
args = [ch[f]]
elif m is Concat:
c2 = sum([ch[x] for x in f])
elif m is Detect:
args.append([ch[x] for x in f])
if isinstance(args[1], int): # number of anchors
args[1] = [list(range(args[1] * 2))] * len(f)
elif m is Contract:
c2 = ch[f] * args[0] ** 2
elif m is Expand:
c2 = ch[f] // args[0] ** 2
else:
c2 = ch[f]
m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module
t = str(m)[8:-2].replace('__main__.', '') # module type
np = sum([x.numel() for x in m_.parameters()]) # number params
m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print
save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
layers.append(m_)
if i == 0:
ch = []
ch.append(c2)
return nn.Sequential(*layers), sorted(save)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
opt = parser.parse_args()
opt.cfg = check_file(opt.cfg) # check file
set_logging()
device = select_device(opt.device)
# Create model
model = Model(opt.cfg).to(device)
model.train()
# Profile
# img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device)
# y = model(img, profile=True)
# Tensorboard
# from torch.utils.tensorboard import SummaryWriter
# tb_writer = SummaryWriter()
# print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/")
# tb_writer.add_graph(model.model, img) # add model to tensorboard
# tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard
|
# YOLOv5 YOLO-specific modules
import argparse
import logging
import sys
from copy import deepcopy
sys.path.append('./') # to run '$ python *.py' files in subdirectories
logger = logging.getLogger(__name__)
from models.common import *
from models.experimental import *
from utils.autoanchor import check_anchor_order
from utils.general import make_divisible, check_file, set_logging
from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
select_device, copy_attr
try:
import thop # for FLOPS computation
except ImportError:
thop = None
class Detect(nn.Module):
stride = None # strides computed during build
export = False # onnx export
def __init__(self, nc=80, anchors=(), ch=()): # detection layer
super(Detect, self).__init__()
self.nc = nc # number of classes
self.no = nc + 5 # number of outputs per anchor
self.nl = len(anchors) # number of detection layers
self.na = len(anchors[0]) // 2 # number of anchors
self.grid = [torch.zeros(1)] * self.nl # init grid
a = torch.tensor(anchors).float().view(self.nl, -1, 2)
self.register_buffer('anchors', a) # shape(nl,na,2)
self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2)
self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv
def forward(self, x):
# x = x.copy() # for profiling
z = [] # inference output
self.training |= self.export
for i in range(self.nl):
x[i] = self.m[i](x[i]) # conv
bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85)
x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
if not self.training: # inference
if self.grid[i].shape[2:4] != x[i].shape[2:4]:
self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
y = x[i].sigmoid()
y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy
y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh
z.append(y.view(bs, -1, self.no))
return x if self.training else (torch.cat(z, 1), x)
@staticmethod
def _make_grid(nx=20, ny=20):
yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
class Model(nn.Module):
def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes
super(Model, self).__init__()
if isinstance(cfg, dict):
self.yaml = cfg # model dict
else: # is *.yaml
import yaml # for torch hub
self.yaml_file = Path(cfg).name
with open(cfg) as f:
self.yaml = yaml.load(f, Loader=yaml.SafeLoader) # model dict
# Define model
ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels
if nc and nc != self.yaml['nc']:
logger.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
self.yaml['nc'] = nc # override yaml value
if anchors:
logger.info(f'Overriding model.yaml anchors with anchors={anchors}')
self.yaml['anchors'] = round(anchors) # override yaml value
self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist
self.names = [str(i) for i in range(self.yaml['nc'])] # default names
# print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])
# Build strides, anchors
m = self.model[-1] # Detect()
if isinstance(m, Detect):
s = 256 # 2x min stride
m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward
m.anchors /= m.stride.view(-1, 1, 1)
check_anchor_order(m)
self.stride = m.stride
self._initialize_biases() # only run once
# print('Strides: %s' % m.stride.tolist())
# Init weights, biases
initialize_weights(self)
self.info()
logger.info('')
def forward(self, x, augment=False, profile=False):
if augment:
img_size = x.shape[-2:] # height, width
s = [1, 0.83, 0.67] # scales
f = [None, 3, None] # flips (2-ud, 3-lr)
y = [] # outputs
for si, fi in zip(s, f):
xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
yi = self.forward_once(xi)[0] # forward
# cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save
yi[..., :4] /= si # de-scale
if fi == 2:
yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud
elif fi == 3:
yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr
y.append(yi)
return torch.cat(y, 1), None # augmented inference, train
else:
return self.forward_once(x, profile) # single-scale inference, train
def forward_once(self, x, profile=False):
y, dt = [], [] # outputs
for m in self.model:
if m.f != -1: # if not from previous layer
x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
if profile:
o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPS
t = time_synchronized()
for _ in range(10):
_ = m(x)
dt.append((time_synchronized() - t) * 100)
print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))
x = m(x) # run
y.append(x if m.i in self.save else None) # save output
if profile:
print('%.1fms total' % sum(dt))
return x
def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency
# https://arxiv.org/abs/1708.02002 section 3.3
# cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
m = self.model[-1] # Detect() module
for mi, s in zip(m.m, m.stride): # from
b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)
b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)
b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls
mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
def _print_biases(self):
m = self.model[-1] # Detect() module
for mi in m.m: # from
b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85)
print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))
# def _print_weights(self):
# for m in self.model.modules():
# if type(m) is Bottleneck:
# print('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights
def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers
print('Fusing layers... ')
for m in self.model.modules():
if type(m) is Conv and hasattr(m, 'bn'):
m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv
delattr(m, 'bn') # remove batchnorm
m.forward = m.fuseforward # update forward
self.info()
return self
def nms(self, mode=True): # add or remove NMS module
present = type(self.model[-1]) is NMS # last layer is NMS
if mode and not present:
print('Adding NMS... ')
m = NMS() # module
m.f = -1 # from
m.i = self.model[-1].i + 1 # index
self.model.add_module(name='%s' % m.i, module=m) # add
self.eval()
elif not mode and present:
print('Removing NMS... ')
self.model = self.model[:-1] # remove
return self
def autoshape(self): # add autoShape module
print('Adding autoShape... ')
m = autoShape(self) # wrap model
copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes
return m
def info(self, verbose=False, img_size=640): # print model information
model_info(self, verbose, img_size)
def parse_model(d, ch): # model_dict, input_channels(3)
logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args
m = eval(m) if isinstance(m, str) else m # eval strings
for j, a in enumerate(args):
try:
args[j] = eval(a) if isinstance(a, str) else a # eval strings
except:
pass
n = max(round(n * gd), 1) if n > 1 else n # depth gain
if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP,
C3, C3TR]:
c1, c2 = ch[f], args[0]
if c2 != no: # if not output
c2 = make_divisible(c2 * gw, 8)
args = [c1, c2, *args[1:]]
if m in [BottleneckCSP, C3, C3TR]:
args.insert(2, n) # number of repeats
n = 1
elif m is nn.BatchNorm2d:
args = [ch[f]]
elif m is Concat:
c2 = sum([ch[x] for x in f])
elif m is Detect:
args.append([ch[x] for x in f])
if isinstance(args[1], int): # number of anchors
args[1] = [list(range(args[1] * 2))] * len(f)
elif m is Contract:
c2 = ch[f] * args[0] ** 2
elif m is Expand:
c2 = ch[f] // args[0] ** 2
else:
c2 = ch[f]
m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module
t = str(m)[8:-2].replace('__main__.', '') # module type
np = sum([x.numel() for x in m_.parameters()]) # number params
m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print
save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
layers.append(m_)
if i == 0:
ch = []
ch.append(c2)
return nn.Sequential(*layers), sorted(save)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
opt = parser.parse_args()
opt.cfg = check_file(opt.cfg) # check file
set_logging()
device = select_device(opt.device)
# Create model
model = Model(opt.cfg).to(device)
model.train()
# Profile
# img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device)
# y = model(img, profile=True)
# Tensorboard
# from torch.utils.tensorboard import SummaryWriter
# tb_writer = SummaryWriter()
# print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/")
# tb_writer.add_graph(model.model, img) # add model to tensorboard
# tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Miscellaneous generic utility functions and classes."""
from datetime import datetime
import filecmp
import inspect
import io
import os
import re
import sys
from typing import Any, Dict
from uuid import UUID
from .lang import classproperty
def get_new_uuid():
"""
Return a new UUID (typically to be used for new nodes).
It uses the UUID version specified in
aiida.backends.settings.AIIDANODES_UUID_VERSION
"""
import uuid
return str(uuid.uuid4())
def validate_uuid(given_uuid: str) -> bool:
"""A simple check for the UUID validity."""
try:
parsed_uuid = UUID(given_uuid, version=4)
except ValueError:
# If not a valid UUID
return False
# Check if there was any kind of conversion of the hex during
# the validation
return str(parsed_uuid) == given_uuid
def validate_list_of_string_tuples(val, tuple_length):
"""
Check that:
1. ``val`` is a list or tuple
2. each element of the list:
a. is a list or tuple
b. is of length equal to the parameter tuple_length
c. each of the two elements is a string
Return if valid, raise ValidationError if invalid
"""
from aiida.common.exceptions import ValidationError
err_msg = (
'the value must be a list (or tuple) '
'of length-N list (or tuples), whose elements are strings; '
'N={}'.format(tuple_length)
)
if not isinstance(val, (list, tuple)):
raise ValidationError(err_msg)
for element in val:
if (
not isinstance(element, (list, tuple)) or (len(element) != tuple_length) or
not all(isinstance(s, str) for s in element)
):
raise ValidationError(err_msg)
return True
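# Editor's illustration (not part of the original module): a valid value is a list of
# equal-length string tuples, e.g.
#     validate_list_of_string_tuples([('in', 'file_in'), ('out', 'file_out')], 2)  # -> True
# while validate_list_of_string_tuples([('only_one',)], 2) raises ValidationError.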
def get_unique_filename(filename, list_of_filenames):
"""
Return a unique filename that can be added to the list_of_filenames.
If filename is not in list_of_filenames, it simply returns the filename
    string itself. Otherwise, it appends an integer number to the filename
(before the extension) until it finds a unique filename.
:param filename: the filename to add
:param list_of_filenames: the list of filenames to which filename
should be added, without name duplicates
:returns: Either filename or its modification, with a number appended
between the name and the extension.
"""
if filename not in list_of_filenames:
return filename
basename, ext = os.path.splitext(filename)
# Not optimized, but for the moment this should be fast enough
append_int = 1
while True:
new_filename = f'{basename:s}-{append_int:d}{ext:s}'
if new_filename not in list_of_filenames:
break
append_int += 1
return new_filename
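# Editor's illustration (not part of the original module):
#     >>> get_unique_filename('data.txt', ['data.txt', 'data-1.txt'])
#     'data-2.txt'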
def str_timedelta(dt, max_num_fields=3, short=False, negative_to_zero=False): # pylint: disable=invalid-name
"""
Given a dt in seconds, return it in a HH:MM:SS format.
:param dt: a TimeDelta object
:param max_num_fields: maximum number of non-zero fields to show
(for instance if the number of days is non-zero, shows only
days, hours and minutes, but not seconds)
:param short: if False, print always ``max_num_fields`` fields, even
if they are zero. If True, do not print the first fields, if they
are zero.
:param negative_to_zero: if True, set dt = 0 if dt < 0.
"""
if max_num_fields <= 0:
raise ValueError('max_num_fields must be > 0')
s_tot = dt.total_seconds() # Important to get more than 1 day, and for
# negative values. dt.seconds would give
# wrong results in these cases, see
# http://docs.python.org/2/library/datetime.html
s_tot = int(s_tot)
if negative_to_zero:
s_tot = max(s_tot, 0)
negative = (s_tot < 0)
s_tot = abs(s_tot)
negative_string = ' in the future' if negative else ' ago'
# For the moment stay away from months and years, difficult to get
days, remainder = divmod(s_tot, 3600 * 24)
hours, remainder = divmod(remainder, 3600)
minutes, seconds = divmod(remainder, 60)
all_fields = [(days, 'D'), (hours, 'h'), (minutes, 'm'), (seconds, 's')]
fields = []
start_insert = False
counter = 0
for idx, field in enumerate(all_fields):
if field[0] != 0:
start_insert = True
if (len(all_fields) - idx) <= max_num_fields:
start_insert = True
if start_insert:
if counter >= max_num_fields:
break
fields.append(field)
counter += 1
if short:
while len(fields) > 1: # at least one element has to remain
if fields[0][0] != 0:
break
fields.pop(0) # remove first element
# Join the fields
raw_string = ':'.join(['{:02d}{}'.format(*f) for f in fields])
if raw_string.startswith('0'):
raw_string = raw_string[1:]
# Return the resulting string, appending a suitable string if the time
# is negative
return f'{raw_string}{negative_string}'
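# Editor's illustration (not part of the original module); timedelta would need to be
# imported where this is actually used.
#     >>> from datetime import timedelta
#     >>> str_timedelta(timedelta(days=1, hours=2, minutes=3))
#     '1D:02h:03m ago'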
def get_class_string(obj):
"""
Return the string identifying the class of the object (module + object name,
joined by dots).
It works both for classes and for class instances.
"""
if inspect.isclass(obj):
return f'{obj.__module__}.{obj.__name__}'
return f'{obj.__module__}.{obj.__class__.__name__}'
def get_object_from_string(class_string):
"""
Given a string identifying an object (as returned by the get_class_string
method) load and return the actual object.
"""
import importlib
the_module, _, the_name = class_string.rpartition('.')
return getattr(importlib.import_module(the_module), the_name)
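# Editor's illustration (not part of the original module): get_class_string() and
# get_object_from_string() are inverses for importable classes, e.g.
#     get_class_string(datetime)                       # -> 'datetime.datetime'
#     get_object_from_string('datetime.datetime')      # -> <class 'datetime.datetime'>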
def grouper(n, iterable): # pylint: disable=invalid-name
"""
Given an iterable, returns an iterable that returns tuples of groups of
elements from iterable of length n, except the last one that has the
    required length to exhaust iterable (i.e., there is no filling applied).
    :param n: length of each tuple (except the last one, that will have length
        <= n)
:param iterable: the iterable to divide in groups
"""
import itertools
iterator = iter(iterable)
while True:
chunk = tuple(itertools.islice(iterator, n))
if not chunk:
return
yield chunk
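# Editor's illustration (not part of the original module): the last chunk is simply
# shorter, with no padding.
#     >>> list(grouper(3, range(8)))
#     [(0, 1, 2), (3, 4, 5), (6, 7)]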
class ArrayCounter:
"""
A counter & a method that increments it and returns its value.
It is used in various tests.
"""
seq = None
def __init__(self):
self.seq = -1
def array_counter(self):
self.seq += 1
return self.seq
def are_dir_trees_equal(dir1, dir2):
"""
Compare two directories recursively. Files in each directory are
assumed to be equal if their names and contents are equal.
@param dir1: First directory path
@param dir2: Second directory path
@return: True if the directory trees are the same and
there were no errors while accessing the directories or files,
False otherwise.
"""
# Directory comparison
dirs_cmp = filecmp.dircmp(dir1, dir2)
if dirs_cmp.left_only or dirs_cmp.right_only or dirs_cmp.funny_files:
return (
False, 'Left directory: {}, right directory: {}, files only '
'in left directory: {}, files only in right directory: '
'{}, not comparable files: {}'.format(
dir1, dir2, dirs_cmp.left_only, dirs_cmp.right_only, dirs_cmp.funny_files
)
)
# If the directories contain the same files, compare the common files
(_, mismatch, errors) = filecmp.cmpfiles(dir1, dir2, dirs_cmp.common_files, shallow=False)
if mismatch:
return (False, f"The following files in the directories {dir1} and {dir2} don't match: {mismatch}")
if errors:
return (False, f"The following files in the directories {dir1} and {dir2} aren't regular: {errors}")
for common_dir in dirs_cmp.common_dirs:
new_dir1 = os.path.join(dir1, common_dir)
new_dir2 = os.path.join(dir2, common_dir)
res, msg = are_dir_trees_equal(new_dir1, new_dir2)
if not res:
return False, msg
return True, f'The given directories ({dir1} and {dir2}) are equal'
class Prettifier:
"""
Class to manage prettifiers (typically for labels of kpoints
in band plots)
"""
@classmethod
def _prettify_label_pass(cls, label):
"""
No-op prettifier, simply returns the same label
:param label: a string to prettify
"""
return label
@classmethod
def _prettify_label_agr(cls, label):
"""
Prettifier for XMGrace
:param label: a string to prettify
"""
label = (
label
.replace('GAMMA', r'\xG\f{}')
.replace('DELTA', r'\xD\f{}')
.replace('LAMBDA', r'\xL\f{}')
.replace('SIGMA', r'\xS\f{}')
) # yapf:disable
return re.sub(r'_(.?)', r'\\s\1\\N', label)
@classmethod
def _prettify_label_agr_simple(cls, label):
"""
Prettifier for XMGrace (for old label names)
:param label: a string to prettify
"""
if label == 'G':
return r'\xG'
return re.sub(r'(\d+)', r'\\s\1\\N', label)
@classmethod
def _prettify_label_gnuplot(cls, label):
"""
Prettifier for Gnuplot
:note: uses unicode, returns unicode strings (potentially, if needed)
:param label: a string to prettify
"""
label = (
label
.replace('GAMMA', 'Γ')
.replace('DELTA', 'Δ')
.replace('LAMBDA', 'Λ')
.replace('SIGMA', 'Σ')
) # yapf:disable
return re.sub(r'_(.?)', r'_{\1}', label)
@classmethod
def _prettify_label_gnuplot_simple(cls, label):
"""
Prettifier for Gnuplot (for old label names)
:note: uses unicode, returns unicode strings (potentially, if needed)
:param label: a string to prettify
"""
if label == 'G':
return 'Γ'
return re.sub(r'(\d+)', r'_{\1}', label)
@classmethod
def _prettify_label_latex(cls, label):
"""
Prettifier for matplotlib, using LaTeX syntax
:param label: a string to prettify
"""
label = (
label
.replace('GAMMA', r'$\Gamma$')
.replace('DELTA', r'$\Delta$')
.replace('LAMBDA', r'$\Lambda$')
.replace('SIGMA', r'$\Sigma$')
) # yapf:disable
label = re.sub(r'_(.?)', r'$_{\1}$', label)
# label += r"$_{\vphantom{0}}$"
return label
@classmethod
def _prettify_label_latex_simple(cls, label):
"""
Prettifier for matplotlib, using LaTeX syntax (for old label names)
:param label: a string to prettify
"""
if label == 'G':
return r'$\Gamma$'
return re.sub(r'(\d+)', r'$_{\1}$', label)
@classproperty
def prettifiers(cls) -> Dict[str, Any]: # pylint: disable=no-self-argument
"""
Property that returns a dictionary that for each string associates
the function to prettify a label
:return: a dictionary where keys are strings and values are functions
"""
return {
'agr_seekpath': cls._prettify_label_agr,
'agr_simple': cls._prettify_label_agr_simple,
'latex_simple': cls._prettify_label_latex_simple,
'latex_seekpath': cls._prettify_label_latex,
'gnuplot_simple': cls._prettify_label_gnuplot_simple,
'gnuplot_seekpath': cls._prettify_label_gnuplot,
'pass': cls._prettify_label_pass,
}
@classmethod
def get_prettifiers(cls):
"""
Return a list of valid prettifier strings
:return: a list of strings
"""
return sorted(cls.prettifiers.keys())
def __init__(self, format): # pylint: disable=redefined-builtin
"""
        Create a class to prettify strings of a given format
:param format: a string with the format to use to prettify.
Valid formats are obtained from self.prettifiers
"""
if format is None:
format = 'pass'
try:
self._prettifier_f = self.prettifiers[format] # pylint: disable=unsubscriptable-object
except KeyError:
raise ValueError(f"Unknown prettifier format {format}; valid formats: {", ".join(self.get_prettifiers())}")
def prettify(self, label):
"""
Prettify a label using the format passed in the initializer
:param label: the string to prettify
:return: a prettified string
"""
return self._prettifier_f(label)
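# Editor's illustration (not part of the original module):
#     Prettifier('latex_seekpath').prettify('GAMMA')      # -> r'$\Gamma$'
#     Prettifier('gnuplot_seekpath').prettify('SIGMA_1')   # -> 'Σ_{1}'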
def prettify_labels(labels, format=None): # pylint: disable=redefined-builtin
"""
Prettify label for typesetting in various formats
    :param labels: a list of length-2 tuples, in the format (position, label)
:param format: a string with the format for the prettifier (e.g. 'agr',
'matplotlib', ...)
:return: the same list as labels, but with the second value possibly replaced
with a prettified version that typesets nicely in the selected format
"""
prettifier = Prettifier(format)
return [(pos, prettifier.prettify(label)) for pos, label in labels]
def join_labels(labels, join_symbol='|', threshold=1.e-6):
"""
Join labels with a joining symbol when they are very close
    :param labels: a list of length-2 tuples, in the format (position, label)
:param join_symbol: the string to use to join different paths. By default, a pipe
:param threshold: the threshold to decide if two float values are the same and should
be joined
:return: the same list as labels, but with the second value possibly replaced
with strings joined when close enough
"""
if labels:
new_labels = [list(labels[0])]
# modify labels when in overlapping position
j = 0
for i in range(1, len(labels)):
if abs(labels[i][0] - labels[i - 1][0]) < threshold:
new_labels[j][1] += join_symbol + labels[i][1]
else:
new_labels.append(list(labels[i]))
j += 1
else:
new_labels = []
return new_labels
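# Editor's illustration (not part of the original module): coincident positions are
# merged into a single joined label.
#     join_labels([(0.0, 'X'), (0.0, 'Y'), (0.5, 'G')])   # -> [[0.0, 'X|Y'], [0.5, 'G']]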
def strip_prefix(full_string, prefix):
"""
Strip the prefix from the given string and return it. If the prefix is not present
the original string will be returned unaltered
:param full_string: the string from which to remove the prefix
:param prefix: the prefix to remove
:return: the string with prefix removed
"""
if full_string.startswith(prefix):
return full_string.rsplit(prefix)[1]
return full_string
class Capturing:
"""
This class captures stdout and returns it
(as a list, split by lines).
Note: if you raise a SystemExit, you have to catch it outside.
E.g., in our tests, this works::
import sys
with self.assertRaises(SystemExit):
with Capturing() as output:
sys.exit()
But out of the testing environment, the code instead just exits.
To use it, access the obj.stdout_lines, or just iterate over the object
:param capture_stderr: if True, also captures sys.stderr. To access the
lines, use obj.stderr_lines. If False, obj.stderr_lines is None.
"""
# pylint: disable=attribute-defined-outside-init
def __init__(self, capture_stderr=False):
self.stdout_lines = []
super().__init__()
self._capture_stderr = capture_stderr
if self._capture_stderr:
self.stderr_lines = []
else:
self.stderr_lines = None
def __enter__(self):
"""Enter the context where all output is captured."""
self._stdout = sys.stdout
self._stringioout = io.StringIO()
sys.stdout = self._stringioout
if self._capture_stderr:
self._stderr = sys.stderr
self._stringioerr = io.StringIO()
sys.stderr = self._stringioerr
return self
def __exit__(self, *args):
"""Exit the context where all output is captured."""
self.stdout_lines.extend(self._stringioout.getvalue().splitlines())
sys.stdout = self._stdout
del self._stringioout # free up some memory
if self._capture_stderr:
self.stderr_lines.extend(self._stringioerr.getvalue().splitlines())
sys.stderr = self._stderr
del self._stringioerr # free up some memory
def __str__(self):
return str(self.stdout_lines)
def __iter__(self):
return iter(self.stdout_lines)
class ErrorAccumulator:
"""
    Allows running a number of functions and collecting all the errors they raise.
    This makes it possible to validate multiple things and tell the user about all the
    errors encountered at once. Works best if the individual functions do not depend on each other.
    Does not allow tracing the stack of each error, therefore do not use it for debugging, but for
    semantic checking with user-friendly error messages.
"""
def __init__(self, *error_cls):
self.error_cls = error_cls
self.errors = {k: [] for k in self.error_cls}
def run(self, function, *args, **kwargs):
try:
function(*args, **kwargs)
except self.error_cls as err:
self.errors[err.__class__].append(err)
def success(self):
return bool(not any(self.errors.values()))
def result(self, raise_error=Exception):
if raise_error:
self.raise_errors(raise_error)
return self.success(), self.errors
def raise_errors(self, raise_cls):
if not self.success():
raise raise_cls(f'The following errors were encountered: {self.errors}')
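# Editor's illustration (not part of the original module):
#     acc = ErrorAccumulator(ValueError)
#     acc.run(int, 'not-a-number')   # the ValueError is collected, not raised
#     acc.success()                  # -> False
#     acc.errors[ValueError]         # -> [ValueError('invalid literal ...')]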
class DatetimePrecision:
"""
A simple class which stores a datetime object with its precision. No
    internal check is done (because it is not possible).
precision: 1 (only full date)
2 (date plus hour)
3 (date + hour + minute)
                4 (date + hour + minute + second)
"""
def __init__(self, dtobj, precision):
""" Constructor to check valid datetime object and precision """
if not isinstance(dtobj, datetime):
raise TypeError('dtobj argument has to be a datetime object')
if not isinstance(precision, int):
raise TypeError('precision argument has to be an integer')
self.dtobj = dtobj
self.precision = precision
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Miscellaneous generic utility functions and classes."""
from datetime import datetime
import filecmp
import inspect
import io
import os
import re
import sys
from typing import Any, Dict
from uuid import UUID
from .lang import classproperty
def get_new_uuid():
"""
Return a new UUID (typically to be used for new nodes).
It uses the UUID version specified in
aiida.backends.settings.AIIDANODES_UUID_VERSION
"""
import uuid
return str(uuid.uuid4())
def validate_uuid(given_uuid: str) -> bool:
"""A simple check for the UUID validity."""
try:
parsed_uuid = UUID(given_uuid, version=4)
except ValueError:
# If not a valid UUID
return False
# Check if there was any kind of conversion of the hex during
# the validation
return str(parsed_uuid) == given_uuid
def validate_list_of_string_tuples(val, tuple_length):
"""
Check that:
1. ``val`` is a list or tuple
2. each element of the list:
a. is a list or tuple
b. is of length equal to the parameter tuple_length
c. each of the two elements is a string
Return if valid, raise ValidationError if invalid
"""
from aiida.common.exceptions import ValidationError
err_msg = (
'the value must be a list (or tuple) '
'of length-N list (or tuples), whose elements are strings; '
'N={}'.format(tuple_length)
)
if not isinstance(val, (list, tuple)):
raise ValidationError(err_msg)
for element in val:
if (
not isinstance(element, (list, tuple)) or (len(element) != tuple_length) or
not all(isinstance(s, str) for s in element)
):
raise ValidationError(err_msg)
return True
def get_unique_filename(filename, list_of_filenames):
"""
Return a unique filename that can be added to the list_of_filenames.
If filename is not in list_of_filenames, it simply returns the filename
    string itself. Otherwise, it appends an integer number to the filename
(before the extension) until it finds a unique filename.
:param filename: the filename to add
:param list_of_filenames: the list of filenames to which filename
should be added, without name duplicates
:returns: Either filename or its modification, with a number appended
between the name and the extension.
"""
if filename not in list_of_filenames:
return filename
basename, ext = os.path.splitext(filename)
# Not optimized, but for the moment this should be fast enough
append_int = 1
while True:
new_filename = f'{basename:s}-{append_int:d}{ext:s}'
if new_filename not in list_of_filenames:
break
append_int += 1
return new_filename
def str_timedelta(dt, max_num_fields=3, short=False, negative_to_zero=False): # pylint: disable=invalid-name
"""
Given a dt in seconds, return it in a HH:MM:SS format.
:param dt: a TimeDelta object
:param max_num_fields: maximum number of non-zero fields to show
(for instance if the number of days is non-zero, shows only
days, hours and minutes, but not seconds)
    :param short: if False, always print ``max_num_fields`` fields, even
        if they are zero. If True, do not print leading fields that are zero.
:param negative_to_zero: if True, set dt = 0 if dt < 0.
"""
if max_num_fields <= 0:
raise ValueError('max_num_fields must be > 0')
s_tot = dt.total_seconds() # Important to get more than 1 day, and for
# negative values. dt.seconds would give
# wrong results in these cases, see
# http://docs.python.org/2/library/datetime.html
s_tot = int(s_tot)
if negative_to_zero:
s_tot = max(s_tot, 0)
negative = (s_tot < 0)
s_tot = abs(s_tot)
negative_string = ' in the future' if negative else ' ago'
# For the moment stay away from months and years, difficult to get
days, remainder = divmod(s_tot, 3600 * 24)
hours, remainder = divmod(remainder, 3600)
minutes, seconds = divmod(remainder, 60)
all_fields = [(days, 'D'), (hours, 'h'), (minutes, 'm'), (seconds, 's')]
fields = []
start_insert = False
counter = 0
for idx, field in enumerate(all_fields):
if field[0] != 0:
start_insert = True
if (len(all_fields) - idx) <= max_num_fields:
start_insert = True
if start_insert:
if counter >= max_num_fields:
break
fields.append(field)
counter += 1
if short:
while len(fields) > 1: # at least one element has to remain
if fields[0][0] != 0:
break
fields.pop(0) # remove first element
# Join the fields
raw_string = ':'.join(['{:02d}{}'.format(*f) for f in fields])
if raw_string.startswith('0'):
raw_string = raw_string[1:]
# Return the resulting string, appending a suitable string if the time
# is negative
return f'{raw_string}{negative_string}'
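# Illustrative usage sketch (hypothetical delta): at most ``max_num_fields`` fields
# are shown and an ' ago' / ' in the future' suffix is appended.
def _example_str_timedelta():
    from datetime import timedelta
    return str_timedelta(timedelta(days=2, hours=3, minutes=4))  # -> '2D:03h:04m ago'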
def get_class_string(obj):
"""
Return the string identifying the class of the object (module + object name,
joined by dots).
It works both for classes and for class instances.
"""
if inspect.isclass(obj):
return f'{obj.__module__}.{obj.__name__}'
return f'{obj.__module__}.{obj.__class__.__name__}'
def get_object_from_string(class_string):
"""
Given a string identifying an object (as returned by the get_class_string
method) load and return the actual object.
"""
import importlib
the_module, _, the_name = class_string.rpartition('.')
return getattr(importlib.import_module(the_module), the_name)
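# Illustrative usage sketch: get_class_string and get_object_from_string are
# inverses of each other for importable classes such as ``datetime``.
def _example_class_string_roundtrip():
    class_string = get_class_string(datetime)  # 'datetime.datetime'
    return get_object_from_string(class_string) is datetime  # True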
def grouper(n, iterable): # pylint: disable=invalid-name
"""
Given an iterable, returns an iterable that returns tuples of groups of
    elements from iterable of length n, except the last one, which has the
    required length to exhaust iterable (i.e., there is no padding applied).
    :param n: length of each tuple (except the last one, which will have
        length <= n)
:param iterable: the iterable to divide in groups
"""
import itertools
iterator = iter(iterable)
while True:
chunk = tuple(itertools.islice(iterator, n))
if not chunk:
return
yield chunk
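# Illustrative usage sketch: the last chunk may be shorter than ``n``; no padding is added.
def _example_grouper():
    return list(grouper(2, [1, 2, 3, 4, 5]))  # -> [(1, 2), (3, 4), (5,)]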
class ArrayCounter:
"""
A counter & a method that increments it and returns its value.
It is used in various tests.
"""
seq = None
def __init__(self):
self.seq = -1
def array_counter(self):
self.seq += 1
return self.seq
def are_dir_trees_equal(dir1, dir2):
"""
Compare two directories recursively. Files in each directory are
assumed to be equal if their names and contents are equal.
@param dir1: First directory path
@param dir2: Second directory path
    @return: a tuple (True, message) if the directory trees are the same and
        there were no errors while accessing the directories or files,
        (False, message) otherwise.
"""
# Directory comparison
dirs_cmp = filecmp.dircmp(dir1, dir2)
if dirs_cmp.left_only or dirs_cmp.right_only or dirs_cmp.funny_files:
return (
False, 'Left directory: {}, right directory: {}, files only '
'in left directory: {}, files only in right directory: '
'{}, not comparable files: {}'.format(
dir1, dir2, dirs_cmp.left_only, dirs_cmp.right_only, dirs_cmp.funny_files
)
)
# If the directories contain the same files, compare the common files
(_, mismatch, errors) = filecmp.cmpfiles(dir1, dir2, dirs_cmp.common_files, shallow=False)
if mismatch:
return (False, f"The following files in the directories {dir1} and {dir2} don't match: {mismatch}")
if errors:
return (False, f"The following files in the directories {dir1} and {dir2} aren't regular: {errors}")
for common_dir in dirs_cmp.common_dirs:
new_dir1 = os.path.join(dir1, common_dir)
new_dir2 = os.path.join(dir2, common_dir)
res, msg = are_dir_trees_equal(new_dir1, new_dir2)
if not res:
return False, msg
return True, f'The given directories ({dir1} and {dir2}) are equal'
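# Illustrative usage sketch (hypothetical paths, assumed to exist): note that the
# function returns a (bool, message) tuple rather than a bare boolean.
def _example_are_dir_trees_equal(dir1='/tmp/tree_a', dir2='/tmp/tree_b'):
    equal, message = are_dir_trees_equal(dir1, dir2)
    return equal, message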
class Prettifier:
"""
Class to manage prettifiers (typically for labels of kpoints
in band plots)
"""
@classmethod
def _prettify_label_pass(cls, label):
"""
No-op prettifier, simply returns the same label
:param label: a string to prettify
"""
return label
@classmethod
def _prettify_label_agr(cls, label):
"""
Prettifier for XMGrace
:param label: a string to prettify
"""
label = (
label
.replace('GAMMA', r'\xG\f{}')
.replace('DELTA', r'\xD\f{}')
.replace('LAMBDA', r'\xL\f{}')
.replace('SIGMA', r'\xS\f{}')
) # yapf:disable
return re.sub(r'_(.?)', r'\\s\1\\N', label)
@classmethod
def _prettify_label_agr_simple(cls, label):
"""
Prettifier for XMGrace (for old label names)
:param label: a string to prettify
"""
if label == 'G':
return r'\xG'
return re.sub(r'(\d+)', r'\\s\1\\N', label)
@classmethod
def _prettify_label_gnuplot(cls, label):
"""
Prettifier for Gnuplot
:note: uses unicode, returns unicode strings (potentially, if needed)
:param label: a string to prettify
"""
label = (
label
.replace('GAMMA', 'Γ')
.replace('DELTA', 'Δ')
.replace('LAMBDA', 'Λ')
.replace('SIGMA', 'Σ')
) # yapf:disable
return re.sub(r'_(.?)', r'_{\1}', label)
@classmethod
def _prettify_label_gnuplot_simple(cls, label):
"""
Prettifier for Gnuplot (for old label names)
:note: uses unicode, returns unicode strings (potentially, if needed)
:param label: a string to prettify
"""
if label == 'G':
return 'Γ'
return re.sub(r'(\d+)', r'_{\1}', label)
@classmethod
def _prettify_label_latex(cls, label):
"""
Prettifier for matplotlib, using LaTeX syntax
:param label: a string to prettify
"""
label = (
label
.replace('GAMMA', r'$\Gamma$')
.replace('DELTA', r'$\Delta$')
.replace('LAMBDA', r'$\Lambda$')
.replace('SIGMA', r'$\Sigma$')
) # yapf:disable
label = re.sub(r'_(.?)', r'$_{\1}$', label)
# label += r"$_{\vphantom{0}}$"
return label
@classmethod
def _prettify_label_latex_simple(cls, label):
"""
Prettifier for matplotlib, using LaTeX syntax (for old label names)
:param label: a string to prettify
"""
if label == 'G':
return r'$\Gamma$'
return re.sub(r'(\d+)', r'$_{\1}$', label)
@classproperty
def prettifiers(cls) -> Dict[str, Any]: # pylint: disable=no-self-argument
"""
Property that returns a dictionary that for each string associates
the function to prettify a label
:return: a dictionary where keys are strings and values are functions
"""
return {
'agr_seekpath': cls._prettify_label_agr,
'agr_simple': cls._prettify_label_agr_simple,
'latex_simple': cls._prettify_label_latex_simple,
'latex_seekpath': cls._prettify_label_latex,
'gnuplot_simple': cls._prettify_label_gnuplot_simple,
'gnuplot_seekpath': cls._prettify_label_gnuplot,
'pass': cls._prettify_label_pass,
}
@classmethod
def get_prettifiers(cls):
"""
Return a list of valid prettifier strings
:return: a list of strings
"""
return sorted(cls.prettifiers.keys())
def __init__(self, format): # pylint: disable=redefined-builtin
"""
        Create a class to prettify strings of a given format
:param format: a string with the format to use to prettify.
Valid formats are obtained from self.prettifiers
"""
if format is None:
format = 'pass'
try:
self._prettifier_f = self.prettifiers[format] # pylint: disable=unsubscriptable-object
except KeyError:
raise ValueError(f"Unknown prettifier format {format}; valid formats: {', '.join(self.get_prettifiers())}")
def prettify(self, label):
"""
Prettify a label using the format passed in the initializer
:param label: the string to prettify
:return: a prettified string
"""
return self._prettifier_f(label)
def prettify_labels(labels, format=None): # pylint: disable=redefined-builtin
"""
    Prettify labels for typesetting in various formats
    :param labels: a list of length-2 tuples, in the format (position, label)
:param format: a string with the format for the prettifier (e.g. 'agr',
'matplotlib', ...)
:return: the same list as labels, but with the second value possibly replaced
with a prettified version that typesets nicely in the selected format
"""
prettifier = Prettifier(format)
return [(pos, prettifier.prettify(label)) for pos, label in labels]
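# Illustrative usage sketch (hypothetical labels): with the 'latex_seekpath' format,
# 'GAMMA' is rendered as r'$\Gamma$' while the positions are left untouched.
def _example_prettify_labels():
    return prettify_labels([(0.0, 'GAMMA'), (0.5, 'X')], format='latex_seekpath')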
def join_labels(labels, join_symbol='|', threshold=1.e-6):
"""
Join labels with a joining symbol when they are very close
    :param labels: a list of length-2 tuples, in the format (position, label)
:param join_symbol: the string to use to join different paths. By default, a pipe
:param threshold: the threshold to decide if two float values are the same and should
be joined
:return: the same list as labels, but with the second value possibly replaced
with strings joined when close enough
"""
if labels:
new_labels = [list(labels[0])]
# modify labels when in overlapping position
j = 0
for i in range(1, len(labels)):
if abs(labels[i][0] - labels[i - 1][0]) < threshold:
new_labels[j][1] += join_symbol + labels[i][1]
else:
new_labels.append(list(labels[i]))
j += 1
else:
new_labels = []
return new_labels
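# Illustrative usage sketch (hypothetical labels): labels closer than ``threshold``
# are merged into a single pipe-joined entry at the first position.
def _example_join_labels():
    return join_labels([(0.0, 'G'), (0.5, 'X'), (0.5, 'Y')])  # -> [[0.0, 'G'], [0.5, 'X|Y']]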
def strip_prefix(full_string, prefix):
"""
Strip the prefix from the given string and return it. If the prefix is not present
the original string will be returned unaltered
:param full_string: the string from which to remove the prefix
:param prefix: the prefix to remove
:return: the string with prefix removed
"""
if full_string.startswith(prefix):
        # slicing (rather than splitting) keeps any later occurrences of the prefix intact
        return full_string[len(prefix):]
return full_string
class Capturing:
"""
This class captures stdout and returns it
(as a list, split by lines).
Note: if you raise a SystemExit, you have to catch it outside.
E.g., in our tests, this works::
import sys
with self.assertRaises(SystemExit):
with Capturing() as output:
sys.exit()
But out of the testing environment, the code instead just exits.
To use it, access the obj.stdout_lines, or just iterate over the object
:param capture_stderr: if True, also captures sys.stderr. To access the
lines, use obj.stderr_lines. If False, obj.stderr_lines is None.
"""
# pylint: disable=attribute-defined-outside-init
def __init__(self, capture_stderr=False):
self.stdout_lines = []
super().__init__()
self._capture_stderr = capture_stderr
if self._capture_stderr:
self.stderr_lines = []
else:
self.stderr_lines = None
def __enter__(self):
"""Enter the context where all output is captured."""
self._stdout = sys.stdout
self._stringioout = io.StringIO()
sys.stdout = self._stringioout
if self._capture_stderr:
self._stderr = sys.stderr
self._stringioerr = io.StringIO()
sys.stderr = self._stringioerr
return self
def __exit__(self, *args):
"""Exit the context where all output is captured."""
self.stdout_lines.extend(self._stringioout.getvalue().splitlines())
sys.stdout = self._stdout
del self._stringioout # free up some memory
if self._capture_stderr:
self.stderr_lines.extend(self._stringioerr.getvalue().splitlines())
sys.stderr = self._stderr
del self._stringioerr # free up some memory
def __str__(self):
return str(self.stdout_lines)
def __iter__(self):
return iter(self.stdout_lines)
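# Illustrative usage sketch: the captured lines are available on the context manager
# object after the ``with`` block exits.
def _example_capturing():
    with Capturing() as output:
        print('hello')
    return output.stdout_lines  # -> ['hello']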
class ErrorAccumulator:
"""
    Allows running a number of functions and collecting all the errors they raise.
    This makes it possible to validate multiple things and tell the user about all the
    errors encountered at once. Works best if the individual functions do not depend on each other.
    Does not allow tracing the stack of each error, therefore do not use it for debugging, but for
    semantic checking with user-friendly error messages.
"""
def __init__(self, *error_cls):
self.error_cls = error_cls
self.errors = {k: [] for k in self.error_cls}
def run(self, function, *args, **kwargs):
try:
function(*args, **kwargs)
except self.error_cls as err:
self.errors[err.__class__].append(err)
def success(self):
return bool(not any(self.errors.values()))
def result(self, raise_error=Exception):
if raise_error:
self.raise_errors(raise_error)
return self.success(), self.errors
def raise_errors(self, raise_cls):
if not self.success():
raise raise_cls(f'The following errors were encountered: {self.errors}')
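# Illustrative usage sketch (hypothetical validators): several failures are collected
# and reported together instead of aborting at the first one.
def _example_error_accumulator():
    accumulator = ErrorAccumulator(ValueError)
    accumulator.run(int, 'not-a-number')  # the ValueError is stored, not raised
    accumulator.run(int, '42')            # succeeds, nothing stored
    return accumulator.success(), accumulator.errors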
class DatetimePrecision:
"""
A simple class which stores a datetime object with its precision. No
    internal check is done (because it is not possible).
    precision: 1 (only full date)
               2 (date plus hour)
               3 (date + hour + minute)
               4 (date + hour + minute + second)
"""
def __init__(self, dtobj, precision):
""" Constructor to check valid datetime object and precision """
if not isinstance(dtobj, datetime):
raise TypeError('dtobj argument has to be a datetime object')
if not isinstance(precision, int):
raise TypeError('precision argument has to be an integer')
self.dtobj = dtobj
self.precision = precision
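# Illustrative usage sketch (hypothetical timestamp): a datetime known only down to
# the hour is stored together with precision 2.
def _example_datetime_precision():
    return DatetimePrecision(datetime(2021, 1, 1, 12), 2)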
|
#
# This file is part of LiteX.
#
# Copyright (c) 2021 Franck Jullien <[email protected]>
# SPDX-License-Identifier: BSD-2-Clause
import os
import csv
import re
import datetime
from xml.dom import expatbuilder
import xml.etree.ElementTree as et
from litex.build import tools
namespaces = {
"efxpt" : "http://www.efinixinc.com/peri_design_db",
"xi" : "http://www.w3.org/2001/XInclude"
}
# Interface Writer Block ---------------------------------------------------------------------------
class InterfaceWriterBlock(dict):
def generate(self):
raise NotImplementedError # Must be overloaded
class InterfaceWriterXMLBlock(dict):
def generate(self):
raise NotImplementedError # Must be overloaded
# Interface Writer --------------------------------------------------------------------------------
class InterfaceWriter:
def __init__(self, efinity_path):
self.efinity_path = efinity_path
self.blocks = []
self.xml_blocks = []
self.filename = ""
self.platform = None
def set_build_params(self, platform, build_name):
self.filename = build_name
self.platform = platform
def generate_xml_blocks(self):
et.register_namespace("efxpt", "http://www.efinixinc.com/peri_design_db")
tree = et.parse(self.filename + ".peri.xml")
root = tree.getroot()
for block in self.xml_blocks:
if isinstance(block, InterfaceWriterXMLBlock):
block.generate(root, namespaces)
else:
if block["type"] == "LVDS":
self.add_lvds_xml(root, block)
if block["type"] == "DRAM":
self.add_dram_xml(root, block)
xml_string = et.tostring(root, "utf-8")
reparsed = expatbuilder.parseString(xml_string, False)
print_string = reparsed.toprettyxml(indent=" ")
# Remove lines with only whitespaces. Not sure why they are here
print_string = os.linesep.join([s for s in print_string.splitlines() if s.strip()])
tools.write_to_file("{}.peri.xml".format(self.filename), print_string)
def header(self, build_name, partnumber):
header = "# Autogenerated by LiteX / git: " + tools.get_litex_git_revision()
header += """
import os
import sys
import pprint
home = "{0}"
os.environ["EFXPT_HOME"] = home + "/pt"
os.environ["EFXPGM_HOME"] = home + "/pgm"
os.environ["EFXDBG_HOME"] = home + "/debugger"
os.environ["EFXIPM_HOME"] = home + "/ipm"
sys.path.append(home + "/pt/bin")
sys.path.append(home + "/lib/python3.8/site-packages")
from api_service.design import DesignAPI
from api_service.device import DeviceAPI
is_verbose = {1}
design = DesignAPI(is_verbose)
device = DeviceAPI(is_verbose)
design.create("{2}", "{3}", "./../gateware", overwrite=True)
"""
return header.format(self.efinity_path, "True", build_name, partnumber)
def get_block(self, name):
for b in self.blocks:
if b["name"] == name:
return b
return None
def generate_gpio(self, block, verbose=True):
name = block["name"]
mode = block["mode"]
cmd = ""
if mode == "INOUT":
if len(block["location"]) == 1:
cmd += f'design.create_inout_gpio("{name}")\n'
                cmd += f'design.assign_pkg_pin("{name}","{block["location"][0]}")\n'
else:
                cmd += f'design.create_inout_gpio("{name}",{block["size"]-1},0)\n'
for i, pad in enumerate(block["location"]):
cmd += f'design.assign_pkg_pin("{name}[{i}]","{pad}")\n'
cmd += "\n"
return cmd
if mode == "INPUT":
if len(block["location"]) == 1:
cmd += f'design.create_input_gpio("{name}")\n'
                cmd += f'design.assign_pkg_pin("{name}","{block["location"][0]}")\n'
else:
                cmd += f'design.create_input_gpio("{name}",{block["size"]-1},0)\n'
for i, pad in enumerate(block["location"]):
cmd += f'design.assign_pkg_pin("{name}[{i}]","{pad}")\n'
if "in_reg" in block:
                cmd += f'design.set_property("{name}","IN_REG","{block["in_reg"]}")\n'
                cmd += f'design.set_property("{name}","IN_CLK_PIN","{block["in_clk_pin"]}")\n'
return cmd
if mode == "OUTPUT":
if len(block["location"]) == 1:
cmd += 'design.create_output_gpio("{}")\n'.format(name)
cmd += 'design.assign_pkg_pin("{}","{}")\n'.format(name, block["location"][0])
else:
                # Bus form: assuming create_output_gpio mirrors the (name, msb, lsb) signature used above
                cmd += 'design.create_output_gpio("{}",{},0)\n'.format(name, block["size"]-1)
for i, pad in enumerate(block["location"]):
cmd += 'design.assign_pkg_pin("{}[{}]","{}")\n'.format(name, i, pad)
if "out_reg" in block:
cmd += 'design.set_property("{}","OUT_REG","{}")\n'.format(name, block["out_reg"])
cmd += 'design.set_property("{}","OUT_CLK_PIN","{}")\n'.format(name, block["out_clk_pin"])
if "drive_strength" in block:
                cmd += 'design.set_property("{}","DRIVE_STRENGTH","{}")\n'.format(name, block["drive_strength"])
cmd += "\n"
return cmd
if mode == "INPUT_CLK":
cmd += 'design.create_input_clock_gpio("{}")\n'.format(name)
cmd += 'design.set_property("{}","IN_PIN","{}")\n'.format(name, name)
cmd += 'design.assign_pkg_pin("{}","{}")\n\n'.format(name, block["location"])
return cmd
if mode == "OUTPUT_CLK":
cmd += 'design.create_clockout_gpio("{}")\n'.format(name)
cmd += 'design.set_property("{}","OUT_CLK_PIN","{}")\n'.format(name, name)
cmd += 'design.assign_pkg_pin("{}","{}")\n\n'.format(name, block["location"])
return cmd
cmd = "# TODO: " + str(block) +"\n"
return cmd
def generate_pll(self, block, partnumber, verbose=True):
name = block["name"]
cmd = "# ---------- PLL {} ---------\n".format(name)
cmd += 'design.create_block("{}", block_type="PLL")\n'.format(name)
cmd += 'pll_config = {{ "REFCLK_FREQ":"{}" }}\n'.format(block["input_freq"] / 1e6)
cmd += 'design.set_property("{}", pll_config, block_type="PLL")\n\n'.format(name)
if block["input_clock"] == "EXTERNAL":
# PLL V1 has a different configuration
if partnumber[0:2] in ["T4", "T8"]:
cmd += 'design.gen_pll_ref_clock("{}", pll_res="{}", refclk_res="{}", refclk_name="{}", ext_refclk_no="{}")\n\n' \
.format(name, block["resource"], block["input_clock_pad"], block["input_clock_name"], block["clock_no"])
else:
cmd += 'design.gen_pll_ref_clock("{}", pll_res="{}", refclk_src="{}", refclk_name="{}", ext_refclk_no="{}")\n\n' \
.format(name, block["resource"], block["input_clock"], block["input_clock_name"], block["clock_no"])
else:
cmd += 'design.gen_pll_ref_clock("{}", pll_res="{}", refclk_name="{}", refclk_src="CORE")\n'.format(name, block["resource"], block["input_signal"])
cmd += 'design.set_property("{}", "CORE_CLK_PIN", "{}", block_type="PLL")\n\n'.format(name, block["input_signal"])
cmd += 'design.set_property("{}","LOCKED_PIN","{}", block_type="PLL")\n'.format(name, block["locked"])
if block["rstn"] != "":
cmd += 'design.set_property("{}","RSTN_PIN","{}", block_type="PLL")\n\n'.format(name, block["rstn"])
# Output clock 0 is enabled by default
for i, clock in enumerate(block["clk_out"]):
if i > 0:
cmd += 'pll_config = {{ "CLKOUT{}_EN":"1", "CLKOUT{}_PIN":"{}" }}\n'.format(i, i, clock[0])
else:
cmd += 'pll_config = {{ "CLKOUT{}_PIN":"{}" }}\n'.format(i, clock[0])
cmd += 'design.set_property("{}", pll_config, block_type="PLL")\n\n'.format(name)
for i, clock in enumerate(block["clk_out"]):
cmd += 'design.set_property("{}","CLKOUT{}_PHASE","{}","PLL")\n'.format(name, i, clock[2])
cmd += "target_freq = {\n"
for i, clock in enumerate(block["clk_out"]):
cmd += ' "CLKOUT{}_FREQ": "{}",\n'.format(i, clock[1] / 1e6)
cmd += "}\n"
cmd += 'calc_result = design.auto_calc_pll_clock("{}", target_freq)\n'.format(name)
if "extra" in block:
cmd += block["extra"]
cmd += "\n"
if verbose:
cmd += 'print("#### {} ####")\n'.format(name)
cmd += 'clksrc_info = design.trace_ref_clock("{}", block_type="PLL")\n'.format(name)
cmd += 'pprint.pprint(clksrc_info)\n'
cmd += 'clock_source_prop = ["REFCLK_SOURCE", "CORE_CLK_PIN", "EXT_CLK", "CLKOUT1_EN", "CLKOUT2_EN","REFCLK_FREQ", "RESOURCE"]\n'
cmd += 'clock_source_prop += ["CLKOUT0_FREQ", "CLKOUT1_FREQ", "CLKOUT2_FREQ"]\n'
cmd += 'clock_source_prop += ["CLKOUT0_PHASE", "CLKOUT1_PHASE", "CLKOUT2_PHASE"]\n'
cmd += 'prop_map = design.get_property("{}", clock_source_prop, block_type="PLL")\n'.format(name)
cmd += 'pprint.pprint(prop_map)\n'
cmd += "# ---------- END PLL {} ---------\n\n".format(name)
return cmd
def generate(self, partnumber):
output = ""
for block in self.blocks:
if isinstance(block, InterfaceWriterBlock):
output += block.generate()
else:
if block["type"] == "PLL":
output += self.generate_pll(block, partnumber)
if block["type"] == "GPIO":
output += self.generate_gpio(block)
return output
def footer(self):
return """
# Check design, generate constraints and reports
design.generate(enable_bitstream=True)
# Save the configured periphery design
design.save()"""
def add_lvds_xml(self, root, params):
lvds_info = root.find("efxpt:lvds_info", namespaces)
if params["mode"] == "OUTPUT":
dir = "tx"
mode = "out"
else:
dir = "rx"
mode = "in"
pad = self.platform.parser.get_gpio_instance_from_pin(params["location"][0])
pad = pad.replace("TXP", "TX")
pad = pad.replace("TXN", "TX")
pad = pad.replace("RXP", "RX")
pad = pad.replace("RXN", "RX")
# Sometimes there is an extra identifier at the end
# TODO: do a better parser
if pad.count("_") == 2:
pad = pad.rsplit("_", 1)[0]
lvds = et.SubElement(lvds_info, "efxpt:lvds",
name = params["name"],
lvds_def = pad,
ops_type = dir
)
et.SubElement(lvds, "efxpt:ltx_info",
pll_instance = "",
fast_clock_name = "{}".format(params["fast_clk"]),
slow_clock_name = "{}".format(params["slow_clk"]),
reset_name = "",
out_bname = "{}".format(params["name"]),
oe_name = "",
clock_div = "1",
mode = "{}".format(mode),
serialization = "{}".format(params["serialisation"]),
reduced_swing = "false",
load = "3"
)
|
#
# This file is part of LiteX.
#
# Copyright (c) 2021 Franck Jullien <[email protected]>
# SPDX-License-Identifier: BSD-2-Clause
import os
import csv
import re
import datetime
from xml.dom import expatbuilder
import xml.etree.ElementTree as et
from litex.build import tools
namespaces = {
"efxpt" : "http://www.efinixinc.com/peri_design_db",
"xi" : "http://www.w3.org/2001/XInclude"
}
# Interface Writer Block ---------------------------------------------------------------------------
class InterfaceWriterBlock(dict):
def generate(self):
raise NotImplementedError # Must be overloaded
class InterfaceWriterXMLBlock(dict):
def generate(self):
raise NotImplementedError # Must be overloaded
# Interface Writer --------------------------------------------------------------------------------
class InterfaceWriter:
def __init__(self, efinity_path):
self.efinity_path = efinity_path
self.blocks = []
self.xml_blocks = []
self.filename = ""
self.platform = None
def set_build_params(self, platform, build_name):
self.filename = build_name
self.platform = platform
def generate_xml_blocks(self):
et.register_namespace("efxpt", "http://www.efinixinc.com/peri_design_db")
tree = et.parse(self.filename + ".peri.xml")
root = tree.getroot()
for block in self.xml_blocks:
if isinstance(block, InterfaceWriterXMLBlock):
block.generate(root, namespaces)
else:
if block["type"] == "LVDS":
self.add_lvds_xml(root, block)
if block["type"] == "DRAM":
self.add_dram_xml(root, block)
xml_string = et.tostring(root, "utf-8")
reparsed = expatbuilder.parseString(xml_string, False)
print_string = reparsed.toprettyxml(indent=" ")
# Remove lines with only whitespaces. Not sure why they are here
print_string = os.linesep.join([s for s in print_string.splitlines() if s.strip()])
tools.write_to_file("{}.peri.xml".format(self.filename), print_string)
def header(self, build_name, partnumber):
header = "# Autogenerated by LiteX / git: " + tools.get_litex_git_revision()
header += """
import os
import sys
import pprint
home = "{0}"
os.environ["EFXPT_HOME"] = home + "/pt"
os.environ["EFXPGM_HOME"] = home + "/pgm"
os.environ["EFXDBG_HOME"] = home + "/debugger"
os.environ["EFXIPM_HOME"] = home + "/ipm"
sys.path.append(home + "/pt/bin")
sys.path.append(home + "/lib/python3.8/site-packages")
from api_service.design import DesignAPI
from api_service.device import DeviceAPI
is_verbose = {1}
design = DesignAPI(is_verbose)
device = DeviceAPI(is_verbose)
design.create("{2}", "{3}", "./../gateware", overwrite=True)
"""
return header.format(self.efinity_path, "True", build_name, partnumber)
def get_block(self, name):
for b in self.blocks:
if b["name"] == name:
return b
return None
def generate_gpio(self, block, verbose=True):
name = block["name"]
mode = block["mode"]
cmd = ""
if mode == "INOUT":
if len(block["location"]) == 1:
cmd += f'design.create_inout_gpio("{name}")\n'
cmd += f'design.assign_pkg_pin("{name}","{block["location"][0]}")\n'
else:
cmd += f'design.create_inout_gpio("{name}",{block["size"]-1},0)\n'
for i, pad in enumerate(block["location"]):
cmd += f'design.assign_pkg_pin("{name}[{i}]","{pad}")\n'
cmd += "\n"
return cmd
if mode == "INPUT":
if len(block["location"]) == 1:
cmd += f'design.create_input_gpio("{name}")\n'
cmd += f'design.assign_pkg_pin("{name}","{block["location"][0]}")\n'
else:
cmd += f'design.create_input_gpio("{name}",{block["size"]-1},0)\n'
for i, pad in enumerate(block["location"]):
cmd += f'design.assign_pkg_pin("{name}[{i}]","{pad}")\n'
if "in_reg" in block:
cmd += f'design.set_property("{name}","IN_REG","{block["in_reg"]}")\n'
cmd += f'design.set_property("{name}","IN_CLK_PIN","{block["in_clk_pin"]}")\n'
return cmd
if mode == "OUTPUT":
if len(block["location"]) == 1:
cmd += 'design.create_output_gpio("{}")\n'.format(name)
cmd += 'design.assign_pkg_pin("{}","{}")\n'.format(name, block["location"][0])
else:
                # Bus form: assuming create_output_gpio mirrors the (name, msb, lsb) signature used above
                cmd += 'design.create_output_gpio("{}",{},0)\n'.format(name, block["size"]-1)
for i, pad in enumerate(block["location"]):
cmd += 'design.assign_pkg_pin("{}[{}]","{}")\n'.format(name, i, pad)
if "out_reg" in block:
cmd += 'design.set_property("{}","OUT_REG","{}")\n'.format(name, block["out_reg"])
cmd += 'design.set_property("{}","OUT_CLK_PIN","{}")\n'.format(name, block["out_clk_pin"])
if "drive_strength" in block:
                cmd += 'design.set_property("{}","DRIVE_STRENGTH","{}")\n'.format(name, block["drive_strength"])
cmd += "\n"
return cmd
if mode == "INPUT_CLK":
cmd += 'design.create_input_clock_gpio("{}")\n'.format(name)
cmd += 'design.set_property("{}","IN_PIN","{}")\n'.format(name, name)
cmd += 'design.assign_pkg_pin("{}","{}")\n\n'.format(name, block["location"])
return cmd
if mode == "OUTPUT_CLK":
cmd += 'design.create_clockout_gpio("{}")\n'.format(name)
cmd += 'design.set_property("{}","OUT_CLK_PIN","{}")\n'.format(name, name)
cmd += 'design.assign_pkg_pin("{}","{}")\n\n'.format(name, block["location"])
return cmd
cmd = "# TODO: " + str(block) +"\n"
return cmd
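    # Illustrative sketch (hypothetical pin name "C5"): generate_gpio above expects block
    # dicts of the form {"type": "GPIO", "name": "user_led", "mode": "OUTPUT",
    # "location": ["C5"], "size": 1} appended to self.blocks; generate() below dispatches
    # on block["type"].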
def generate_pll(self, block, partnumber, verbose=True):
name = block["name"]
cmd = "# ---------- PLL {} ---------\n".format(name)
cmd += 'design.create_block("{}", block_type="PLL")\n'.format(name)
cmd += 'pll_config = {{ "REFCLK_FREQ":"{}" }}\n'.format(block["input_freq"] / 1e6)
cmd += 'design.set_property("{}", pll_config, block_type="PLL")\n\n'.format(name)
if block["input_clock"] == "EXTERNAL":
# PLL V1 has a different configuration
if partnumber[0:2] in ["T4", "T8"]:
cmd += 'design.gen_pll_ref_clock("{}", pll_res="{}", refclk_res="{}", refclk_name="{}", ext_refclk_no="{}")\n\n' \
.format(name, block["resource"], block["input_clock_pad"], block["input_clock_name"], block["clock_no"])
else:
cmd += 'design.gen_pll_ref_clock("{}", pll_res="{}", refclk_src="{}", refclk_name="{}", ext_refclk_no="{}")\n\n' \
.format(name, block["resource"], block["input_clock"], block["input_clock_name"], block["clock_no"])
else:
cmd += 'design.gen_pll_ref_clock("{}", pll_res="{}", refclk_name="{}", refclk_src="CORE")\n'.format(name, block["resource"], block["input_signal"])
cmd += 'design.set_property("{}", "CORE_CLK_PIN", "{}", block_type="PLL")\n\n'.format(name, block["input_signal"])
cmd += 'design.set_property("{}","LOCKED_PIN","{}", block_type="PLL")\n'.format(name, block["locked"])
if block["rstn"] != "":
cmd += 'design.set_property("{}","RSTN_PIN","{}", block_type="PLL")\n\n'.format(name, block["rstn"])
# Output clock 0 is enabled by default
for i, clock in enumerate(block["clk_out"]):
if i > 0:
cmd += 'pll_config = {{ "CLKOUT{}_EN":"1", "CLKOUT{}_PIN":"{}" }}\n'.format(i, i, clock[0])
else:
cmd += 'pll_config = {{ "CLKOUT{}_PIN":"{}" }}\n'.format(i, clock[0])
cmd += 'design.set_property("{}", pll_config, block_type="PLL")\n\n'.format(name)
for i, clock in enumerate(block["clk_out"]):
cmd += 'design.set_property("{}","CLKOUT{}_PHASE","{}","PLL")\n'.format(name, i, clock[2])
cmd += "target_freq = {\n"
for i, clock in enumerate(block["clk_out"]):
cmd += ' "CLKOUT{}_FREQ": "{}",\n'.format(i, clock[1] / 1e6)
cmd += "}\n"
cmd += 'calc_result = design.auto_calc_pll_clock("{}", target_freq)\n'.format(name)
if "extra" in block:
cmd += block["extra"]
cmd += "\n"
if verbose:
cmd += 'print("#### {} ####")\n'.format(name)
cmd += 'clksrc_info = design.trace_ref_clock("{}", block_type="PLL")\n'.format(name)
cmd += 'pprint.pprint(clksrc_info)\n'
cmd += 'clock_source_prop = ["REFCLK_SOURCE", "CORE_CLK_PIN", "EXT_CLK", "CLKOUT1_EN", "CLKOUT2_EN","REFCLK_FREQ", "RESOURCE"]\n'
cmd += 'clock_source_prop += ["CLKOUT0_FREQ", "CLKOUT1_FREQ", "CLKOUT2_FREQ"]\n'
cmd += 'clock_source_prop += ["CLKOUT0_PHASE", "CLKOUT1_PHASE", "CLKOUT2_PHASE"]\n'
cmd += 'prop_map = design.get_property("{}", clock_source_prop, block_type="PLL")\n'.format(name)
cmd += 'pprint.pprint(prop_map)\n'
cmd += "# ---------- END PLL {} ---------\n\n".format(name)
return cmd
def generate(self, partnumber):
output = ""
for block in self.blocks:
if isinstance(block, InterfaceWriterBlock):
output += block.generate()
else:
if block["type"] == "PLL":
output += self.generate_pll(block, partnumber)
if block["type"] == "GPIO":
output += self.generate_gpio(block)
return output
def footer(self):
return """
# Check design, generate constraints and reports
design.generate(enable_bitstream=True)
# Save the configured periphery design
design.save()"""
def add_lvds_xml(self, root, params):
lvds_info = root.find("efxpt:lvds_info", namespaces)
if params["mode"] == "OUTPUT":
dir = "tx"
mode = "out"
else:
dir = "rx"
mode = "in"
pad = self.platform.parser.get_gpio_instance_from_pin(params["location"][0])
pad = pad.replace("TXP", "TX")
pad = pad.replace("TXN", "TX")
pad = pad.replace("RXP", "RX")
pad = pad.replace("RXN", "RX")
# Sometimes there is an extra identifier at the end
# TODO: do a better parser
if pad.count("_") == 2:
pad = pad.rsplit("_", 1)[0]
lvds = et.SubElement(lvds_info, "efxpt:lvds",
name = params["name"],
lvds_def = pad,
ops_type = dir
)
et.SubElement(lvds, "efxpt:ltx_info",
pll_instance = "",
fast_clock_name = "{}".format(params["fast_clk"]),
slow_clock_name = "{}".format(params["slow_clk"]),
reset_name = "",
out_bname = "{}".format(params["name"]),
oe_name = "",
clock_div = "1",
mode = "{}".format(mode),
serialization = "{}".format(params["serialisation"]),
reduced_swing = "false",
load = "3"
)
|
from typing import Any, MutableMapping, Optional
def merge_dicts(a: MutableMapping[str, Any], b: MutableMapping[str, Any], path: Optional[list] = None) -> MutableMapping[str, Any]:
"""
Merge the keys and values of the two dicts.
:param a:
:param b:
:param path:
:return:
:raises ValueError: When both dicts assign the same key, with different values.
"""
if path is None:
path = []
for key in b:
if key not in a:
a[key] = b[key]
continue
if isinstance(a[key], dict) and isinstance(b[key], dict):
merge_dicts(a[key], b[key], path + [str(key)])
elif a[key] == b[key]:
pass # same leaf value
else:
            raise ValueError(f"Conflict at {'.'.join(path + [str(key)])}")
return a
|
from typing import Any, MutableMapping, Optional
def merge_dicts(a: MutableMapping[str, Any], b: MutableMapping[str, Any], path: Optional[list] = None) -> MutableMapping[str, Any]:
"""
Merge the keys and values of the two dicts.
:param a:
:param b:
:param path:
:return:
:raises ValueError: When both dicts assign the same key, with different values.
"""
if path is None:
path = []
for key in b:
if key not in a:
a[key] = b[key]
continue
if isinstance(a[key], dict) and isinstance(b[key], dict):
merge_dicts(a[key], b[key], path + [str(key)])
elif a[key] == b[key]:
pass # same leaf value
else:
raise ValueError(f"Conflict at {'.'.join(path + [str(key)])}")
return a
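# Illustrative usage sketch (hypothetical dicts): nested mappings are merged recursively,
# while a conflicting leaf value raises ValueError.
def _example_merge_dicts():
    a = {'db': {'host': 'localhost'}, 'debug': True}
    b = {'db': {'port': 5432}}
    return merge_dicts(a, b)  # -> {'db': {'host': 'localhost', 'port': 5432}, 'debug': True}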
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Configuration base class and utilities."""
import copy
import json
import os
import re
import warnings
from typing import Any, Dict, List, Optional, Tuple, Union
from packaging import version
from requests import HTTPError
from . import __version__
from .dynamic_module_utils import custom_object_save
from .utils import (
CONFIG_NAME,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_path,
copy_func,
hf_bucket_url,
is_offline_mode,
is_remote_url,
is_torch_available,
logging,
)
logger = logging.get_logger(__name__)
_re_configuration_file = re.compile(r"config\.(.*)\.json")
class PretrainedConfig(PushToHubMixin):
r"""
Base class for all configuration classes. Handles a few parameters common to all models' configurations as well as
methods for loading/downloading/saving configurations.
<Tip>
A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to
initialize a model does **not** load the model weights. It only affects the model's configuration.
</Tip>
Class attributes (overridden by derived classes):
- **model_type** (`str`) -- An identifier for the model type, serialized into the JSON file, and used to recreate
the correct object in [`~transformers.AutoConfig`].
- **is_composition** (`bool`) -- Whether the config class is composed of multiple sub-configs. In this case the
config has to be initialized from two or more configs of type [`~transformers.PretrainedConfig`] like:
[`~transformers.EncoderDecoderConfig`] or [`~RagConfig`].
- **keys_to_ignore_at_inference** (`List[str]`) -- A list of keys to ignore by default when looking at dictionary
outputs of the model during inference.
- **attribute_map** (`Dict[str, str]`) -- A dict that maps model specific attribute names to the standardized
naming of attributes.
Common attributes (present in all subclasses):
- **vocab_size** (`int`) -- The number of tokens in the vocabulary, which is also the first dimension of the
embeddings matrix (this attribute may be missing for models that don't have a text modality like ViT).
- **hidden_size** (`int`) -- The hidden size of the model.
- **num_attention_heads** (`int`) -- The number of attention heads used in the multi-head attention layers of the
model.
- **num_hidden_layers** (`int`) -- The number of blocks in the model.
Arg:
name_or_path (`str`, *optional*, defaults to `""`):
Store the string that was passed to [`PreTrainedModel.from_pretrained`] or
[`TFPreTrainedModel.from_pretrained`] as `pretrained_model_name_or_path` if the configuration was created
with such a method.
output_hidden_states (`bool`, *optional*, defaults to `False`):
Whether or not the model should return all hidden-states.
output_attentions (`bool`, *optional*, defaults to `False`):
            Whether or not the model should return all attentions.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not the model should return a [`~transformers.utils.ModelOutput`] instead of a plain tuple.
is_encoder_decoder (`bool`, *optional*, defaults to `False`):
Whether the model is used as an encoder/decoder or not.
is_decoder (`bool`, *optional*, defaults to `False`):
Whether the model is used as decoder or not (in which case it's used as an encoder).
        cross_attention_hidden_size (`int`, *optional*):
The hidden size of the cross-attention layer in case the model is used as a decoder in an encoder-decoder
setting and the cross-attention hidden dimension differs from `self.config.hidden_size`.
add_cross_attention (`bool`, *optional*, defaults to `False`):
Whether cross-attention layers should be added to the model. Note, this option is only relevant for models
that can be used as decoder models within the [`EncoderDecoderModel`] class, which consists of all models
in `AUTO_MODELS_FOR_CAUSAL_LM`.
tie_encoder_decoder (`bool`, *optional*, defaults to `False`):
Whether all encoder weights should be tied to their equivalent decoder weights. This requires the encoder
and decoder model to have the exact same parameter names.
prune_heads (`Dict[int, List[int]]`, *optional*, defaults to `{}`):
Pruned heads of the model. The keys are the selected layer indices and the associated values, the list of
heads to prune in said layer.
For instance `{1: [0, 2], 2: [2, 3]}` will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.
chunk_size_feed_forward (`int`, *optional*, defaults to `0`):
The chunk size of all feed forward layers in the residual attention blocks. A chunk size of `0` means that
the feed forward layer is not chunked. A chunk size of n means that the feed forward layer processes `n` <
sequence_length embeddings at a time. For more information on feed forward chunking, see [How does Feed
Forward Chunking work?](../glossary.html#feed-forward-chunking).
> Parameters for sequence generation
max_length (`int`, *optional*, defaults to 20):
Maximum length that will be used by default in the `generate` method of the model.
min_length (`int`, *optional*, defaults to 10):
Minimum length that will be used by default in the `generate` method of the model.
do_sample (`bool`, *optional*, defaults to `False`):
            Flag that will be used by default in the `generate` method of the model. Whether or not to use sampling;
use greedy decoding otherwise.
early_stopping (`bool`, *optional*, defaults to `False`):
Flag that will be used by default in the `generate` method of the model. Whether to stop the beam search
when at least `num_beams` sentences are finished per batch or not.
num_beams (`int`, *optional*, defaults to 1):
Number of beams for beam search that will be used by default in the `generate` method of the model. 1 means
no beam search.
num_beam_groups (`int`, *optional*, defaults to 1):
Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams
that will be used by default in the `generate` method of the model. 1 means no group beam search.
diversity_penalty (`float`, *optional*, defaults to 0.0):
Value to control diversity for group beam search. that will be used by default in the `generate` method of
the model. 0 means no diversity penalty. The higher the penalty, the more diverse are the outputs.
temperature (`float`, *optional*, defaults to 1):
The value used to module the next token probabilities that will be used by default in the `generate` method
of the model. Must be strictly positive.
top_k (`int`, *optional*, defaults to 50):
Number of highest probability vocabulary tokens to keep for top-k-filtering that will be used by default in
the `generate` method of the model.
top_p (`float`, *optional*, defaults to 1):
Value that will be used by default in the `generate` method of the model for `top_p`. If set to float < 1,
only the most probable tokens with probabilities that add up to `top_p` or higher are kept for generation.
repetition_penalty (`float`, *optional*, defaults to 1):
Parameter for repetition penalty that will be used by default in the `generate` method of the model. 1.0
means no penalty.
length_penalty (`float`, *optional*, defaults to 1):
Exponential penalty to the length that will be used by default in the `generate` method of the model.
        no_repeat_ngram_size (`int`, *optional*, defaults to 0):
            Value that will be used by default in the `generate` method of the model for `no_repeat_ngram_size`. If
            set to int > 0, all ngrams of that size can only occur once.
        encoder_no_repeat_ngram_size (`int`, *optional*, defaults to 0):
            Value that will be used by default in the `generate` method of the model for
            `encoder_no_repeat_ngram_size`. If set to int > 0, all ngrams of that size that occur in the
            `encoder_input_ids` cannot occur in the `decoder_input_ids`.
bad_words_ids (`List[int]`, *optional*):
List of token ids that are not allowed to be generated that will be used by default in the `generate`
method of the model. In order to get the tokens of the words that should not appear in the generated text,
use `tokenizer.encode(bad_word, add_prefix_space=True)`.
num_return_sequences (`int`, *optional*, defaults to 1):
Number of independently computed returned sequences for each element in the batch that will be used by
default in the `generate` method of the model.
output_scores (`bool`, *optional*, defaults to `False`):
Whether the model should return the logits when used for generation.
return_dict_in_generate (`bool`, *optional*, defaults to `False`):
Whether the model should return a [`~transformers.utils.ModelOutput`] instead of a `torch.LongTensor`.
forced_bos_token_id (`int`, *optional*):
The id of the token to force as the first generated token after the `decoder_start_token_id`. Useful for
multilingual models like [mBART](../model_doc/mbart) where the first generated token needs to be the target
language token.
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached.
remove_invalid_values (`bool`, *optional*):
Whether to remove possible _nan_ and _inf_ outputs of the model to prevent the generation method to crash.
Note that using `remove_invalid_values` can slow down generation.
> Parameters for fine-tuning tasks
architectures (`List[str]`, *optional*):
Model architectures that can be used with the model pretrained weights.
finetuning_task (`str`, *optional*):
Name of the task used to fine-tune the model. This can be used when converting from an original (TensorFlow
or PyTorch) checkpoint.
id2label (`Dict[int, str]`, *optional*):
A map from index (for instance prediction index, or target index) to label.
label2id (`Dict[str, int]`, *optional*): A map from label to index for the model.
num_labels (`int`, *optional*):
Number of labels to use in the last layer added to the model, typically for a classification task.
task_specific_params (`Dict[str, Any]`, *optional*):
Additional keyword arguments to store for the current task.
problem_type (`str`, *optional*):
Problem type for `XxxForSequenceClassification` models. Can be one of `"regression"`,
`"single_label_classification"` or `"multi_label_classification"`.
> Parameters linked to the tokenizer
tokenizer_class (`str`, *optional*):
The name of the associated tokenizer class to use (if none is set, will use the tokenizer associated to the
model by default).
prefix (`str`, *optional*):
A specific prompt that should be added at the beginning of each text before calling the model.
bos_token_id (`int`, *optional*): The id of the _beginning-of-stream_ token.
pad_token_id (`int`, *optional*): The id of the _padding_ token.
eos_token_id (`int`, *optional*): The id of the _end-of-stream_ token.
decoder_start_token_id (`int`, *optional*):
If an encoder-decoder model starts decoding with a different token than _bos_, the id of that token.
sep_token_id (`int`, *optional*): The id of the _separation_ token.
> PyTorch specific parameters
torchscript (`bool`, *optional*, defaults to `False`):
Whether or not the model should be used with Torchscript.
tie_word_embeddings (`bool`, *optional*, defaults to `True`):
Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
model has a output word embedding layer.
torch_dtype (`str`, *optional*):
The `dtype` of the weights. This attribute can be used to initialize the model to a non-default `dtype`
(which is normally `float32`) and thus allow for optimal storage allocation. For example, if the saved
model is `float16`, ideally we want to load it back using the minimal amount of memory needed to load
`float16` weights. Since the config object is stored in plain text, this attribute contains just the
            floating type string without the `torch.` prefix. For example, for `torch.float16`, `torch_dtype` is the
`"float16"` string.
This attribute is currently not being used during model loading time, but this may change in the future
versions. But we can already start preparing for the future by saving the dtype with save_pretrained.
> TensorFlow specific parameters
use_bfloat16 (`bool`, *optional*, defaults to `False`):
Whether or not the model should use BFloat16 scalars (only used by some TensorFlow models).
"""
model_type: str = ""
is_composition: bool = False
attribute_map: Dict[str, str] = {}
_auto_class: Optional[str] = None
def __setattr__(self, key, value):
if key in super().__getattribute__("attribute_map"):
key = super().__getattribute__("attribute_map")[key]
super().__setattr__(key, value)
def __getattribute__(self, key):
if key != "attribute_map" and key in super().__getattribute__("attribute_map"):
key = super().__getattribute__("attribute_map")[key]
return super().__getattribute__(key)
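    # Illustrative sketch (hypothetical mapping): with ``attribute_map = {"hidden_dim": "hidden_size"}``
    # in a subclass, reading or writing ``config.hidden_dim`` is transparently redirected to
    # ``config.hidden_size`` by the two methods above.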
def __init__(self, **kwargs):
# Attributes with defaults
self.return_dict = kwargs.pop("return_dict", True)
self.output_hidden_states = kwargs.pop("output_hidden_states", False)
self.output_attentions = kwargs.pop("output_attentions", False)
self.torchscript = kwargs.pop("torchscript", False) # Only used by PyTorch models
self.torch_dtype = kwargs.pop("torch_dtype", None) # Only used by PyTorch models
self.use_bfloat16 = kwargs.pop("use_bfloat16", False)
self.pruned_heads = kwargs.pop("pruned_heads", {})
self.tie_word_embeddings = kwargs.pop(
"tie_word_embeddings", True
) # Whether input and output word embeddings should be tied for all MLM, LM and Seq2Seq models.
# Is decoder is used in encoder-decoder models to differentiate encoder from decoder
self.is_encoder_decoder = kwargs.pop("is_encoder_decoder", False)
self.is_decoder = kwargs.pop("is_decoder", False)
self.cross_attention_hidden_size = kwargs.pop("cross_attention_hidden_size", None)
self.add_cross_attention = kwargs.pop("add_cross_attention", False)
self.tie_encoder_decoder = kwargs.pop("tie_encoder_decoder", False)
# Parameters for sequence generation
self.max_length = kwargs.pop("max_length", 20)
self.min_length = kwargs.pop("min_length", 0)
self.do_sample = kwargs.pop("do_sample", False)
self.early_stopping = kwargs.pop("early_stopping", False)
self.num_beams = kwargs.pop("num_beams", 1)
self.num_beam_groups = kwargs.pop("num_beam_groups", 1)
self.diversity_penalty = kwargs.pop("diversity_penalty", 0.0)
self.temperature = kwargs.pop("temperature", 1.0)
self.top_k = kwargs.pop("top_k", 50)
self.top_p = kwargs.pop("top_p", 1.0)
self.typical_p = kwargs.pop("typical_p", 1.0)
self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0)
self.length_penalty = kwargs.pop("length_penalty", 1.0)
self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0)
self.encoder_no_repeat_ngram_size = kwargs.pop("encoder_no_repeat_ngram_size", 0)
self.bad_words_ids = kwargs.pop("bad_words_ids", None)
self.num_return_sequences = kwargs.pop("num_return_sequences", 1)
self.chunk_size_feed_forward = kwargs.pop("chunk_size_feed_forward", 0)
self.output_scores = kwargs.pop("output_scores", False)
self.return_dict_in_generate = kwargs.pop("return_dict_in_generate", False)
self.forced_bos_token_id = kwargs.pop("forced_bos_token_id", None)
self.forced_eos_token_id = kwargs.pop("forced_eos_token_id", None)
self.remove_invalid_values = kwargs.pop("remove_invalid_values", False)
self.exponential_decay_length_penalty = kwargs.pop("exponential_decay_length_penalty", None)
# Fine-tuning task arguments
self.architectures = kwargs.pop("architectures", None)
self.finetuning_task = kwargs.pop("finetuning_task", None)
self.id2label = kwargs.pop("id2label", None)
self.label2id = kwargs.pop("label2id", None)
if self.id2label is not None:
kwargs.pop("num_labels", None)
self.id2label = dict((int(key), value) for key, value in self.id2label.items())
# Keys are always strings in JSON so convert ids to int here.
else:
self.num_labels = kwargs.pop("num_labels", 2)
if self.torch_dtype is not None and isinstance(self.torch_dtype, str):
# we will start using self.torch_dtype in v5, but to be consistent with
# from_pretrained's torch_dtype arg convert it to an actual torch.dtype object
if is_torch_available():
import torch
self.torch_dtype = getattr(torch, self.torch_dtype)
# Tokenizer arguments TODO: eventually tokenizer and models should share the same config
self.tokenizer_class = kwargs.pop("tokenizer_class", None)
self.prefix = kwargs.pop("prefix", None)
self.bos_token_id = kwargs.pop("bos_token_id", None)
self.pad_token_id = kwargs.pop("pad_token_id", None)
self.eos_token_id = kwargs.pop("eos_token_id", None)
self.sep_token_id = kwargs.pop("sep_token_id", None)
self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None)
# task specific arguments
self.task_specific_params = kwargs.pop("task_specific_params", None)
# regression / multi-label classification
self.problem_type = kwargs.pop("problem_type", None)
allowed_problem_types = ("regression", "single_label_classification", "multi_label_classification")
if self.problem_type is not None and self.problem_type not in allowed_problem_types:
raise ValueError(
f"The config parameter `problem_type` was not understood: received {self.problem_type} "
"but only 'regression', 'single_label_classification' and 'multi_label_classification' are valid."
)
# TPU arguments
if kwargs.pop("xla_device", None) is not None:
logger.warning(
"The `xla_device` argument has been deprecated in v4.4.0 of Transformers. It is ignored and you can "
"safely remove it from your `config.json` file."
)
# Name or path to the pretrained checkpoint
self._name_or_path = str(kwargs.pop("name_or_path", ""))
# Drop the transformers version info
self.transformers_version = kwargs.pop("transformers_version", None)
# Deal with gradient checkpointing
if kwargs.get("gradient_checkpointing", False):
warnings.warn(
"Passing `gradient_checkpointing` to a config initialization is deprecated and will be removed in v5 "
"Transformers. Using `model.gradient_checkpointing_enable()` instead, or if you are using the "
"`Trainer` API, pass `gradient_checkpointing=True` in your `TrainingArguments`."
)
# Additional attributes without default values
for key, value in kwargs.items():
try:
setattr(self, key, value)
except AttributeError as err:
logger.error(f"Can't set {key} with value {value} for {self}")
raise err
@property
def name_or_path(self) -> str:
return getattr(self, "_name_or_path", None)
@name_or_path.setter
def name_or_path(self, value):
self._name_or_path = str(value) # Make sure that name_or_path is a string (for JSON encoding)
@property
def use_return_dict(self) -> bool:
"""
`bool`: Whether or not return [`~utils.ModelOutput`] instead of tuples.
"""
# If torchscript is set, force `return_dict=False` to avoid jit errors
return self.return_dict and not self.torchscript
@property
def num_labels(self) -> int:
"""
`int`: The number of labels for classification models.
"""
return len(self.id2label)
@num_labels.setter
def num_labels(self, num_labels: int):
if not hasattr(self, "id2label") or self.id2label is None or len(self.id2label) != num_labels:
self.id2label = {i: f"LABEL_{i}" for i in range(num_labels)}
self.label2id = dict(zip(self.id2label.values(), self.id2label.keys()))
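    # Illustrative sketch (hypothetical values): assigning ``config.num_labels = 3`` rebuilds
    # ``id2label`` as {0: "LABEL_0", 1: "LABEL_1", 2: "LABEL_2"} and ``label2id`` as its
    # inverse, which is exactly what the setter above implements.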
def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
"""
Save a configuration object to the directory `save_directory`, so that it can be re-loaded using the
[`~PretrainedConfig.from_pretrained`] class method.
Args:
save_directory (`str` or `os.PathLike`):
Directory where the configuration JSON file will be saved (will be created if it does not exist).
push_to_hub (`bool`, *optional*, defaults to `False`):
Whether or not to push your model to the Hugging Face model hub after saving it.
<Tip warning={true}>
Using `push_to_hub=True` will synchronize the repository you are pushing to with `save_directory`,
which requires `save_directory` to be a local clone of the repo you are pushing to if it's an existing
folder. Pass along `temp_dir=True` to use a temporary directory instead.
</Tip>
kwargs:
Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
"""
if os.path.isfile(save_directory):
raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
if push_to_hub:
commit_message = kwargs.pop("commit_message", None)
repo = self._create_or_get_repo(save_directory, **kwargs)
os.makedirs(save_directory, exist_ok=True)
# If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be
# loaded from the Hub.
if self._auto_class is not None:
custom_object_save(self, save_directory, config=self)
# If we save using the predefined names, we can load using `from_pretrained`
output_config_file = os.path.join(save_directory, CONFIG_NAME)
self.to_json_file(output_config_file, use_diff=True)
logger.info(f"Configuration saved in {output_config_file}")
if push_to_hub:
url = self._push_to_hub(repo, commit_message=commit_message)
logger.info(f"Configuration pushed to the hub in this commit: {url}")
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
r"""
Instantiate a [`PretrainedConfig`] (or a derived class) from a pretrained model configuration.
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a pretrained model configuration hosted inside a model repo on
huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or
namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`.
- a path to a *directory* containing a configuration file saved using the
[`~PretrainedConfig.save_pretrained`] method, e.g., `./my_model_directory/`.
- a path or url to a saved configuration JSON *file*, e.g., `./my_model_directory/configuration.json`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force to (re-)download the configuration files and override the cached versions if
they exist.
resume_download (`bool`, *optional*, defaults to `False`):
Whether or not to delete incompletely received file. Attempts to resume the download if such a file
exists.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
use_auth_token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `transformers-cli login` (stored in `~/.huggingface`).
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
return_unused_kwargs (`bool`, *optional*, defaults to `False`):
If `False`, then this function returns just the final configuration object.
If `True`, then this functions returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a
dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the
part of `kwargs` which has not been used to update `config` and is otherwise ignored.
kwargs (`Dict[str, Any]`, *optional*):
The values in kwargs of any keys which are configuration attributes will be used to override the loaded
values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
by the `return_unused_kwargs` keyword parameter.
<Tip>
Passing `use_auth_token=True` is required when you want to use a private model.
</Tip>
Returns:
[`PretrainedConfig`]: The configuration object instantiated from this pretrained model.
Examples:
```python
# We can't instantiate directly the base class *PretrainedConfig* so let's show the examples on a
# derived class: BertConfig
config = BertConfig.from_pretrained(
"bert-base-uncased"
) # Download configuration from huggingface.co and cache.
config = BertConfig.from_pretrained(
"./test/saved_model/"
) # E.g. config (or model) was saved using *save_pretrained('./test/saved_model/')*
config = BertConfig.from_pretrained("./test/saved_model/my_configuration.json")
config = BertConfig.from_pretrained("bert-base-uncased", output_attentions=True, foo=False)
assert config.output_attentions == True
config, unused_kwargs = BertConfig.from_pretrained(
"bert-base-uncased", output_attentions=True, foo=False, return_unused_kwargs=True
)
assert config.output_attentions == True
assert unused_kwargs == {"foo": False}
```"""
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict["model_type"]} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
)
return cls.from_dict(config_dict, **kwargs)
@classmethod
def get_config_dict(
cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""
From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a
[`PretrainedConfig`] using `from_dict`.
Parameters:
pretrained_model_name_or_path (`str` or `os.PathLike`):
The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
Returns:
`Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the configuration object.
"""
original_kwargs = copy.deepcopy(kwargs)
# Get config dict associated with the base config file
config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
# That config file may point us toward another config file to use.
if "configuration_files" in config_dict:
configuration_file = get_configuration_file(config_dict["configuration_files"])
config_dict, kwargs = cls._get_config_dict(
pretrained_model_name_or_path, _configuration_file=configuration_file, **original_kwargs
)
return config_dict, kwargs
@classmethod
def _get_config_dict(
cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
cache_dir = kwargs.pop("cache_dir", None)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
use_auth_token = kwargs.pop("use_auth_token", None)
local_files_only = kwargs.pop("local_files_only", False)
revision = kwargs.pop("revision", None)
from_pipeline = kwargs.pop("_from_pipeline", None)
from_auto_class = kwargs.pop("_from_auto", False)
user_agent = {"file_type": "config", "from_auto_class": from_auto_class}
if from_pipeline is not None:
user_agent["using_pipeline"] = from_pipeline
if is_offline_mode() and not local_files_only:
logger.info("Offline mode: forcing local_files_only=True")
local_files_only = True
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
if os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
config_file = pretrained_model_name_or_path
else:
configuration_file = kwargs.pop("_configuration_file", CONFIG_NAME)
if os.path.isdir(pretrained_model_name_or_path):
config_file = os.path.join(pretrained_model_name_or_path, configuration_file)
else:
config_file = hf_bucket_url(
pretrained_model_name_or_path, filename=configuration_file, revision=revision, mirror=None
)
try:
# Load from URL or cache if already cached
resolved_config_file = cached_path(
config_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
user_agent=user_agent,
)
except RepositoryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier listed on "
"'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having "
"permission to this repo with `use_auth_token` or log in with `huggingface-cli login` and pass "
"`use_auth_token=True`."
)
except RevisionNotFoundError:
raise EnvironmentError(
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for this "
f"model name. Check the model page at 'https://huggingface.co/{pretrained_model_name_or_path}' for "
"available revisions."
)
except EntryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} does not appear to have a file named {configuration_file}."
)
except HTTPError as err:
raise EnvironmentError(
f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
)
except ValueError:
raise EnvironmentError(
f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it in the cached "
f"files and it looks like {pretrained_model_name_or_path} is not the path to a directory containing a "
"{configuration_file} file.\nCheckout your internet connection or see how to run the library in "
"offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'."
)
except EnvironmentError:
raise EnvironmentError(
f"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from "
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
f"containing a {configuration_file} file"
)
try:
# Load config dict
config_dict = cls._dict_from_json_file(resolved_config_file)
except (json.JSONDecodeError, UnicodeDecodeError):
raise EnvironmentError(
f"It looks like the config file at '{resolved_config_file}' is not a valid JSON file."
)
if resolved_config_file == config_file:
logger.info(f"loading configuration file {config_file}")
else:
logger.info(f"loading configuration file {config_file} from cache at {resolved_config_file}")
return config_dict, kwargs
@classmethod
def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> "PretrainedConfig":
"""
Instantiates a [`PretrainedConfig`] from a Python dictionary of parameters.
Args:
config_dict (`Dict[str, Any]`):
Dictionary that will be used to instantiate the configuration object. Such a dictionary can be
retrieved from a pretrained checkpoint by leveraging the [`~PretrainedConfig.get_config_dict`] method.
kwargs (`Dict[str, Any]`):
Additional parameters from which to initialize the configuration object.
Returns:
[`PretrainedConfig`]: The configuration object instantiated from those parameters.
"""
return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
config = cls(**config_dict)
if hasattr(config, "pruned_heads"):
config.pruned_heads = dict((int(key), value) for key, value in config.pruned_heads.items())
# Update config with kwargs if needed
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
setattr(config, key, value)
if key != "torch_dtype":
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
logger.info(f"Model config {config}")
if return_unused_kwargs:
return config, kwargs
else:
return config
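    # Editorial note: `torch_dtype` is intentionally not added to `to_remove` above, so it
    # stays in the returned unused kwargs even after being applied to the config. E.g.,
    # assuming kwargs == {"torch_dtype": "float16", "output_attentions": True}, both are
    # applied to `config` but only "output_attentions" is popped from `kwargs` here.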
@classmethod
def from_json_file(cls, json_file: Union[str, os.PathLike]) -> "PretrainedConfig":
"""
Instantiates a [`PretrainedConfig`] from the path to a JSON file of parameters.
Args:
json_file (`str` or `os.PathLike`):
Path to the JSON file containing the parameters.
Returns:
[`PretrainedConfig`]: The configuration object instantiated from that JSON file.
"""
config_dict = cls._dict_from_json_file(json_file)
return cls(**config_dict)
@classmethod
def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return json.loads(text)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return f"{self.__class__.__name__} {self.to_json_string()}"
def to_diff_dict(self) -> Dict[str, Any]:
"""
Removes all attributes from config which correspond to the default config attributes for better readability and
serializes to a Python dictionary.
Returns:
            `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
config_dict = self.to_dict()
# get the default config dict
default_config_dict = PretrainedConfig().to_dict()
# get class specific config dict
class_config_dict = self.__class__().to_dict() if not self.is_composition else {}
serializable_config_dict = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if (
key not in default_config_dict
or key == "transformers_version"
or value != default_config_dict[key]
or (key in class_config_dict and value != class_config_dict[key])
):
serializable_config_dict[key] = value
self.dict_torch_dtype_to_str(serializable_config_dict)
return serializable_config_dict
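    # Editorial example: for a hypothetical BertConfig(), attributes that keep their
    # PretrainedConfig defaults (e.g. do_sample=False, num_beams=1) are dropped by the
    # filter above, while model-specific attributes, "model_type" and
    # "transformers_version" are kept.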
def to_dict(self) -> Dict[str, Any]:
"""
Serializes this instance to a Python dictionary.
Returns:
`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
output = copy.deepcopy(self.__dict__)
if hasattr(self.__class__, "model_type"):
output["model_type"] = self.__class__.model_type
if "_auto_class" in output:
del output["_auto_class"]
# Transformers version when serializing the model
output["transformers_version"] = __version__
self.dict_torch_dtype_to_str(output)
return output
def to_json_string(self, use_diff: bool = True) -> str:
"""
Serializes this instance to a JSON string.
Args:
use_diff (`bool`, *optional*, defaults to `True`):
If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`
is serialized to JSON string.
Returns:
`str`: String containing all the attributes that make up this configuration instance in JSON format.
"""
if use_diff is True:
config_dict = self.to_diff_dict()
else:
config_dict = self.to_dict()
return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True):
"""
Save this instance to a JSON file.
Args:
json_file_path (`str` or `os.PathLike`):
Path to the JSON file in which this configuration instance's parameters will be saved.
use_diff (`bool`, *optional*, defaults to `True`):
If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`
is serialized to JSON file.
"""
with open(json_file_path, "w", encoding="utf-8") as writer:
writer.write(self.to_json_string(use_diff=use_diff))
def update(self, config_dict: Dict[str, Any]):
"""
Updates attributes of this class with attributes from `config_dict`.
Args:
config_dict (`Dict[str, Any]`): Dictionary of attributes that should be updated for this class.
"""
for key, value in config_dict.items():
setattr(self, key, value)
def update_from_string(self, update_str: str):
"""
Updates attributes of this class with attributes from `update_str`.
The expected format is ints, floats and strings as is, and for booleans use `true` or `false`. For example:
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
The keys to change have to already exist in the config object.
Args:
update_str (`str`): String with attributes that should be updated for this class.
"""
d = dict(x.split("=") for x in update_str.split(","))
for k, v in d.items():
if not hasattr(self, k):
raise ValueError(f"key {k} isn't in the original config dict")
old_v = getattr(self, k)
if isinstance(old_v, bool):
if v.lower() in ["true", "1", "y", "yes"]:
v = True
elif v.lower() in ["false", "0", "n", "no"]:
v = False
else:
raise ValueError(f"can't derive true or false from {v} (key {k})")
elif isinstance(old_v, int):
v = int(v)
elif isinstance(old_v, float):
v = float(v)
elif not isinstance(old_v, str):
raise ValueError(
f"You can only update int, float, bool or string values in the config, got {v} for key {k}"
)
setattr(self, k, v)
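    # Usage sketch (editorial), mirroring the format described in the docstring above:
    #     config.update_from_string("n_embd=10,resid_pdrop=0.2,scale_attn_weights=false")
    # sets n_embd to int 10, resid_pdrop to float 0.2 and scale_attn_weights to False,
    # provided those keys already exist on the config.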
def dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None:
"""
Checks whether the passed dictionary and its nested dicts have a *torch_dtype* key and if it's not None,
        converts torch.dtype to a string of just the type. For example, `torch.float32` gets converted into the
        *"float32"* string, which can then be stored in the JSON format.
"""
if d.get("torch_dtype", None) is not None and not isinstance(d["torch_dtype"], str):
d["torch_dtype"] = str(d["torch_dtype"]).split(".")[1]
for value in d.values():
if isinstance(value, dict):
self.dict_torch_dtype_to_str(value)
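    # Editorial example of the conversion above:
    #     {"torch_dtype": torch.float32} -> {"torch_dtype": "float32"}
    # Nested dicts (e.g. task_specific_params) are converted through the recursive call.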
@classmethod
def register_for_auto_class(cls, auto_class="AutoConfig"):
"""
Register this class with a given auto class. This should only be used for custom configurations as the ones in
the library are already mapped with `AutoConfig`.
<Tip warning={true}>
This API is experimental and may have some slight breaking changes in the next releases.
</Tip>
Args:
auto_class (`str` or `type`, *optional*, defaults to `"AutoConfig"`):
The auto class to register this new configuration with.
"""
if not isinstance(auto_class, str):
auto_class = auto_class.__name__
import transformers.models.auto as auto_module
if not hasattr(auto_module, auto_class):
raise ValueError(f"{auto_class} is not a valid auto class.")
cls._auto_class = auto_class
def get_configuration_file(configuration_files: List[str]) -> str:
"""
Get the configuration file to use for this version of transformers.
Args:
configuration_files (`List[str]`): The list of available configuration files.
Returns:
`str`: The configuration file to use.
"""
configuration_files_map = {}
for file_name in configuration_files:
search = _re_configuration_file.search(file_name)
if search is not None:
v = search.groups()[0]
configuration_files_map[v] = file_name
available_versions = sorted(configuration_files_map.keys())
# Defaults to FULL_CONFIGURATION_FILE and then try to look at some newer versions.
configuration_file = CONFIG_NAME
transformers_version = version.parse(__version__)
for v in available_versions:
if version.parse(v) <= transformers_version:
configuration_file = configuration_files_map[v]
else:
# No point going further since the versions are sorted.
break
return configuration_file
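# Editorial example: assuming transformers 4.18.0 is installed and
# configuration_files == ["config.4.0.0.json", "config.5.0.0.json"], the loop above
# selects "config.4.0.0.json"; under a 5.x install it would select "config.5.0.0.json".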
PretrainedConfig.push_to_hub = copy_func(PretrainedConfig.push_to_hub)
PretrainedConfig.push_to_hub.__doc__ = PretrainedConfig.push_to_hub.__doc__.format(
object="config", object_class="AutoConfig", object_files="configuration file"
)
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Configuration base class and utilities."""
import copy
import json
import os
import re
import warnings
from typing import Any, Dict, List, Optional, Tuple, Union
from packaging import version
from requests import HTTPError
from . import __version__
from .dynamic_module_utils import custom_object_save
from .utils import (
CONFIG_NAME,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_path,
copy_func,
hf_bucket_url,
is_offline_mode,
is_remote_url,
is_torch_available,
logging,
)
logger = logging.get_logger(__name__)
_re_configuration_file = re.compile(r"config\.(.*)\.json")
class PretrainedConfig(PushToHubMixin):
r"""
Base class for all configuration classes. Handles a few parameters common to all models' configurations as well as
methods for loading/downloading/saving configurations.
<Tip>
A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to
initialize a model does **not** load the model weights. It only affects the model's configuration.
</Tip>
Class attributes (overridden by derived classes):
- **model_type** (`str`) -- An identifier for the model type, serialized into the JSON file, and used to recreate
the correct object in [`~transformers.AutoConfig`].
- **is_composition** (`bool`) -- Whether the config class is composed of multiple sub-configs. In this case the
config has to be initialized from two or more configs of type [`~transformers.PretrainedConfig`] like:
[`~transformers.EncoderDecoderConfig`] or [`~RagConfig`].
- **keys_to_ignore_at_inference** (`List[str]`) -- A list of keys to ignore by default when looking at dictionary
outputs of the model during inference.
- **attribute_map** (`Dict[str, str]`) -- A dict that maps model specific attribute names to the standardized
naming of attributes.
Common attributes (present in all subclasses):
- **vocab_size** (`int`) -- The number of tokens in the vocabulary, which is also the first dimension of the
embeddings matrix (this attribute may be missing for models that don't have a text modality like ViT).
- **hidden_size** (`int`) -- The hidden size of the model.
- **num_attention_heads** (`int`) -- The number of attention heads used in the multi-head attention layers of the
model.
- **num_hidden_layers** (`int`) -- The number of blocks in the model.
    Args:
name_or_path (`str`, *optional*, defaults to `""`):
Store the string that was passed to [`PreTrainedModel.from_pretrained`] or
[`TFPreTrainedModel.from_pretrained`] as `pretrained_model_name_or_path` if the configuration was created
with such a method.
output_hidden_states (`bool`, *optional*, defaults to `False`):
Whether or not the model should return all hidden-states.
output_attentions (`bool`, *optional*, defaults to `False`):
            Whether or not the model should return all attentions.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not the model should return a [`~transformers.utils.ModelOutput`] instead of a plain tuple.
is_encoder_decoder (`bool`, *optional*, defaults to `False`):
Whether the model is used as an encoder/decoder or not.
is_decoder (`bool`, *optional*, defaults to `False`):
Whether the model is used as decoder or not (in which case it's used as an encoder).
        cross_attention_hidden_size (`int`, *optional*):
The hidden size of the cross-attention layer in case the model is used as a decoder in an encoder-decoder
setting and the cross-attention hidden dimension differs from `self.config.hidden_size`.
add_cross_attention (`bool`, *optional*, defaults to `False`):
Whether cross-attention layers should be added to the model. Note, this option is only relevant for models
that can be used as decoder models within the [`EncoderDecoderModel`] class, which consists of all models
in `AUTO_MODELS_FOR_CAUSAL_LM`.
tie_encoder_decoder (`bool`, *optional*, defaults to `False`):
Whether all encoder weights should be tied to their equivalent decoder weights. This requires the encoder
and decoder model to have the exact same parameter names.
prune_heads (`Dict[int, List[int]]`, *optional*, defaults to `{}`):
Pruned heads of the model. The keys are the selected layer indices and the associated values, the list of
heads to prune in said layer.
For instance `{1: [0, 2], 2: [2, 3]}` will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.
chunk_size_feed_forward (`int`, *optional*, defaults to `0`):
The chunk size of all feed forward layers in the residual attention blocks. A chunk size of `0` means that
the feed forward layer is not chunked. A chunk size of n means that the feed forward layer processes `n` <
sequence_length embeddings at a time. For more information on feed forward chunking, see [How does Feed
Forward Chunking work?](../glossary.html#feed-forward-chunking).
> Parameters for sequence generation
max_length (`int`, *optional*, defaults to 20):
Maximum length that will be used by default in the `generate` method of the model.
min_length (`int`, *optional*, defaults to 10):
Minimum length that will be used by default in the `generate` method of the model.
do_sample (`bool`, *optional*, defaults to `False`):
Flag that will be used by default in the `generate` method of the model. Whether or not to use sampling ;
use greedy decoding otherwise.
early_stopping (`bool`, *optional*, defaults to `False`):
Flag that will be used by default in the `generate` method of the model. Whether to stop the beam search
when at least `num_beams` sentences are finished per batch or not.
num_beams (`int`, *optional*, defaults to 1):
Number of beams for beam search that will be used by default in the `generate` method of the model. 1 means
no beam search.
num_beam_groups (`int`, *optional*, defaults to 1):
Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams
that will be used by default in the `generate` method of the model. 1 means no group beam search.
diversity_penalty (`float`, *optional*, defaults to 0.0):
Value to control diversity for group beam search. that will be used by default in the `generate` method of
the model. 0 means no diversity penalty. The higher the penalty, the more diverse are the outputs.
temperature (`float`, *optional*, defaults to 1):
The value used to module the next token probabilities that will be used by default in the `generate` method
of the model. Must be strictly positive.
top_k (`int`, *optional*, defaults to 50):
Number of highest probability vocabulary tokens to keep for top-k-filtering that will be used by default in
the `generate` method of the model.
top_p (`float`, *optional*, defaults to 1):
Value that will be used by default in the `generate` method of the model for `top_p`. If set to float < 1,
only the most probable tokens with probabilities that add up to `top_p` or higher are kept for generation.
repetition_penalty (`float`, *optional*, defaults to 1):
Parameter for repetition penalty that will be used by default in the `generate` method of the model. 1.0
means no penalty.
length_penalty (`float`, *optional*, defaults to 1):
Exponential penalty to the length that will be used by default in the `generate` method of the model.
no_repeat_ngram_size (`int`, *optional*, defaults to 0) -- Value that will be used by default in the
`generate` method of the model for `no_repeat_ngram_size`. If set to int > 0, all ngrams of that size can
only occur once.
encoder_no_repeat_ngram_size (`int`, *optional*, defaults to 0) -- Value that will be used by
default in the `generate` method of the model for `encoder_no_repeat_ngram_size`. If set to int > 0, all
ngrams of that size that occur in the `encoder_input_ids` cannot occur in the `decoder_input_ids`.
bad_words_ids (`List[int]`, *optional*):
List of token ids that are not allowed to be generated that will be used by default in the `generate`
method of the model. In order to get the tokens of the words that should not appear in the generated text,
use `tokenizer.encode(bad_word, add_prefix_space=True)`.
num_return_sequences (`int`, *optional*, defaults to 1):
Number of independently computed returned sequences for each element in the batch that will be used by
default in the `generate` method of the model.
output_scores (`bool`, *optional*, defaults to `False`):
Whether the model should return the logits when used for generation.
return_dict_in_generate (`bool`, *optional*, defaults to `False`):
Whether the model should return a [`~transformers.utils.ModelOutput`] instead of a `torch.LongTensor`.
forced_bos_token_id (`int`, *optional*):
The id of the token to force as the first generated token after the `decoder_start_token_id`. Useful for
multilingual models like [mBART](../model_doc/mbart) where the first generated token needs to be the target
language token.
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached.
remove_invalid_values (`bool`, *optional*):
            Whether to remove possible _nan_ and _inf_ outputs of the model to prevent the generation method from crashing.
Note that using `remove_invalid_values` can slow down generation.
> Parameters for fine-tuning tasks
architectures (`List[str]`, *optional*):
Model architectures that can be used with the model pretrained weights.
finetuning_task (`str`, *optional*):
Name of the task used to fine-tune the model. This can be used when converting from an original (TensorFlow
or PyTorch) checkpoint.
id2label (`Dict[int, str]`, *optional*):
A map from index (for instance prediction index, or target index) to label.
label2id (`Dict[str, int]`, *optional*): A map from label to index for the model.
num_labels (`int`, *optional*):
Number of labels to use in the last layer added to the model, typically for a classification task.
task_specific_params (`Dict[str, Any]`, *optional*):
Additional keyword arguments to store for the current task.
problem_type (`str`, *optional*):
Problem type for `XxxForSequenceClassification` models. Can be one of `"regression"`,
`"single_label_classification"` or `"multi_label_classification"`.
> Parameters linked to the tokenizer
tokenizer_class (`str`, *optional*):
The name of the associated tokenizer class to use (if none is set, will use the tokenizer associated to the
model by default).
prefix (`str`, *optional*):
A specific prompt that should be added at the beginning of each text before calling the model.
bos_token_id (`int`, *optional*): The id of the _beginning-of-stream_ token.
pad_token_id (`int`, *optional*): The id of the _padding_ token.
eos_token_id (`int`, *optional*): The id of the _end-of-stream_ token.
decoder_start_token_id (`int`, *optional*):
If an encoder-decoder model starts decoding with a different token than _bos_, the id of that token.
sep_token_id (`int`, *optional*): The id of the _separation_ token.
> PyTorch specific parameters
torchscript (`bool`, *optional*, defaults to `False`):
Whether or not the model should be used with Torchscript.
tie_word_embeddings (`bool`, *optional*, defaults to `True`):
Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
            model has an output word embedding layer.
torch_dtype (`str`, *optional*):
The `dtype` of the weights. This attribute can be used to initialize the model to a non-default `dtype`
(which is normally `float32`) and thus allow for optimal storage allocation. For example, if the saved
model is `float16`, ideally we want to load it back using the minimal amount of memory needed to load
`float16` weights. Since the config object is stored in plain text, this attribute contains just the
            floating point type string without the `torch.` prefix. For example, for `torch.float16`, `torch_dtype` is the
`"float16"` string.
            This attribute is currently not used during model loading, but this may change in future versions. We can
            already start preparing for that by saving the dtype with `save_pretrained`.
> TensorFlow specific parameters
use_bfloat16 (`bool`, *optional*, defaults to `False`):
Whether or not the model should use BFloat16 scalars (only used by some TensorFlow models).
"""
model_type: str = ""
is_composition: bool = False
attribute_map: Dict[str, str] = {}
_auto_class: Optional[str] = None
def __setattr__(self, key, value):
if key in super().__getattribute__("attribute_map"):
key = super().__getattribute__("attribute_map")[key]
super().__setattr__(key, value)
def __getattribute__(self, key):
if key != "attribute_map" and key in super().__getattribute__("attribute_map"):
key = super().__getattribute__("attribute_map")[key]
return super().__getattribute__(key)
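    # Editorial note: with a hypothetical subclass defining
    #     attribute_map = {"hidden_size": "n_embd"}
    # reading or writing `config.hidden_size` is transparently redirected to
    # `config.n_embd` by the two methods above.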
def __init__(self, **kwargs):
# Attributes with defaults
self.return_dict = kwargs.pop("return_dict", True)
self.output_hidden_states = kwargs.pop("output_hidden_states", False)
self.output_attentions = kwargs.pop("output_attentions", False)
self.torchscript = kwargs.pop("torchscript", False) # Only used by PyTorch models
self.torch_dtype = kwargs.pop("torch_dtype", None) # Only used by PyTorch models
self.use_bfloat16 = kwargs.pop("use_bfloat16", False)
self.pruned_heads = kwargs.pop("pruned_heads", {})
self.tie_word_embeddings = kwargs.pop(
"tie_word_embeddings", True
) # Whether input and output word embeddings should be tied for all MLM, LM and Seq2Seq models.
        # `is_decoder` is used in encoder-decoder models to differentiate the encoder from the decoder
self.is_encoder_decoder = kwargs.pop("is_encoder_decoder", False)
self.is_decoder = kwargs.pop("is_decoder", False)
self.cross_attention_hidden_size = kwargs.pop("cross_attention_hidden_size", None)
self.add_cross_attention = kwargs.pop("add_cross_attention", False)
self.tie_encoder_decoder = kwargs.pop("tie_encoder_decoder", False)
# Parameters for sequence generation
self.max_length = kwargs.pop("max_length", 20)
self.min_length = kwargs.pop("min_length", 0)
self.do_sample = kwargs.pop("do_sample", False)
self.early_stopping = kwargs.pop("early_stopping", False)
self.num_beams = kwargs.pop("num_beams", 1)
self.num_beam_groups = kwargs.pop("num_beam_groups", 1)
self.diversity_penalty = kwargs.pop("diversity_penalty", 0.0)
self.temperature = kwargs.pop("temperature", 1.0)
self.top_k = kwargs.pop("top_k", 50)
self.top_p = kwargs.pop("top_p", 1.0)
self.typical_p = kwargs.pop("typical_p", 1.0)
self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0)
self.length_penalty = kwargs.pop("length_penalty", 1.0)
self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0)
self.encoder_no_repeat_ngram_size = kwargs.pop("encoder_no_repeat_ngram_size", 0)
self.bad_words_ids = kwargs.pop("bad_words_ids", None)
self.num_return_sequences = kwargs.pop("num_return_sequences", 1)
self.chunk_size_feed_forward = kwargs.pop("chunk_size_feed_forward", 0)
self.output_scores = kwargs.pop("output_scores", False)
self.return_dict_in_generate = kwargs.pop("return_dict_in_generate", False)
self.forced_bos_token_id = kwargs.pop("forced_bos_token_id", None)
self.forced_eos_token_id = kwargs.pop("forced_eos_token_id", None)
self.remove_invalid_values = kwargs.pop("remove_invalid_values", False)
self.exponential_decay_length_penalty = kwargs.pop("exponential_decay_length_penalty", None)
# Fine-tuning task arguments
self.architectures = kwargs.pop("architectures", None)
self.finetuning_task = kwargs.pop("finetuning_task", None)
self.id2label = kwargs.pop("id2label", None)
self.label2id = kwargs.pop("label2id", None)
if self.id2label is not None:
kwargs.pop("num_labels", None)
self.id2label = dict((int(key), value) for key, value in self.id2label.items())
# Keys are always strings in JSON so convert ids to int here.
else:
self.num_labels = kwargs.pop("num_labels", 2)
if self.torch_dtype is not None and isinstance(self.torch_dtype, str):
# we will start using self.torch_dtype in v5, but to be consistent with
# from_pretrained's torch_dtype arg convert it to an actual torch.dtype object
if is_torch_available():
import torch
self.torch_dtype = getattr(torch, self.torch_dtype)
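        # Editorial note: a config saved with torch_dtype "float16" is re-hydrated here to
        # torch.float16 when torch is installed; without torch the plain string is kept.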
# Tokenizer arguments TODO: eventually tokenizer and models should share the same config
self.tokenizer_class = kwargs.pop("tokenizer_class", None)
self.prefix = kwargs.pop("prefix", None)
self.bos_token_id = kwargs.pop("bos_token_id", None)
self.pad_token_id = kwargs.pop("pad_token_id", None)
self.eos_token_id = kwargs.pop("eos_token_id", None)
self.sep_token_id = kwargs.pop("sep_token_id", None)
self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None)
# task specific arguments
self.task_specific_params = kwargs.pop("task_specific_params", None)
# regression / multi-label classification
self.problem_type = kwargs.pop("problem_type", None)
allowed_problem_types = ("regression", "single_label_classification", "multi_label_classification")
if self.problem_type is not None and self.problem_type not in allowed_problem_types:
raise ValueError(
f"The config parameter `problem_type` was not understood: received {self.problem_type} "
"but only 'regression', 'single_label_classification' and 'multi_label_classification' are valid."
)
# TPU arguments
if kwargs.pop("xla_device", None) is not None:
logger.warning(
"The `xla_device` argument has been deprecated in v4.4.0 of Transformers. It is ignored and you can "
"safely remove it from your `config.json` file."
)
# Name or path to the pretrained checkpoint
self._name_or_path = str(kwargs.pop("name_or_path", ""))
# Drop the transformers version info
self.transformers_version = kwargs.pop("transformers_version", None)
# Deal with gradient checkpointing
if kwargs.get("gradient_checkpointing", False):
warnings.warn(
"Passing `gradient_checkpointing` to a config initialization is deprecated and will be removed in v5 "
"Transformers. Using `model.gradient_checkpointing_enable()` instead, or if you are using the "
"`Trainer` API, pass `gradient_checkpointing=True` in your `TrainingArguments`."
)
# Additional attributes without default values
for key, value in kwargs.items():
try:
setattr(self, key, value)
except AttributeError as err:
logger.error(f"Can't set {key} with value {value} for {self}")
raise err
@property
def name_or_path(self) -> str:
return getattr(self, "_name_or_path", None)
@name_or_path.setter
def name_or_path(self, value):
self._name_or_path = str(value) # Make sure that name_or_path is a string (for JSON encoding)
@property
def use_return_dict(self) -> bool:
"""
`bool`: Whether or not return [`~utils.ModelOutput`] instead of tuples.
"""
# If torchscript is set, force `return_dict=False` to avoid jit errors
return self.return_dict and not self.torchscript
@property
def num_labels(self) -> int:
"""
`int`: The number of labels for classification models.
"""
return len(self.id2label)
@num_labels.setter
def num_labels(self, num_labels: int):
if not hasattr(self, "id2label") or self.id2label is None or len(self.id2label) != num_labels:
self.id2label = {i: f"LABEL_{i}" for i in range(num_labels)}
self.label2id = dict(zip(self.id2label.values(), self.id2label.keys()))
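    # Editorial example: setting `config.num_labels = 3` produces
    #     id2label = {0: "LABEL_0", 1: "LABEL_1", 2: "LABEL_2"}
    #     label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}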
def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
"""
Save a configuration object to the directory `save_directory`, so that it can be re-loaded using the
[`~PretrainedConfig.from_pretrained`] class method.
Args:
save_directory (`str` or `os.PathLike`):
Directory where the configuration JSON file will be saved (will be created if it does not exist).
push_to_hub (`bool`, *optional*, defaults to `False`):
Whether or not to push your model to the Hugging Face model hub after saving it.
<Tip warning={true}>
Using `push_to_hub=True` will synchronize the repository you are pushing to with `save_directory`,
which requires `save_directory` to be a local clone of the repo you are pushing to if it's an existing
folder. Pass along `temp_dir=True` to use a temporary directory instead.
</Tip>
kwargs:
Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
"""
if os.path.isfile(save_directory):
raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
if push_to_hub:
commit_message = kwargs.pop("commit_message", None)
repo = self._create_or_get_repo(save_directory, **kwargs)
os.makedirs(save_directory, exist_ok=True)
# If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be
# loaded from the Hub.
if self._auto_class is not None:
custom_object_save(self, save_directory, config=self)
# If we save using the predefined names, we can load using `from_pretrained`
output_config_file = os.path.join(save_directory, CONFIG_NAME)
self.to_json_file(output_config_file, use_diff=True)
logger.info(f"Configuration saved in {output_config_file}")
if push_to_hub:
url = self._push_to_hub(repo, commit_message=commit_message)
logger.info(f"Configuration pushed to the hub in this commit: {url}")
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
r"""
Instantiate a [`PretrainedConfig`] (or a derived class) from a pretrained model configuration.
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a pretrained model configuration hosted inside a model repo on
huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or
namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`.
- a path to a *directory* containing a configuration file saved using the
[`~PretrainedConfig.save_pretrained`] method, e.g., `./my_model_directory/`.
- a path or url to a saved configuration JSON *file*, e.g., `./my_model_directory/configuration.json`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force to (re-)download the configuration files and override the cached versions if
they exist.
resume_download (`bool`, *optional*, defaults to `False`):
Whether or not to delete incompletely received file. Attempts to resume the download if such a file
exists.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
use_auth_token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `transformers-cli login` (stored in `~/.huggingface`).
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
return_unused_kwargs (`bool`, *optional*, defaults to `False`):
If `False`, then this function returns just the final configuration object.
                If `True`, then this function returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a
dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the
part of `kwargs` which has not been used to update `config` and is otherwise ignored.
kwargs (`Dict[str, Any]`, *optional*):
The values in kwargs of any keys which are configuration attributes will be used to override the loaded
values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
by the `return_unused_kwargs` keyword parameter.
<Tip>
Passing `use_auth_token=True` is required when you want to use a private model.
</Tip>
Returns:
[`PretrainedConfig`]: The configuration object instantiated from this pretrained model.
Examples:
```python
# We can't instantiate directly the base class *PretrainedConfig* so let's show the examples on a
# derived class: BertConfig
config = BertConfig.from_pretrained(
"bert-base-uncased"
) # Download configuration from huggingface.co and cache.
config = BertConfig.from_pretrained(
"./test/saved_model/"
) # E.g. config (or model) was saved using *save_pretrained('./test/saved_model/')*
config = BertConfig.from_pretrained("./test/saved_model/my_configuration.json")
config = BertConfig.from_pretrained("bert-base-uncased", output_attentions=True, foo=False)
assert config.output_attentions == True
config, unused_kwargs = BertConfig.from_pretrained(
"bert-base-uncased", output_attentions=True, foo=False, return_unused_kwargs=True
)
assert config.output_attentions == True
assert unused_kwargs == {"foo": False}
```"""
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
)
return cls.from_dict(config_dict, **kwargs)
@classmethod
def get_config_dict(
cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""
From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a
[`PretrainedConfig`] using `from_dict`.
Parameters:
pretrained_model_name_or_path (`str` or `os.PathLike`):
The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
Returns:
`Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the configuration object.
"""
original_kwargs = copy.deepcopy(kwargs)
# Get config dict associated with the base config file
config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
# That config file may point us toward another config file to use.
if "configuration_files" in config_dict:
configuration_file = get_configuration_file(config_dict["configuration_files"])
config_dict, kwargs = cls._get_config_dict(
pretrained_model_name_or_path, _configuration_file=configuration_file, **original_kwargs
)
return config_dict, kwargs
@classmethod
def _get_config_dict(
cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
cache_dir = kwargs.pop("cache_dir", None)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
use_auth_token = kwargs.pop("use_auth_token", None)
local_files_only = kwargs.pop("local_files_only", False)
revision = kwargs.pop("revision", None)
from_pipeline = kwargs.pop("_from_pipeline", None)
from_auto_class = kwargs.pop("_from_auto", False)
user_agent = {"file_type": "config", "from_auto_class": from_auto_class}
if from_pipeline is not None:
user_agent["using_pipeline"] = from_pipeline
if is_offline_mode() and not local_files_only:
logger.info("Offline mode: forcing local_files_only=True")
local_files_only = True
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
if os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
config_file = pretrained_model_name_or_path
else:
configuration_file = kwargs.pop("_configuration_file", CONFIG_NAME)
if os.path.isdir(pretrained_model_name_or_path):
config_file = os.path.join(pretrained_model_name_or_path, configuration_file)
else:
config_file = hf_bucket_url(
pretrained_model_name_or_path, filename=configuration_file, revision=revision, mirror=None
)
try:
# Load from URL or cache if already cached
resolved_config_file = cached_path(
config_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
user_agent=user_agent,
)
except RepositoryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier listed on "
"'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having "
"permission to this repo with `use_auth_token` or log in with `huggingface-cli login` and pass "
"`use_auth_token=True`."
)
except RevisionNotFoundError:
raise EnvironmentError(
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for this "
f"model name. Check the model page at 'https://huggingface.co/{pretrained_model_name_or_path}' for "
"available revisions."
)
except EntryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} does not appear to have a file named {configuration_file}."
)
except HTTPError as err:
raise EnvironmentError(
f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
)
except ValueError:
raise EnvironmentError(
f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it in the cached "
f"files and it looks like {pretrained_model_name_or_path} is not the path to a directory containing a "
"{configuration_file} file.\nCheckout your internet connection or see how to run the library in "
"offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'."
)
except EnvironmentError:
raise EnvironmentError(
f"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from "
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
f"containing a {configuration_file} file"
)
try:
# Load config dict
config_dict = cls._dict_from_json_file(resolved_config_file)
except (json.JSONDecodeError, UnicodeDecodeError):
raise EnvironmentError(
f"It looks like the config file at '{resolved_config_file}' is not a valid JSON file."
)
if resolved_config_file == config_file:
logger.info(f"loading configuration file {config_file}")
else:
logger.info(f"loading configuration file {config_file} from cache at {resolved_config_file}")
return config_dict, kwargs
@classmethod
def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> "PretrainedConfig":
"""
Instantiates a [`PretrainedConfig`] from a Python dictionary of parameters.
Args:
config_dict (`Dict[str, Any]`):
Dictionary that will be used to instantiate the configuration object. Such a dictionary can be
retrieved from a pretrained checkpoint by leveraging the [`~PretrainedConfig.get_config_dict`] method.
kwargs (`Dict[str, Any]`):
Additional parameters from which to initialize the configuration object.
Returns:
[`PretrainedConfig`]: The configuration object instantiated from those parameters.
"""
return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
config = cls(**config_dict)
if hasattr(config, "pruned_heads"):
config.pruned_heads = dict((int(key), value) for key, value in config.pruned_heads.items())
# Update config with kwargs if needed
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
setattr(config, key, value)
if key != "torch_dtype":
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
logger.info(f"Model config {config}")
if return_unused_kwargs:
return config, kwargs
else:
return config
@classmethod
def from_json_file(cls, json_file: Union[str, os.PathLike]) -> "PretrainedConfig":
"""
Instantiates a [`PretrainedConfig`] from the path to a JSON file of parameters.
Args:
json_file (`str` or `os.PathLike`):
Path to the JSON file containing the parameters.
Returns:
[`PretrainedConfig`]: The configuration object instantiated from that JSON file.
"""
config_dict = cls._dict_from_json_file(json_file)
return cls(**config_dict)
@classmethod
def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return json.loads(text)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return f"{self.__class__.__name__} {self.to_json_string()}"
def to_diff_dict(self) -> Dict[str, Any]:
"""
Removes all attributes from config which correspond to the default config attributes for better readability and
serializes to a Python dictionary.
Returns:
            `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
config_dict = self.to_dict()
# get the default config dict
default_config_dict = PretrainedConfig().to_dict()
# get class specific config dict
class_config_dict = self.__class__().to_dict() if not self.is_composition else {}
serializable_config_dict = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if (
key not in default_config_dict
or key == "transformers_version"
or value != default_config_dict[key]
or (key in class_config_dict and value != class_config_dict[key])
):
serializable_config_dict[key] = value
self.dict_torch_dtype_to_str(serializable_config_dict)
return serializable_config_dict
def to_dict(self) -> Dict[str, Any]:
"""
Serializes this instance to a Python dictionary.
Returns:
`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
output = copy.deepcopy(self.__dict__)
if hasattr(self.__class__, "model_type"):
output["model_type"] = self.__class__.model_type
if "_auto_class" in output:
del output["_auto_class"]
# Transformers version when serializing the model
output["transformers_version"] = __version__
self.dict_torch_dtype_to_str(output)
return output
def to_json_string(self, use_diff: bool = True) -> str:
"""
Serializes this instance to a JSON string.
Args:
use_diff (`bool`, *optional*, defaults to `True`):
If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`
is serialized to JSON string.
Returns:
`str`: String containing all the attributes that make up this configuration instance in JSON format.
"""
if use_diff is True:
config_dict = self.to_diff_dict()
else:
config_dict = self.to_dict()
return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True):
"""
Save this instance to a JSON file.
Args:
json_file_path (`str` or `os.PathLike`):
Path to the JSON file in which this configuration instance's parameters will be saved.
use_diff (`bool`, *optional*, defaults to `True`):
If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`
is serialized to JSON file.
"""
with open(json_file_path, "w", encoding="utf-8") as writer:
writer.write(self.to_json_string(use_diff=use_diff))
def update(self, config_dict: Dict[str, Any]):
"""
Updates attributes of this class with attributes from `config_dict`.
Args:
config_dict (`Dict[str, Any]`): Dictionary of attributes that should be updated for this class.
"""
for key, value in config_dict.items():
setattr(self, key, value)
def update_from_string(self, update_str: str):
"""
Updates attributes of this class with attributes from `update_str`.
The expected format is ints, floats and strings as is, and for booleans use `true` or `false`. For example:
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
The keys to change have to already exist in the config object.
Args:
update_str (`str`): String with attributes that should be updated for this class.
"""
d = dict(x.split("=") for x in update_str.split(","))
for k, v in d.items():
if not hasattr(self, k):
raise ValueError(f"key {k} isn't in the original config dict")
old_v = getattr(self, k)
if isinstance(old_v, bool):
if v.lower() in ["true", "1", "y", "yes"]:
v = True
elif v.lower() in ["false", "0", "n", "no"]:
v = False
else:
raise ValueError(f"can't derive true or false from {v} (key {k})")
elif isinstance(old_v, int):
v = int(v)
elif isinstance(old_v, float):
v = float(v)
elif not isinstance(old_v, str):
raise ValueError(
f"You can only update int, float, bool or string values in the config, got {v} for key {k}"
)
setattr(self, k, v)
def dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None:
"""
Checks whether the passed dictionary and its nested dicts have a *torch_dtype* key and if it's not None,
        converts torch.dtype to a string of just the type. For example, `torch.float32` gets converted into the
        *"float32"* string, which can then be stored in the JSON format.
"""
if d.get("torch_dtype", None) is not None and not isinstance(d["torch_dtype"], str):
d["torch_dtype"] = str(d["torch_dtype"]).split(".")[1]
for value in d.values():
if isinstance(value, dict):
self.dict_torch_dtype_to_str(value)
@classmethod
def register_for_auto_class(cls, auto_class="AutoConfig"):
"""
Register this class with a given auto class. This should only be used for custom configurations as the ones in
the library are already mapped with `AutoConfig`.
<Tip warning={true}>
This API is experimental and may have some slight breaking changes in the next releases.
</Tip>
Args:
auto_class (`str` or `type`, *optional*, defaults to `"AutoConfig"`):
The auto class to register this new configuration with.
"""
if not isinstance(auto_class, str):
auto_class = auto_class.__name__
import transformers.models.auto as auto_module
if not hasattr(auto_module, auto_class):
raise ValueError(f"{auto_class} is not a valid auto class.")
cls._auto_class = auto_class
def get_configuration_file(configuration_files: List[str]) -> str:
"""
Get the configuration file to use for this version of transformers.
Args:
configuration_files (`List[str]`): The list of available configuration files.
Returns:
`str`: The configuration file to use.
"""
configuration_files_map = {}
for file_name in configuration_files:
search = _re_configuration_file.search(file_name)
if search is not None:
v = search.groups()[0]
configuration_files_map[v] = file_name
available_versions = sorted(configuration_files_map.keys())
# Defaults to FULL_CONFIGURATION_FILE and then try to look at some newer versions.
configuration_file = CONFIG_NAME
transformers_version = version.parse(__version__)
for v in available_versions:
if version.parse(v) <= transformers_version:
configuration_file = configuration_files_map[v]
else:
# No point going further since the versions are sorted.
break
return configuration_file
PretrainedConfig.push_to_hub = copy_func(PretrainedConfig.push_to_hub)
PretrainedConfig.push_to_hub.__doc__ = PretrainedConfig.push_to_hub.__doc__.format(
object="config", object_class="AutoConfig", object_files="configuration file"
)
|
import asyncio
import aiohttp
import logging
from lavaplayer.exceptions import NodeError
from .objects import (
Info,
PlayerUpdateEvent,
TrackStartEvent,
TrackEndEvent,
TrackExceptionEvent,
TrackStuckEvent,
WebSocketClosedEvent,
)
from .emitter import Emitter
import typing as t
if t.TYPE_CHECKING:
from .client import LavalinkClient
_LOGGER = logging.getLogger("lavaplayer.ws")
class WS:
def __init__(
self,
client: "LavalinkClient",
host: str,
port: int,
is_ssl: bool = False,
) -> None:
self.ws = None
self.ws_url = f"{"wss" if is_ssl else "ws"}://{host}:{port}"
self.client = client
self._headers = client._headers
self._loop = client._loop
self.emitter: Emitter = client.event_manager
self.is_connect: bool = False
async def _connect(self):
async with aiohttp.ClientSession(headers=self._headers, loop=self._loop) as session:
self.session = session
try:
self.ws = await self.session.ws_connect(self.ws_url)
if session is None:
await self.check_connection()
except (aiohttp.ClientConnectorError, aiohttp.WSServerHandshakeError, aiohttp.ServerDisconnectedError) as error:
if isinstance(error, aiohttp.ClientConnectorError):
_LOGGER.error(f"Could not connect to websocket: {error}")
_LOGGER.warning("Reconnecting to websocket after 10 seconds")
await asyncio.sleep(10)
await self._connect()
return
elif isinstance(error, aiohttp.WSServerHandshakeError):
if error.status in (403, 401): # Unauthorized or Forbidden
_LOGGER.warning("Password authentication failed - closing websocket")
return
_LOGGER.warning("Please check your websocket port - closing websocket")
elif isinstance(error, aiohttp.ServerDisconnectedError):
_LOGGER.error(f"Could not connect to websocket: {error}")
_LOGGER.warning("Reconnecting to websocket after 10 seconds")
await asyncio.sleep(10)
await self._connect()
return
_LOGGER.info("Connected to websocket")
self.is_connect = True
async for msg in self.ws:
if msg.type == aiohttp.WSMsgType.TEXT:
await self.callback(msg.json())
elif msg.type == aiohttp.WSMsgType.CLOSED:
_LOGGER.error("Websocket closed")
break
elif msg.type == aiohttp.WSMsgType.ERROR:
_LOGGER.error(msg.data)
break
async def check_connection(self):
while self.ws.closed is None or not self.ws.closed or not self.is_connected:
_LOGGER.warning("Websocket closed unexpectedly - reconnecting in 10 seconds")
if self.client.nodes:
self.client.nodes.clear()
await asyncio.sleep(10)
await self._connect()
async def callback(self, payload: dict):
if payload["op"] == "stats":
self.client.info = Info(
playing_players=payload["playingPlayers"],
memory_used=payload["memory"]["used"],
memory_free=payload["memory"]["free"],
players=payload["players"],
uptime=payload["uptime"]
)
elif payload["op"] == "playerUpdate":
data = PlayerUpdateEvent(
guild_id=payload["guildId"],
time=payload["state"]["time"],
position=payload["state"].get("position"),
connected=payload["state"]["connected"],
)
self.emitter.emit("playerUpdate", data)
elif payload["op"] == "event":
if not payload.get("track"):
return
track = await self.client._decodetrack(payload["track"])
guild_id = int(payload["guildId"])
try:
node = await self.client.get_guild_node(guild_id)
except NodeError:
node = None
if payload["type"] == "TrackStartEvent":
self.emitter.emit("TrackStartEvent", TrackStartEvent(track, guild_id))
elif payload["type"] == "TrackEndEvent":
self.emitter.emit("TrackEndEvent", TrackEndEvent(track, guild_id, payload["reason"]))
if not node:
return
if not node.queue:
return
if node.repeat:
await self.client.play(guild_id, track, node.queue[0].requester, True)
return
del node.queue[0]
await self.client.set_guild_node(guild_id, node)
if len(node.queue) != 0:
await self.client.play(guild_id, node.queue[0], node.queue[0].requester, True)
elif payload["type"] == "TrackExceptionEvent":
self.emitter.emit("TrackExceptionEvent", TrackExceptionEvent(track, guild_id, payload["exception"], payload["message"], payload["severity"], payload["cause"]))
elif payload["type"] == "TrackStuckEvent":
self.emitter.emit("TrackStuckEvent", TrackStuckEvent(track, guild_id, payload["thresholdMs"]))
elif payload["type"] == "WebSocketClosedEvent":
self.emitter.emit("WebSocketClosedEvent", WebSocketClosedEvent(track, guild_id, payload["code"], payload["reason"], payload["byRemote"]))
@property
def is_connected(self) -> bool:
return self.is_connect and self.ws.closed is False
    async def send(self, payload):  # payload must be a JSON-serialisable dict
if not self.is_connected:
_LOGGER.error("Not connected to websocket")
await self.check_connection()
return
await self.ws.send_json(payload)
|
import asyncio
import aiohttp
import logging
from lavaplayer.exceptions import NodeError
from .objects import (
Info,
PlayerUpdateEvent,
TrackStartEvent,
TrackEndEvent,
TrackExceptionEvent,
TrackStuckEvent,
WebSocketClosedEvent,
)
from .emitter import Emitter
import typing as t
if t.TYPE_CHECKING:
from .client import LavalinkClient
_LOGGER = logging.getLogger("lavaplayer.ws")
class WS:
def __init__(
self,
client: "LavalinkClient",
host: str,
port: int,
is_ssl: bool = False,
) -> None:
self.ws = None
self.ws_url = f"{'wss' if is_ssl else 'ws'}://{host}:{port}"
self.client = client
self._headers = client._headers
self._loop = client._loop
self.emitter: Emitter = client.event_manager
self.is_connect: bool = False
async def _connect(self):
async with aiohttp.ClientSession(headers=self._headers, loop=self._loop) as session:
self.session = session
try:
self.ws = await self.session.ws_connect(self.ws_url)
if session is None:
await self.check_connection()
except (aiohttp.ClientConnectorError, aiohttp.WSServerHandshakeError, aiohttp.ServerDisconnectedError) as error:
if isinstance(error, aiohttp.ClientConnectorError):
_LOGGER.error(f"Could not connect to websocket: {error}")
_LOGGER.warning("Reconnecting to websocket after 10 seconds")
await asyncio.sleep(10)
await self._connect()
return
elif isinstance(error, aiohttp.WSServerHandshakeError):
if error.status in (403, 401): # Unauthorized or Forbidden
_LOGGER.warning("Password authentication failed - closing websocket")
return
_LOGGER.warning("Please check your websocket port - closing websocket")
elif isinstance(error, aiohttp.ServerDisconnectedError):
_LOGGER.error(f"Could not connect to websocket: {error}")
_LOGGER.warning("Reconnecting to websocket after 10 seconds")
await asyncio.sleep(10)
await self._connect()
return
_LOGGER.info("Connected to websocket")
self.is_connect = True
async for msg in self.ws:
if msg.type == aiohttp.WSMsgType.TEXT:
await self.callback(msg.json())
elif msg.type == aiohttp.WSMsgType.CLOSED:
_LOGGER.error("Websocket closed")
break
elif msg.type == aiohttp.WSMsgType.ERROR:
_LOGGER.error(msg.data)
break
async def check_connection(self):
while self.ws.closed is None or not self.ws.closed or not self.is_connected:
_LOGGER.warning("Websocket closed unexpectedly - reconnecting in 10 seconds")
if self.client.nodes:
self.client.nodes.clear()
await asyncio.sleep(10)
await self._connect()
async def callback(self, payload: dict):
if payload["op"] == "stats":
self.client.info = Info(
playing_players=payload["playingPlayers"],
memory_used=payload["memory"]["used"],
memory_free=payload["memory"]["free"],
players=payload["players"],
uptime=payload["uptime"]
)
elif payload["op"] == "playerUpdate":
data = PlayerUpdateEvent(
guild_id=payload["guildId"],
time=payload["state"]["time"],
position=payload["state"].get("position"),
connected=payload["state"]["connected"],
)
self.emitter.emit("playerUpdate", data)
elif payload["op"] == "event":
if not payload.get("track"):
return
track = await self.client._decodetrack(payload["track"])
guild_id = int(payload["guildId"])
try:
node = await self.client.get_guild_node(guild_id)
except NodeError:
node = None
if payload["type"] == "TrackStartEvent":
self.emitter.emit("TrackStartEvent", TrackStartEvent(track, guild_id))
elif payload["type"] == "TrackEndEvent":
self.emitter.emit("TrackEndEvent", TrackEndEvent(track, guild_id, payload["reason"]))
if not node:
return
if not node.queue:
return
if node.repeat:
await self.client.play(guild_id, track, node.queue[0].requester, True)
return
del node.queue[0]
await self.client.set_guild_node(guild_id, node)
if len(node.queue) != 0:
await self.client.play(guild_id, node.queue[0], node.queue[0].requester, True)
elif payload["type"] == "TrackExceptionEvent":
self.emitter.emit("TrackExceptionEvent", TrackExceptionEvent(track, guild_id, payload["exception"], payload["message"], payload["severity"], payload["cause"]))
elif payload["type"] == "TrackStuckEvent":
self.emitter.emit("TrackStuckEvent", TrackStuckEvent(track, guild_id, payload["thresholdMs"]))
elif payload["type"] == "WebSocketClosedEvent":
self.emitter.emit("WebSocketClosedEvent", WebSocketClosedEvent(track, guild_id, payload["code"], payload["reason"], payload["byRemote"]))
@property
def is_connected(self) -> bool:
return self.is_connect and self.ws.closed is False
    async def send(self, payload):  # payload must be a JSON-serialisable dict
if not self.is_connected:
_LOGGER.error("Not connected to websocket")
await self.check_connection()
return
await self.ws.send_json(payload)
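# Illustrative sketch (not part of lavaplayer): WS.callback above is an op-code
# dispatcher that turns raw Lavalink payloads into typed event objects and hands
# them to the client's emitter via emit(name, data). A minimal emitter following
# that same pattern could look like the hypothetical class below; it is not wired
# into WS and only demonstrates the listener registry / emit flow.
class _ExampleEmitter:
    def __init__(self):
        # Maps an event name to the callbacks registered for it.
        self._listeners = {}

    def add_listener(self, event_name, callback):
        self._listeners.setdefault(event_name, []).append(callback)

    def emit(self, event_name, data):
        # Invoke every callback registered for this event name with the event data.
        for callback in self._listeners.get(event_name, []):
            callback(data)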
|
import base64
import logging
import re
from html import unescape as html_unescape
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream import HTTPStream
from streamlink.utils.parse import parse_json
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
r'https?://(?:www\.)?huya\.com/(?P<channel>[^/]+)'
))
class Huya(Plugin):
_re_stream = re.compile(r'"stream"\s?:\s?"([^"]+)"')
_schema_data = validate.Schema(
{
# 'status': int,
# 'msg': validate.any(None, str),
'data': [{
'gameStreamInfoList': [{
'sCdnType': str,
'sStreamName': str,
'sFlvUrl': str,
'sFlvUrlSuffix': str,
'sFlvAntiCode': validate.all(str, validate.transform(lambda v: html_unescape(v))),
# 'sHlsUrl': str,
# 'sHlsUrlSuffix': str,
# 'sHlsAntiCode': validate.all(str, validate.transform(lambda v: html_unescape(v))),
validate.optional('iIsMultiStream'): int,
'iPCPriorityRate': int,
}]
}],
# 'vMultiStreamInfo': [{
# 'sDisplayName': str,
# 'iBitRate': int,
# }],
},
validate.get('data'),
validate.get(0),
validate.get('gameStreamInfoList'),
)
QUALITY_WEIGHTS = {}
@classmethod
def stream_weight(cls, key):
weight = cls.QUALITY_WEIGHTS.get(key)
if weight:
return weight, 'huya'
return Plugin.stream_weight(key)
def _get_streams(self):
res = self.session.http.get(self.url)
data = self._re_stream.search(res.text)
if not data:
return
data = parse_json(base64.b64decode(data.group(1)), schema=self._schema_data)
for info in data:
log.trace(f'{info!r}')
            flv_url = f'{info["sFlvUrl"]}/{info["sStreamName"]}.{info["sFlvUrlSuffix"]}?{info["sFlvAntiCode"]}'
            name = f'source_{info["sCdnType"].lower()}'
self.QUALITY_WEIGHTS[name] = info['iPCPriorityRate']
yield name, HTTPStream(self.session, flv_url)
log.debug(f'QUALITY_WEIGHTS: {self.QUALITY_WEIGHTS!r}')
__plugin__ = Huya
|
import base64
import logging
import re
from html import unescape as html_unescape
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream import HTTPStream
from streamlink.utils.parse import parse_json
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
r'https?://(?:www\.)?huya\.com/(?P<channel>[^/]+)'
))
class Huya(Plugin):
_re_stream = re.compile(r'"stream"\s?:\s?"([^"]+)"')
_schema_data = validate.Schema(
{
# 'status': int,
# 'msg': validate.any(None, str),
'data': [{
'gameStreamInfoList': [{
'sCdnType': str,
'sStreamName': str,
'sFlvUrl': str,
'sFlvUrlSuffix': str,
'sFlvAntiCode': validate.all(str, validate.transform(lambda v: html_unescape(v))),
# 'sHlsUrl': str,
# 'sHlsUrlSuffix': str,
# 'sHlsAntiCode': validate.all(str, validate.transform(lambda v: html_unescape(v))),
validate.optional('iIsMultiStream'): int,
'iPCPriorityRate': int,
}]
}],
# 'vMultiStreamInfo': [{
# 'sDisplayName': str,
# 'iBitRate': int,
# }],
},
validate.get('data'),
validate.get(0),
validate.get('gameStreamInfoList'),
)
QUALITY_WEIGHTS = {}
@classmethod
def stream_weight(cls, key):
weight = cls.QUALITY_WEIGHTS.get(key)
if weight:
return weight, 'huya'
return Plugin.stream_weight(key)
def _get_streams(self):
res = self.session.http.get(self.url)
data = self._re_stream.search(res.text)
if not data:
return
data = parse_json(base64.b64decode(data.group(1)), schema=self._schema_data)
for info in data:
log.trace(f'{info!r}')
flv_url = f'{info["sFlvUrl"]}/{info["sStreamName"]}.{info["sFlvUrlSuffix"]}?{info["sFlvAntiCode"]}'
name = f'source_{info["sCdnType"].lower()}'
self.QUALITY_WEIGHTS[name] = info['iPCPriorityRate']
yield name, HTTPStream(self.session, flv_url)
log.debug(f'QUALITY_WEIGHTS: {self.QUALITY_WEIGHTS!r}')
__plugin__ = Huya
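# Illustrative sketch (not part of the plugin): the channel page embeds a base64
# encoded JSON blob under a "stream" key; _get_streams above extracts it with
# _re_stream, base64-decodes it and validates it against _schema_data. A stripped
# down version of that extraction, without the schema step, is sketched below
# (hypothetical helper; it reuses the base64/re imports at the top of this file).
import json

def _extract_stream_blob(page_html):
    match = re.search(r'"stream"\s?:\s?"([^"]+)"', page_html)
    if match is None:
        return None
    # The captured group is base64-encoded JSON describing the available streams.
    return json.loads(base64.b64decode(match.group(1)))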
|
#!/usr/bin/env python
"""
Koala Bot Base Cog code and additional base cog functions
Commented using reStructuredText (reST)
"""
# Futures
# Built-in/Generic Imports
import os
import time
import re
import aiohttp
import logging
from concurrent.futures import ThreadPoolExecutor
logging.basicConfig(filename='TwitchAlert.log')
# Own modules
import KoalaBot
from utils.KoalaColours import *
from utils.KoalaUtils import error_embed, is_channel_in_guild, extract_id
from utils import KoalaDBManager
# Libs
import discord
from discord.ext import commands, tasks
from dotenv import load_dotenv
import asyncio
# Constants
load_dotenv()
DEFAULT_MESSAGE = ""
TWITCH_ICON = "https://cdn3.iconfinder.com/data/icons/social-messaging-ui-color-shapes-2-free" \
"/128/social-twitch-circle-512.png"
TWITCH_CLIENT_ID = os.environ.get('TWITCH_TOKEN')
TWITCH_SECRET = os.environ.get('TWITCH_SECRET')
TWITCH_USERNAME_REGEX = "^[a-z0-9][a-z0-9_]{3,24}$"
LOOP_CHECK_LIVE_DELAY = 1
TEAMS_LOOP_CHECK_LIVE_DELAY = 1
REFRESH_TEAMS_DELAY = 5
# Variables
def twitch_is_enabled(ctx):
"""
    A check for whether the guild has enabled the TwitchAlert extension,
    intended for use as e.g. @commands.check(twitch_is_enabled)
    :param ctx: The context of the message
    :return: True if the guild has TwitchAlert enabled, False otherwise
"""
try:
result = KoalaBot.check_guild_has_ext(ctx, "TwitchAlert")
except PermissionError:
result = False
return result
class TwitchAlert(commands.Cog):
"""
A discord.py cog for alerting when someone goes live on twitch
"""
def __init__(self, bot, database_manager=None):
"""
Initialises local variables
:param bot: The bot client for this cog
"""
if not database_manager:
database_manager = KoalaBot.database_manager
self.bot = bot
database_manager.create_base_tables()
database_manager.insert_extension("TwitchAlert", 0, True, True)
self.ta_database_manager = TwitchAlertDBManager(database_manager, bot)
self.ta_database_manager.create_tables()
self.loop_thread = None
self.loop_team_thread = None
self.running = False
self.stop_loop = False
@commands.command(name="twitchEditMsg", aliases=["edit_default_message"])
@commands.check(KoalaBot.is_admin)
@commands.check(twitch_is_enabled)
async def edit_default_message(self, ctx, raw_channel_id, *default_live_message):
"""
Edit the default message put in a Twitch Alert Notification
:param ctx: The discord context of the command
:param raw_channel_id: The channel ID where the twitch alert is being used
:param default_live_message: The default live message of users within this Twitch Alert,
leave empty for program default
:return:
"""
try:
channel_id = extract_id(raw_channel_id)
except TypeError:
channel_id = ctx.message.channel.id
default_live_message = (raw_channel_id,) + default_live_message
if not is_channel_in_guild(self.bot, ctx.message.guild.id, channel_id):
await ctx.send(embed=error_embed("The channel ID provided is either invalid, or not in this server."))
return
# Assigning default message if provided
if default_live_message is not None and default_live_message != (None,):
default_message = " ".join(default_live_message)
if len(default_message) > 1000:
await ctx.send(embed=error_embed(
"custom_message is too long, try something with less than 1000 characters"))
return
else:
default_message = None
# Creates a new Twitch Alert with the used guild ID and default message if provided
default_message = self.ta_database_manager.new_ta(ctx.message.guild.id, channel_id, default_message,
replace=True)
# Returns an embed with information altered
new_embed = discord.Embed(title="Default Message Edited", colour=KOALA_GREEN,
description=f"Guild: {ctx.message.guild.id}\n"
f"Channel: {channel_id}\n"
f"Default Message: {default_message}")
await ctx.send(embed=new_embed)
@commands.command(name="twitchViewMsg", aliases=["view_default_message"])
@commands.check(KoalaBot.is_admin)
@commands.check(twitch_is_enabled)
async def view_default_message(self, ctx, raw_channel_id=None):
"""
Shows the current default message for Twitch Alerts
:param ctx: The discord context of the command
:param raw_channel_id: The channel ID where the twitch alert is being used
leave empty for program default
:return:
"""
if raw_channel_id is None:
channel_id = ctx.message.channel.id
else:
channel_id = extract_id(raw_channel_id)
if not is_channel_in_guild(self.bot, ctx.message.guild.id, channel_id):
await ctx.send(embed=error_embed("The channel ID provided is either invalid, or not in this server."))
return
# Creates a new Twitch Alert with the used guild ID and default message if provided
default_message = self.ta_database_manager.get_default_message(channel_id)[0][0]
# Returns an embed with information altered
new_embed = discord.Embed(title="Default Message", colour=KOALA_GREEN,
description=f"Guild: {ctx.message.guild.id}\n"
f"Channel: {channel_id}\n"
f"Default Message: {default_message}")
# new_embed.set_footer(text=f"Twitch Alert ID: {new_id}")
await ctx.send(embed=new_embed)
@commands.command(name="twitchAdd", aliases=['add_user_to_twitch_alert'])
@commands.check(KoalaBot.is_admin)
@commands.check(twitch_is_enabled)
async def add_user_to_twitch_alert(self, ctx, raw_channel_id, twitch_username=None, *custom_live_message):
"""
Add a Twitch user to a Twitch Alert
:param ctx: The discord context of the command
:param raw_channel_id: The channel ID where the twitch alert is being used
:param twitch_username: The Twitch Username of the user being added (lowercase)
:param custom_live_message: the custom live message for this user's alert
:return:
"""
try:
channel_id = extract_id(raw_channel_id)
except TypeError:
custom_live_message = (twitch_username,) + custom_live_message
twitch_username = raw_channel_id
channel_id = ctx.message.channel.id
if twitch_username is None:
raise discord.errors.InvalidArgument("twitch_username is a required argument that is missing.")
elif not re.search(TWITCH_USERNAME_REGEX, twitch_username):
raise discord.errors.InvalidArgument(
"The given twitch_username is not a valid username (please use lowercase)")
# Check the channel specified is in this guild
if not is_channel_in_guild(self.bot, ctx.message.guild.id, channel_id):
await ctx.send(embed=error_embed("The channel ID provided is either invalid, or not in this server."))
return
default_message = self.ta_database_manager.new_ta(ctx.message.guild.id, channel_id)
# Setting the custom message as required
if custom_live_message is not None and custom_live_message != (None,):
custom_message = " ".join(custom_live_message)
default_message = custom_message
if len(default_message) > 1000:
await ctx.send(embed=error_embed(
"custom_message is too long, try something with less than 1000 characters"))
return
else:
custom_message = None
self.ta_database_manager.add_user_to_ta(channel_id, twitch_username, custom_message, ctx.message.guild.id)
# Response Message
new_embed = discord.Embed(title="Added User to Twitch Alert", colour=KOALA_GREEN,
description=f"Channel: {channel_id}\n"
f"User: {twitch_username}\n"
f"Message: {default_message}")
await ctx.send(embed=new_embed)
@commands.command(name="twitchRemove", aliases=['remove_user_from_twitch_alert'])
@commands.check(KoalaBot.is_admin)
@commands.check(twitch_is_enabled)
async def remove_user_from_twitch_alert(self, ctx, raw_channel_id, twitch_username=None):
"""
Removes a user from a Twitch Alert
:param ctx: the discord context
:param raw_channel_id: The discord channel ID of the Twitch Alert
:param twitch_username: The username of the user to be removed
:return:
"""
try:
channel_id = extract_id(raw_channel_id)
except TypeError:
twitch_username = raw_channel_id
channel_id = ctx.message.channel.id
if twitch_username is None:
raise discord.errors.InvalidArgument("twitch_username is a required argument that is missing.")
# Check the channel specified is in this guild
if not is_channel_in_guild(self.bot, ctx.message.guild.id, channel_id):
await ctx.send(embed=error_embed("The channel ID provided is either invalid, or not in this server."))
return
await self.ta_database_manager.remove_user_from_ta(channel_id, twitch_username)
# Response Message
new_embed = discord.Embed(title="Removed User from Twitch Alert", colour=KOALA_GREEN,
description=f"Channel: {channel_id}\n"
f"User: {twitch_username}")
await ctx.send(embed=new_embed)
@commands.command(name="twitchAddTeam", aliases=["add_team_to_twitch_alert"])
@commands.check(KoalaBot.is_admin)
@commands.check(twitch_is_enabled)
async def add_team_to_twitch_alert(self, ctx, raw_channel_id, team_name=None, *custom_live_message):
"""
Add a Twitch team to a Twitch Alert
:param ctx: The discord context of the command
:param raw_channel_id: The channel ID where the twitch alert is being used
:param team_name: The Twitch team being added (lowercase)
:param custom_live_message: the custom live message for this team's alert
:return:
"""
try:
channel_id = extract_id(raw_channel_id)
except TypeError:
custom_live_message = (team_name,) + custom_live_message
team_name = raw_channel_id
channel_id = ctx.message.channel.id
if team_name is None:
raise discord.errors.InvalidArgument("team_name is a required argument that is missing.")
elif not re.search(TWITCH_USERNAME_REGEX, team_name):
raise discord.errors.InvalidArgument(
"The given team_name is not a valid twitch team name (please use lowercase)")
# Check the channel specified is in this guild
if not is_channel_in_guild(self.bot, ctx.message.guild.id, channel_id):
await ctx.send(embed=error_embed("The channel ID provided is either invalid, or not in this server."))
return
self.ta_database_manager.new_ta(ctx.message.guild.id, channel_id)
# Setting the custom message as required
if custom_live_message is not None and custom_live_message != (None,):
default_message = " ".join(custom_live_message)
if len(default_message) > 1000:
await ctx.send(embed=error_embed(
"custom_message is too long, try something with less than 1000 characters"))
return
else:
default_message = DEFAULT_MESSAGE
self.ta_database_manager.add_team_to_ta(channel_id, team_name, default_message, ctx.message.guild.id)
# Response Message
new_embed = discord.Embed(title="Added Team to Twitch Alert", colour=KOALA_GREEN,
description=f"Channel: {channel_id}\n"
f"Team: {team_name}\n"
f"Message: {default_message}")
# new_embed.set_footer(text=f"Twitch Alert ID: {channel_id}")
await ctx.send(embed=new_embed)
@commands.command(name="twitchRemoveTeam", aliases=["remove_team_from_twitch_alert"])
@commands.check(KoalaBot.is_admin)
@commands.check(twitch_is_enabled)
async def remove_team_from_twitch_alert(self, ctx, raw_channel_id, team_name=None):
"""
Removes a team from a Twitch Alert
:param ctx: the discord context
:param raw_channel_id: The discord channel ID of the Twitch Alert
:param team_name: The Twitch team being added (lowercase)
:return:
"""
try:
channel_id = extract_id(raw_channel_id)
except TypeError:
team_name = raw_channel_id
channel_id = ctx.message.channel.id
if team_name is None:
raise discord.errors.InvalidArgument("team_name is a required argument that is missing.")
# Check the channel specified is in this guild
if not is_channel_in_guild(self.bot, ctx.message.guild.id, channel_id):
await ctx.send(embed=error_embed("The channel ID provided is either invalid, or not in this server."))
return
await self.ta_database_manager.remove_team_from_ta(channel_id, team_name)
# Response Message
new_embed = discord.Embed(title="Removed Team from Twitch Alert", colour=KOALA_GREEN,
description=f"Channel: {channel_id}\n"
f"Team: {team_name}")
await ctx.send(embed=new_embed)
@commands.command(name="twitchList", aliases=["list_twitch_alert"])
@commands.check(KoalaBot.is_admin)
@commands.check(twitch_is_enabled)
async def list_twitch_alert(self, ctx, raw_channel_id=None):
"""
Shows all current TwitchAlert users and teams in a channel
:param ctx:
:param raw_channel_id:
:return:
"""
if raw_channel_id is None:
channel_id = ctx.message.channel.id
else:
channel_id = extract_id(raw_channel_id)
if not is_channel_in_guild(self.bot, ctx.message.guild.id, channel_id):
await ctx.send(embed=error_embed("The channel ID provided is either invalid, or not in this server."))
return
embed = discord.Embed()
embed.title = "Twitch Alerts"
embed.colour = KOALA_GREEN
embed.set_footer(text=f"Channel ID: {channel_id}")
results = self.ta_database_manager.get_users_in_ta(channel_id)
if results:
users = ""
for result in results:
users += f"{result[0]}\n"
embed.add_field(name=":bust_in_silhouette: Users", value=users)
else:
embed.add_field(name=":bust_in_silhouette: Users", value="None")
results = self.ta_database_manager.get_teams_in_ta(channel_id)
if results:
teams = ""
for result in results:
teams += f"{result[0]}\n"
embed.add_field(name=":busts_in_silhouette: Teams", value=teams)
else:
embed.add_field(name=":busts_in_silhouette: Teams", value="None")
await ctx.send(embed=embed)
@commands.Cog.listener()
async def on_ready(self):
"""
When the bot is started up, the loop begins
:return:
"""
if not self.running:
self.start_loops()
def start_loops(self):
self.loop_update_teams.start()
self.loop_check_team_live.start()
self.loop_check_live.start()
self.running = True
def end_loops(self):
self.loop_update_teams.cancel()
self.loop_check_team_live.cancel()
self.loop_check_live.cancel()
self.running = False
@tasks.loop(minutes=LOOP_CHECK_LIVE_DELAY)
async def loop_check_live(self):
"""
A loop that continually checks the live status of users and
sends alerts when online, removing them when offline
:return:
"""
start = time.time()
# logging.info("TwitchAlert: User Loop Started")
sql_find_users = "SELECT twitch_username " \
"FROM UserInTwitchAlert " \
"JOIN TwitchAlerts TA on UserInTwitchAlert.channel_id = TA.channel_id " \
"JOIN (SELECT extension_id, guild_id FROM GuildExtensions " \
"WHERE extension_id = 'TwitchAlert' OR extension_id = 'All') GE on TA.guild_id = GE.guild_id;"
users = self.ta_database_manager.database_manager.db_execute_select(sql_find_users)
usernames = []
for user in users:
if not re.search(TWITCH_USERNAME_REGEX, user[0]):
sql_remove_invalid_user = "DELETE FROM UserInTwitchAlert WHERE twitch_username = ?"
self.ta_database_manager.database_manager.db_execute_commit(sql_remove_invalid_user, args=[user[0]])
else:
usernames.append(user[0])
# user_streams = self.ta_database_manager.twitch_handler.get_streams_data(usernames)
if not usernames:
return
user_streams = await self.ta_database_manager.twitch_handler.get_streams_data(usernames)
if user_streams is None:
return
# Deals with online streams
for streams_details in user_streams:
try:
if streams_details.get('type') == "live":
current_username = str.lower(streams_details.get("user_name"))
usernames.remove(current_username)
sql_find_message_id = \
"SELECT UserInTwitchAlert.channel_id, message_id, custom_message, default_message " \
"FROM UserInTwitchAlert " \
"JOIN TwitchAlerts TA on UserInTwitchAlert.channel_id = TA.channel_id " \
"JOIN (SELECT extension_id, guild_id FROM GuildExtensions " \
"WHERE extension_id = 'TwitchAlert' " \
" OR extension_id = 'All') GE on TA.guild_id = GE.guild_id " \
"WHERE twitch_username = ?;"
results = self.ta_database_manager.database_manager.db_execute_select(
sql_find_message_id, args=[current_username])
new_message_embed = None
for result in results:
channel_id = result[0]
message_id = result[1]
custom_message = result[2]
channel_default_message = result[3]
channel = self.bot.get_channel(id=channel_id)
try:
# If no Alert is posted
if message_id is None:
if new_message_embed is None:
if custom_message is not None:
message = custom_message
else:
message = channel_default_message
new_message_embed = await self.create_alert_embed(streams_details, message)
if new_message_embed is not None and channel is not None:
new_message = await channel.send(embed=new_message_embed)
sql_update_message_id = """
UPDATE UserInTwitchAlert
SET message_id = ?
WHERE channel_id = ?
AND twitch_username = ?"""
self.ta_database_manager.database_manager.db_execute_commit(
sql_update_message_id, args=[new_message.id, result[0], current_username])
except discord.errors.Forbidden as err:
logging.warning(f"TwitchAlert: {err} Name: {channel} ID: {channel.id}")
sql_remove_invalid_channel = "DELETE FROM TwitchAlerts WHERE channel_id = ?"
self.ta_database_manager.database_manager.db_execute_commit(sql_remove_invalid_channel,
args=[channel.id])
except Exception as err:
logging.error(f"TwitchAlert: User Loop error {err}")
# Deals with remaining offline streams
await self.ta_database_manager.delete_all_offline_streams(False, usernames)
time_diff = time.time() - start
if time_diff > 5:
logging.warning(f"TwitchAlert: User Loop Finished in > 5s | {time_diff}s")
async def create_alert_embed(self, stream_data, message):
"""
Creates and sends an alert message
:param stream_data: The twitch stream data to have in the message
:param message: The custom message to be added as a description
:return: The discord message id of the sent message
"""
user_details = await self.ta_database_manager.twitch_handler.get_user_data(
stream_data.get("user_name"))
game_details = await self.ta_database_manager.twitch_handler.get_game_data(
stream_data.get("game_id"))
return create_live_embed(stream_data, user_details, game_details, message)
@tasks.loop(minutes=REFRESH_TEAMS_DELAY)
async def loop_update_teams(self):
start = time.time()
# logging.info("TwitchAlert: Started Update Teams")
await self.ta_database_manager.update_all_teams_members()
time_diff = time.time() - start
if time_diff > 5:
logging.warning(f"TwitchAlert: Teams updated in > 5s | {time_diff}s")
@tasks.loop(minutes=TEAMS_LOOP_CHECK_LIVE_DELAY)
async def loop_check_team_live(self):
"""
A loop to repeatedly send messages if a member of a team is live, and remove it when they are not
:return:
"""
start = time.time()
# logging.info("TwitchAlert: Team Loop Started")
sql_select_team_users = "SELECT twitch_username, twitch_team_name " \
"FROM UserInTwitchTeam " \
"JOIN TeamInTwitchAlert TITA " \
" ON UserInTwitchTeam.team_twitch_alert_id = TITA.team_twitch_alert_id " \
"JOIN TwitchAlerts TA on TITA.channel_id = TA.channel_id " \
"JOIN (SELECT extension_id, guild_id FROM GuildExtensions " \
"WHERE extension_id = 'TwitchAlert' " \
" OR extension_id = 'All') GE on TA.guild_id = GE.guild_id "
users_and_teams = self.ta_database_manager.database_manager.db_execute_select(sql_select_team_users)
usernames = []
for user in users_and_teams:
if not re.search(TWITCH_USERNAME_REGEX, user[1]):
sql_remove_invalid_user = "DELETE FROM TeamInTwitchAlert WHERE twitch_team_name = ?"
self.ta_database_manager.database_manager.db_execute_commit(sql_remove_invalid_user, args=[user[1]])
else:
usernames.append(user[0])
if not usernames:
return
streams_data = await self.ta_database_manager.twitch_handler.get_streams_data(usernames)
if streams_data is None:
return
# Deals with online streams
for stream_data in streams_data:
try:
if stream_data.get('type') == "live":
current_username = str.lower(stream_data.get("user_name"))
usernames.remove(current_username)
sql_find_message_id = """
SELECT TITA.channel_id, UserInTwitchTeam.message_id, TITA.team_twitch_alert_id, custom_message,
default_message
FROM UserInTwitchTeam
JOIN TeamInTwitchAlert TITA on UserInTwitchTeam.team_twitch_alert_id = TITA.team_twitch_alert_id
JOIN TwitchAlerts TA on TITA.channel_id = TA.channel_id
JOIN (SELECT extension_id, guild_id
FROM GuildExtensions
WHERE extension_id = 'TwitchAlert' OR extension_id = 'All') GE ON TA.guild_id = GE.guild_id
WHERE twitch_username = ?"""
results = self.ta_database_manager.database_manager.db_execute_select(
sql_find_message_id, args=[current_username])
new_message_embed = None
for result in results:
channel_id = result[0]
message_id = result[1]
team_twitch_alert_id = result[2]
custom_message = result[3]
channel_default_message = result[4]
channel = self.bot.get_channel(id=channel_id)
try:
# If no Alert is posted
if message_id is None:
if new_message_embed is None:
if custom_message is not None:
message = custom_message
else:
message = channel_default_message
new_message_embed = await self.create_alert_embed(stream_data, message)
if new_message_embed is not None and channel is not None:
new_message = await channel.send(embed=new_message_embed)
sql_update_message_id = """
UPDATE UserInTwitchTeam
SET message_id = ?
WHERE team_twitch_alert_id = ?
AND twitch_username = ?"""
self.ta_database_manager.database_manager.db_execute_commit(
sql_update_message_id,
args=[new_message.id, team_twitch_alert_id, current_username])
except discord.errors.Forbidden as err:
logging.warning(f"TwitchAlert: {err} Name: {channel} ID: {channel.id}")
sql_remove_invalid_channel = "DELETE FROM TwitchAlerts WHERE channel_id = ?"
self.ta_database_manager.database_manager.db_execute_commit(sql_remove_invalid_channel,
args=[channel.id])
except Exception as err:
logging.error(f"TwitchAlert: Team Loop error {err}")
# Deals with remaining offline streams
await self.ta_database_manager.delete_all_offline_streams(True, usernames)
time_diff = time.time() - start
if time_diff > 5:
logging.warning(f"TwitchAlert: Teams Loop Finished in > 5s | {time_diff}s")
def create_live_embed(stream_info, user_info, game_info, message):
"""
Creates an embed for the go live announcement
:param stream_info: The stream data from the Twitch API
:param user_info: The user data for this streamer from the Twitch API
:param game_info: The game data for this game from the Twitch API
:param message: The custom message to be added as a description
:return: The embed created
"""
embed = discord.Embed(colour=KOALA_GREEN)
if message is not None and message != "":
embed.description = message
embed.set_author(name=stream_info.get("user_name") + " is now streaming!",
icon_url=TWITCH_ICON)
embed.title = "https://twitch.tv/" + str.lower(stream_info.get("user_name"))
embed.add_field(name="Stream Title", value=stream_info.get("title"))
if game_info is None:
embed.add_field(name="Playing", value="No Category")
else:
embed.add_field(name="Playing", value=game_info.get("name"))
embed.set_thumbnail(url=user_info.get("profile_image_url"))
return embed
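# Illustrative call (not executed anywhere): create_live_embed only reads a handful
# of fields from the Twitch API responses, so a minimal, hypothetical invocation is
#
#     create_live_embed({"user_name": "somestreamer", "title": "Speedrunning"},
#                       {"profile_image_url": "https://example.invalid/avatar.png"},
#                       {"name": "Some Game"},
#                       "Now live!")
#
# which returns a discord.Embed with the author line, stream title, category and
# thumbnail filled in.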
class TwitchAPIHandler:
"""
A wrapper to interact with the twitch API
"""
def __init__(self, client_id: str, client_secret: str):
self.client_id = client_id
self.client_secret = client_secret
self.params = {'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'client_credentials'}
self.token = {}
@property
def base_headers(self):
return {
            'Authorization': f'Bearer {self.token.get("access_token")}',
'Client-ID': self.client_id
}
async def get_new_twitch_oauth(self):
"""
Get a new OAuth2 token from twitch using client_id and client_secret
:return: The new OAuth2 token
"""
async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(60)) as client:
async with client.post('https://id.twitch.tv/oauth2/token', params=self.params) as response:
if response.status > 399:
logging.critical(f'TwitchAlert: Error {response.status} while getting Oauth token')
self.token = {}
response_json = await response.json()
try:
response_json['expires_in'] += time.time()
except KeyError:
# probably shouldn't need this, but catch just in case
logging.warning('TwitchAlert: Failed to set token expiration time')
self.token = response_json
return self.token
async def requests_get(self, url, headers=None, params=None):
"""
Gets a response from a curl get request to the given url using headers of this object
:param headers: the Headers required for the request, will use self.headers by default
:param url: The URL to send the request to
:param params: The parameters of the request
:return: The response of the request
"""
if self.token.get('expires_in', 0) <= time.time() + 1 or not self.token:
await self.get_new_twitch_oauth()
async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(60)) as client:
async with client.get(url=url, headers=headers if headers else self.base_headers, params=params) as \
response:
if response.status == 401:
logging.info(f"TwitchAlert: {response.status}, getting new oauth and retrying")
await self.get_new_twitch_oauth()
return await self.requests_get(url, headers, params)
elif response.status > 399:
logging.warning(f'TwitchAlert: {response.status} while getting requesting URL:{url}')
return await response.json()
async def get_streams_data(self, usernames):
"""
Gets all stream information from a list of given usernames
:param usernames: The list of usernames
:return: The JSON data of the request
"""
url = 'https://api.twitch.tv/helix/streams?'
next_hundred_users = usernames[:100]
usernames = usernames[100:]
result = (await self.requests_get(url + "user_login=" + "&user_login=".join(next_hundred_users))).get("data")
while usernames:
next_hundred_users = usernames[:100]
usernames = usernames[100:]
result += (await self.requests_get(url + "user_login=" + "&user_login=".join(next_hundred_users))).get(
"data")
return result
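    # Worked example (illustrative): the Helix streams endpoint accepts at most 100
    # user_login values per request, so get_streams_data above slices the username
    # list into batches of 100. With 250 usernames it issues three requests (100,
    # 100 and 50 logins) and concatenates the "data" arrays of the responses.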
async def get_user_data(self, username):
"""
Gets the user information of a given user
:param username: The display twitch username of the user
:return: The JSON information of the user's data
"""
url = 'https://api.twitch.tv/helix/users?login=' + username
return (await self.requests_get(url)).get("data")[0]
async def get_game_data(self, game_id):
"""
Gets the game information of a given game
:param game_id: The twitch game ID of a game
:return: The JSON information of the game's data
"""
if game_id != "":
url = 'https://api.twitch.tv/helix/games?id=' + game_id
game_data = await self.requests_get(url)
return game_data.get("data")[0]
else:
return None
async def get_team_users(self, team_id):
"""
Gets the users data about a given team
:param team_id: The team name of the twitch team
:return: the JSON information of the users
"""
url = 'https://api.twitch.tv/helix/teams?name=' + team_id
return (
await self.requests_get(url)).get("data")[0].get("users")
class TwitchAlertDBManager:
"""
A class for interacting with the Koala twitch database
"""
def __init__(self, database_manager: KoalaDBManager.KoalaDBManager, bot_client: discord.client):
"""
Initialises local variables
:param database_manager:
:param bot_client:
"""
self.database_manager = database_manager
self.twitch_handler = TwitchAPIHandler(TWITCH_CLIENT_ID, TWITCH_SECRET)
self.bot = bot_client
def get_parent_database_manager(self):
"""
A getter for the database manager of this object
:return:
"""
return self.database_manager
def create_tables(self):
"""
Creates all the tables associated with the twitch alert extension
:return:
"""
# TwitchAlerts
sql_create_twitch_alerts_table = """
CREATE TABLE IF NOT EXISTS TwitchAlerts (
guild_id integer NOT NULL,
channel_id integer NOT NULL,
default_message text NOT NULL,
PRIMARY KEY (guild_id, channel_id),
CONSTRAINT fk_guild
FOREIGN KEY (guild_id)
REFERENCES GuildExtensions (guild_id)
ON DELETE CASCADE
);"""
# UserInTwitchAlert
sql_create_user_in_twitch_alert_table = """
CREATE TABLE IF NOT EXISTS UserInTwitchAlert (
channel_id integer NOT NULL,
twitch_username text NOT NULL,
custom_message text,
message_id integer,
PRIMARY KEY (channel_id, twitch_username),
CONSTRAINT fk_channel
FOREIGN KEY (channel_id)
REFERENCES TwitchAlerts (channel_id)
ON DELETE CASCADE
);"""
# TeamInTwitchAlert
sql_create_team_in_twitch_alert_table = """
CREATE TABLE IF NOT EXISTS TeamInTwitchAlert (
team_twitch_alert_id integer PRIMARY KEY AUTOINCREMENT,
channel_id integer NOT NULL,
twitch_team_name text NOT NULL,
custom_message text,
CONSTRAINT fk_channel
FOREIGN KEY (channel_id)
REFERENCES TwitchAlerts (channel_id)
ON DELETE CASCADE
);"""
# UserInTwitchTeam
sql_create_user_in_twitch_team_table = """
CREATE TABLE IF NOT EXISTS UserInTwitchTeam (
team_twitch_alert_id text NOT NULL,
twitch_username text NOT NULL,
message_id integer,
PRIMARY KEY (team_twitch_alert_id, twitch_username),
CONSTRAINT fk_twitch_team_alert
FOREIGN KEY (team_twitch_alert_id)
REFERENCES TeamInTwitchAlert (team_twitch_alert_id)
ON DELETE CASCADE
);"""
# Create Tables
self.database_manager.db_execute_commit(sql_create_twitch_alerts_table)
self.database_manager.db_execute_commit(sql_create_user_in_twitch_alert_table)
self.database_manager.db_execute_commit(sql_create_team_in_twitch_alert_table)
self.database_manager.db_execute_commit(sql_create_user_in_twitch_team_table)
def new_ta(self, guild_id, channel_id, default_message=None, replace=False):
"""
Creates a new Twitch Alert and gives the ID associated with it
:param guild_id: The discord guild ID where the Twitch Alert is located
:param channel_id: The discord channel ID of the twitch Alert
:param default_message: The default message of users in the Twitch Alert
:param replace: True if the new ta should replace the current if exists
:return: The new default_message
"""
sql_find_ta = "SELECT default_message FROM TwitchAlerts WHERE channel_id=?"
message = self.database_manager.db_execute_select(sql_find_ta, args=[channel_id])
if message and not replace:
return message[0][0]
# Sets the default message if not provided
if default_message is None:
default_message = DEFAULT_MESSAGE
# Insert new Twitch Alert to database
if replace:
sql_insert_twitch_alert = """
REPLACE INTO TwitchAlerts(guild_id, channel_id, default_message)
VALUES(?,?,?)
"""
else:
sql_insert_twitch_alert = """
INSERT INTO TwitchAlerts(guild_id, channel_id, default_message)
VALUES(?,?,?)
"""
self.database_manager.db_execute_commit(sql_insert_twitch_alert, args=[guild_id, channel_id, default_message])
return default_message
def get_default_message(self, channel_id):
"""
Get the set default message for the twitch alert
:param channel_id: The discord channel ID of the twitch Alert
:return: The current default_message
"""
sql_find_ta = "SELECT default_message FROM TwitchAlerts WHERE channel_id= ?"
return self.database_manager.db_execute_select(sql_find_ta, args=[channel_id])
def add_user_to_ta(self, channel_id, twitch_username, custom_message, guild_id=None):
"""
Add a twitch user to a given Twitch Alert
:param channel_id: The discord channel ID of the twitch Alert
:param twitch_username: The Twitch username of the user to be added
:param custom_message: The custom Message of the user's live notification.
None = use default Twitch Alert message
:param guild_id: The guild ID of the channel
:return:
:raises: KeyError if channel ID is not defined in TwitchAlerts and guild_id is not provided
"""
self.new_ta(guild_id, channel_id)
if custom_message:
sql_insert_user_twitch_alert = """
INSERT INTO UserInTwitchAlert(channel_id, twitch_username, custom_message)
VALUES(?, ?, ?)
"""
self.database_manager.db_execute_commit(
sql_insert_user_twitch_alert, args=[channel_id, str.lower(twitch_username), custom_message])
else:
sql_insert_user_twitch_alert = """
INSERT INTO UserInTwitchAlert(channel_id, twitch_username)
VALUES(?, ?)
"""
self.database_manager.db_execute_commit(
sql_insert_user_twitch_alert, args=[channel_id, str.lower(twitch_username)])
async def remove_user_from_ta(self, channel_id, twitch_username):
"""
Removes a user from a given Twitch Alert
:param channel_id: The discord channel ID of the twitch Alert
:param twitch_username: The Twitch username of the user to be added
:return:
"""
sql_get_message_id = "SELECT message_id " \
"FROM UserInTwitchAlert " \
"WHERE twitch_username = ? " \
"AND channel_id = ? "
message_id = self.database_manager.db_execute_select(sql_get_message_id,
args=[twitch_username, channel_id])[0][0]
if message_id is not None:
await self.delete_message(message_id, channel_id)
sql_remove_entry = """DELETE FROM UserInTwitchAlert
WHERE twitch_username = ? AND channel_id = ?"""
self.database_manager.db_execute_commit(sql_remove_entry, args=[twitch_username, channel_id])
async def delete_message(self, message_id, channel_id):
"""
Deletes a given discord message
:param message_id: discord message ID of the message to delete
:param channel_id: discord channel ID which has the message
:return:
"""
try:
channel = self.bot.get_channel(int(channel_id))
if channel is None:
logging.warning(f"TwitchAlert: Channel ID {channel_id} does not exist, removing from database")
sql_remove_invalid_channel = "DELETE FROM TwitchAlerts WHERE channel_id = ?"
self.database_manager.db_execute_commit(sql_remove_invalid_channel, args=[channel_id])
return
message = await channel.fetch_message(message_id)
await message.delete()
except discord.errors.NotFound as err:
logging.warning(f"TwitchAlert: Message ID {message_id} does not exist, skipping \nError: {err}")
except discord.errors.Forbidden as err:
logging.warning(f"TwitchAlert: {err} Channel ID: {channel_id}")
sql_remove_invalid_channel = "DELETE FROM TwitchAlerts WHERE channel_id = ?"
self.database_manager.db_execute_commit(sql_remove_invalid_channel, args=[channel_id])
def get_users_in_ta(self, channel_id):
"""
Returns all users in a given Twitch Alert
:param channel_id: The channel ID of the Twitch Alert
:return: The sql results of the users
"""
sql_get_users = "SELECT twitch_username FROM UserInTwitchAlert WHERE channel_id = ?"
return self.database_manager.db_execute_select(sql_get_users, args=[channel_id])
def get_teams_in_ta(self, channel_id):
"""
Returns all teams in a given Twitch Alert
:param channel_id: The channel ID of the Twitch Alert
:return: The sql results of the teams
"""
sql_get_teams = "SELECT twitch_team_name FROM TeamInTwitchAlert WHERE channel_id = ?"
return self.database_manager.db_execute_select(sql_get_teams, args=[channel_id])
def add_team_to_ta(self, channel_id, twitch_team, custom_message, guild_id=None):
"""
Add a twitch team to a given Twitch Alert
:param channel_id: The discord channel ID of the twitch Alert
:param twitch_team: The Twitch team to be added
:param custom_message: The custom Message of the team's live notification.
None = use default Twitch Alert message
:param guild_id: The guild ID of the channel
:return:
:raises: KeyError if channel ID is not defined in TwitchAlerts and guild_id is not provided
"""
self.new_ta(guild_id, channel_id)
if custom_message:
sql_insert_team_twitch_alert = """
INSERT INTO TeamInTwitchAlert(channel_id, twitch_team_name, custom_message)
VALUES(?, ?, ?)
"""
self.database_manager.db_execute_commit(
sql_insert_team_twitch_alert, args=[channel_id, str.lower(twitch_team), custom_message])
else:
sql_insert_team_twitch_alert = """
INSERT INTO TeamInTwitchAlert(channel_id, twitch_team_name)
VALUES(?, ?)
"""
self.database_manager.db_execute_commit(
sql_insert_team_twitch_alert, args=[channel_id, str.lower(twitch_team)])
async def remove_team_from_ta(self, channel_id, team_name):
"""
Removes a team from a given twitch alert
:param channel_id: The channel ID of the Twitch Alert
:param team_name: The team name of the team to be removed
:return:
"""
sql_get_team_alert_id = "SELECT team_twitch_alert_id " \
"FROM TeamInTwitchAlert " \
"WHERE twitch_team_name = ? " \
" AND channel_id = ?"
result = self.database_manager.db_execute_select(sql_get_team_alert_id, args=[team_name, channel_id])
if not result:
raise AttributeError("Team name not found")
team_alert_id = result[0][0]
sql_get_message_id = """SELECT UserInTwitchTeam.message_id
FROM UserInTwitchTeam
WHERE team_twitch_alert_id = ?"""
message_ids = self.database_manager.db_execute_select(sql_get_message_id, args=[team_alert_id])
if message_ids is not None:
for message_id in message_ids:
if message_id[0] is not None:
await self.delete_message(message_id[0], channel_id)
sql_remove_users = """DELETE FROM UserInTwitchTeam WHERE team_twitch_alert_id = ?"""
sql_remove_team = """DELETE FROM TeamInTwitchAlert WHERE team_twitch_alert_id = ?"""
self.database_manager.db_execute_commit(sql_remove_users, args=[team_alert_id])
self.database_manager.db_execute_commit(sql_remove_team, args=[team_alert_id])
async def update_team_members(self, twitch_team_id, team_name):
"""
Users in a team are updated to ensure they are assigned to the correct team
:param twitch_team_id: the team twitch alert id
:param team_name: the name of the team
:return:
"""
if re.search(TWITCH_USERNAME_REGEX, team_name):
users = await self.twitch_handler.get_team_users(team_name)
for user in users:
sql_add_user = """INSERT OR IGNORE INTO UserInTwitchTeam(team_twitch_alert_id, twitch_username)
VALUES(?, ?)"""
try:
self.database_manager.db_execute_commit(sql_add_user, args=[twitch_team_id, user.get("user_login")],
pass_errors=True)
except KoalaDBManager.sqlite3.IntegrityError as err:
logging.error(f"Twitch Alert: 1034: {err}")
pass
async def update_all_teams_members(self):
"""
Updates all teams with the current team members
:return:
"""
sql_get_teams = """SELECT team_twitch_alert_id, twitch_team_name FROM TeamInTwitchAlert"""
teams_info = self.database_manager.db_execute_select(sql_get_teams)
for team_info in teams_info:
await self.update_team_members(team_info[0], team_info[1])
async def delete_all_offline_streams(self, team: bool, usernames):
"""
A method that deletes all currently offline streams
:param team: True if the users are from teams, false if individuals
:param usernames: The usernames of the team members
:return:
"""
if team:
sql_select_offline_streams_with_message_ids = f"""
SELECT channel_id, message_id
FROM UserInTwitchTeam
JOIN TeamInTwitchAlert TITA on UserInTwitchTeam.team_twitch_alert_id = TITA.team_twitch_alert_id
WHERE message_id NOT NULL
AND twitch_username in ({','.join(['?'] * len(usernames))})"""
sql_update_offline_streams = f"""
UPDATE UserInTwitchTeam
SET message_id = NULL
WHERE twitch_username in ({','.join(['?'] * len(usernames))})"""
else:
sql_select_offline_streams_with_message_ids = f"""
SELECT channel_id, message_id
FROM UserInTwitchAlert
WHERE message_id NOT NULL
AND twitch_username in ({','.join(['?'] * len(usernames))})"""
sql_update_offline_streams = f"""
UPDATE UserInTwitchAlert
SET message_id = NULL
WHERE twitch_username in ({','.join(['?'] * len(usernames))})"""
results = self.database_manager.db_execute_select(
sql_select_offline_streams_with_message_ids, usernames)
for result in results:
await self.delete_message(result[1], result[0])
self.database_manager.db_execute_commit(sql_update_offline_streams, usernames)
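# Worked example (illustrative): delete_all_offline_streams builds its IN clause from
# the number of usernames, so the bound parameter list must match the placeholders.
# For three usernames, ','.join(['?'] * 3) produces "?,?,?" and the query ends in
# "twitch_username in (?,?,?)", with the usernames passed separately as parameters.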
def setup(bot: KoalaBot) -> None:
"""
Load this cog to the KoalaBot.
:param bot: the bot client for KoalaBot
"""
if TWITCH_SECRET is None or TWITCH_CLIENT_ID is None:
logging.error("TwitchAlert not started. API keys not found in environment.")
print("TwitchAlert not started. API keys not found in environment.")
KoalaBot.database_manager.insert_extension("TwitchAlert", 0, False, False)
else:
bot.add_cog(TwitchAlert(bot))
logging.info("TwitchAlert is ready.")
print("TwitchAlert is ready.")
|
#!/usr/bin/env python
"""
Koala Bot Base Cog code and additional base cog functions
Commented using reStructuredText (reST)
"""
# Futures
# Built-in/Generic Imports
import os
import time
import re
import aiohttp
import logging
from concurrent.futures import ThreadPoolExecutor
logging.basicConfig(filename='TwitchAlert.log')
# Own modules
import KoalaBot
from utils.KoalaColours import *
from utils.KoalaUtils import error_embed, is_channel_in_guild, extract_id
from utils import KoalaDBManager
# Libs
import discord
from discord.ext import commands, tasks
from dotenv import load_dotenv
import asyncio
# Constants
load_dotenv()
DEFAULT_MESSAGE = ""
TWITCH_ICON = "https://cdn3.iconfinder.com/data/icons/social-messaging-ui-color-shapes-2-free" \
"/128/social-twitch-circle-512.png"
TWITCH_CLIENT_ID = os.environ.get('TWITCH_TOKEN')
TWITCH_SECRET = os.environ.get('TWITCH_SECRET')
TWITCH_USERNAME_REGEX = "^[a-z0-9][a-z0-9_]{3,24}$"
LOOP_CHECK_LIVE_DELAY = 1
TEAMS_LOOP_CHECK_LIVE_DELAY = 1
REFRESH_TEAMS_DELAY = 5
# Variables
def twitch_is_enabled(ctx):
"""
    A check for whether the guild has enabled the TwitchAlert extension,
    intended for use as e.g. @commands.check(twitch_is_enabled)
    :param ctx: The context of the message
    :return: True if the guild has TwitchAlert enabled, False otherwise
"""
try:
result = KoalaBot.check_guild_has_ext(ctx, "TwitchAlert")
except PermissionError:
result = False
return result
class TwitchAlert(commands.Cog):
"""
A discord.py cog for alerting when someone goes live on twitch
"""
def __init__(self, bot, database_manager=None):
"""
Initialises local variables
:param bot: The bot client for this cog
"""
if not database_manager:
database_manager = KoalaBot.database_manager
self.bot = bot
database_manager.create_base_tables()
database_manager.insert_extension("TwitchAlert", 0, True, True)
self.ta_database_manager = TwitchAlertDBManager(database_manager, bot)
self.ta_database_manager.create_tables()
self.loop_thread = None
self.loop_team_thread = None
self.running = False
self.stop_loop = False
@commands.command(name="twitchEditMsg", aliases=["edit_default_message"])
@commands.check(KoalaBot.is_admin)
@commands.check(twitch_is_enabled)
async def edit_default_message(self, ctx, raw_channel_id, *default_live_message):
"""
Edit the default message put in a Twitch Alert Notification
:param ctx: The discord context of the command
:param raw_channel_id: The channel ID where the twitch alert is being used
:param default_live_message: The default live message of users within this Twitch Alert,
leave empty for program default
:return:
"""
try:
channel_id = extract_id(raw_channel_id)
except TypeError:
channel_id = ctx.message.channel.id
default_live_message = (raw_channel_id,) + default_live_message
if not is_channel_in_guild(self.bot, ctx.message.guild.id, channel_id):
await ctx.send(embed=error_embed("The channel ID provided is either invalid, or not in this server."))
return
# Assigning default message if provided
if default_live_message is not None and default_live_message != (None,):
default_message = " ".join(default_live_message)
if len(default_message) > 1000:
await ctx.send(embed=error_embed(
"custom_message is too long, try something with less than 1000 characters"))
return
else:
default_message = None
# Creates a new Twitch Alert with the used guild ID and default message if provided
default_message = self.ta_database_manager.new_ta(ctx.message.guild.id, channel_id, default_message,
replace=True)
# Returns an embed with information altered
new_embed = discord.Embed(title="Default Message Edited", colour=KOALA_GREEN,
description=f"Guild: {ctx.message.guild.id}\n"
f"Channel: {channel_id}\n"
f"Default Message: {default_message}")
await ctx.send(embed=new_embed)
@commands.command(name="twitchViewMsg", aliases=["view_default_message"])
@commands.check(KoalaBot.is_admin)
@commands.check(twitch_is_enabled)
async def view_default_message(self, ctx, raw_channel_id=None):
"""
Shows the current default message for Twitch Alerts
:param ctx: The discord context of the command
:param raw_channel_id: The channel ID where the twitch alert is being used
leave empty for program default
:return:
"""
if raw_channel_id is None:
channel_id = ctx.message.channel.id
else:
channel_id = extract_id(raw_channel_id)
if not is_channel_in_guild(self.bot, ctx.message.guild.id, channel_id):
await ctx.send(embed=error_embed("The channel ID provided is either invalid, or not in this server."))
return
# Creates a new Twitch Alert with the used guild ID and default message if provided
default_message = self.ta_database_manager.get_default_message(channel_id)[0][0]
# Returns an embed with information altered
new_embed = discord.Embed(title="Default Message", colour=KOALA_GREEN,
description=f"Guild: {ctx.message.guild.id}\n"
f"Channel: {channel_id}\n"
f"Default Message: {default_message}")
# new_embed.set_footer(text=f"Twitch Alert ID: {new_id}")
await ctx.send(embed=new_embed)
@commands.command(name="twitchAdd", aliases=['add_user_to_twitch_alert'])
@commands.check(KoalaBot.is_admin)
@commands.check(twitch_is_enabled)
async def add_user_to_twitch_alert(self, ctx, raw_channel_id, twitch_username=None, *custom_live_message):
"""
Add a Twitch user to a Twitch Alert
:param ctx: The discord context of the command
:param raw_channel_id: The channel ID where the twitch alert is being used
:param twitch_username: The Twitch Username of the user being added (lowercase)
:param custom_live_message: the custom live message for this user's alert
:return:
"""
try:
channel_id = extract_id(raw_channel_id)
except TypeError:
custom_live_message = (twitch_username,) + custom_live_message
twitch_username = raw_channel_id
channel_id = ctx.message.channel.id
if twitch_username is None:
raise discord.errors.InvalidArgument("twitch_username is a required argument that is missing.")
elif not re.search(TWITCH_USERNAME_REGEX, twitch_username):
raise discord.errors.InvalidArgument(
"The given twitch_username is not a valid username (please use lowercase)")
# Check the channel specified is in this guild
if not is_channel_in_guild(self.bot, ctx.message.guild.id, channel_id):
await ctx.send(embed=error_embed("The channel ID provided is either invalid, or not in this server."))
return
default_message = self.ta_database_manager.new_ta(ctx.message.guild.id, channel_id)
# Setting the custom message as required
if custom_live_message is not None and custom_live_message != (None,):
custom_message = " ".join(custom_live_message)
default_message = custom_message
if len(default_message) > 1000:
await ctx.send(embed=error_embed(
"custom_message is too long, try something with less than 1000 characters"))
return
else:
custom_message = None
self.ta_database_manager.add_user_to_ta(channel_id, twitch_username, custom_message, ctx.message.guild.id)
# Response Message
new_embed = discord.Embed(title="Added User to Twitch Alert", colour=KOALA_GREEN,
description=f"Channel: {channel_id}\n"
f"User: {twitch_username}\n"
f"Message: {default_message}")
await ctx.send(embed=new_embed)
@commands.command(name="twitchRemove", aliases=['remove_user_from_twitch_alert'])
@commands.check(KoalaBot.is_admin)
@commands.check(twitch_is_enabled)
async def remove_user_from_twitch_alert(self, ctx, raw_channel_id, twitch_username=None):
"""
Removes a user from a Twitch Alert
:param ctx: the discord context
:param raw_channel_id: The discord channel ID of the Twitch Alert
:param twitch_username: The username of the user to be removed
:return:
"""
try:
channel_id = extract_id(raw_channel_id)
except TypeError:
twitch_username = raw_channel_id
channel_id = ctx.message.channel.id
if twitch_username is None:
raise discord.errors.InvalidArgument("twitch_username is a required argument that is missing.")
# Check the channel specified is in this guild
if not is_channel_in_guild(self.bot, ctx.message.guild.id, channel_id):
await ctx.send(embed=error_embed("The channel ID provided is either invalid, or not in this server."))
return
await self.ta_database_manager.remove_user_from_ta(channel_id, twitch_username)
# Response Message
new_embed = discord.Embed(title="Removed User from Twitch Alert", colour=KOALA_GREEN,
description=f"Channel: {channel_id}\n"
f"User: {twitch_username}")
await ctx.send(embed=new_embed)
@commands.command(name="twitchAddTeam", aliases=["add_team_to_twitch_alert"])
@commands.check(KoalaBot.is_admin)
@commands.check(twitch_is_enabled)
async def add_team_to_twitch_alert(self, ctx, raw_channel_id, team_name=None, *custom_live_message):
"""
Add a Twitch team to a Twitch Alert
:param ctx: The discord context of the command
:param raw_channel_id: The channel ID where the twitch alert is being used
:param team_name: The Twitch team being added (lowercase)
:param custom_live_message: the custom live message for this team's alert
:return:
"""
try:
channel_id = extract_id(raw_channel_id)
except TypeError:
custom_live_message = (team_name,) + custom_live_message
team_name = raw_channel_id
channel_id = ctx.message.channel.id
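        # As with twitchAdd, the channel argument may be omitted and the current channel is used, e.g.
        # (prefix assumed) "k!twitchAddTeam some_team A team member is live!", where "some_team" is illustrative.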
if team_name is None:
raise discord.errors.InvalidArgument("team_name is a required argument that is missing.")
elif not re.search(TWITCH_USERNAME_REGEX, team_name):
raise discord.errors.InvalidArgument(
"The given team_name is not a valid twitch team name (please use lowercase)")
# Check the channel specified is in this guild
if not is_channel_in_guild(self.bot, ctx.message.guild.id, channel_id):
await ctx.send(embed=error_embed("The channel ID provided is either invalid, or not in this server."))
return
        default_message = self.ta_database_manager.new_ta(ctx.message.guild.id, channel_id)
        # Setting the custom message as required
        if custom_live_message and custom_live_message != (None,):
            custom_message = " ".join(custom_live_message)
            default_message = custom_message
            if len(default_message) > 1000:
                await ctx.send(embed=error_embed(
                    "custom_message is too long, try something with less than 1000 characters"))
                return
        else:
            custom_message = None
        self.ta_database_manager.add_team_to_ta(channel_id, team_name, custom_message, ctx.message.guild.id)
# Response Message
new_embed = discord.Embed(title="Added Team to Twitch Alert", colour=KOALA_GREEN,
description=f"Channel: {channel_id}\n"
f"Team: {team_name}\n"
f"Message: {default_message}")
# new_embed.set_footer(text=f"Twitch Alert ID: {channel_id}")
await ctx.send(embed=new_embed)
@commands.command(name="twitchRemoveTeam", aliases=["remove_team_from_twitch_alert"])
@commands.check(KoalaBot.is_admin)
@commands.check(twitch_is_enabled)
async def remove_team_from_twitch_alert(self, ctx, raw_channel_id, team_name=None):
"""
Removes a team from a Twitch Alert
:param ctx: the discord context
:param raw_channel_id: The discord channel ID of the Twitch Alert
        :param team_name: The Twitch team being removed (lowercase)
:return:
"""
try:
channel_id = extract_id(raw_channel_id)
except TypeError:
team_name = raw_channel_id
channel_id = ctx.message.channel.id
if team_name is None:
raise discord.errors.InvalidArgument("team_name is a required argument that is missing.")
# Check the channel specified is in this guild
if not is_channel_in_guild(self.bot, ctx.message.guild.id, channel_id):
await ctx.send(embed=error_embed("The channel ID provided is either invalid, or not in this server."))
return
await self.ta_database_manager.remove_team_from_ta(channel_id, team_name)
# Response Message
new_embed = discord.Embed(title="Removed Team from Twitch Alert", colour=KOALA_GREEN,
description=f"Channel: {channel_id}\n"
f"Team: {team_name}")
await ctx.send(embed=new_embed)
@commands.command(name="twitchList", aliases=["list_twitch_alert"])
@commands.check(KoalaBot.is_admin)
@commands.check(twitch_is_enabled)
async def list_twitch_alert(self, ctx, raw_channel_id=None):
"""
Shows all current TwitchAlert users and teams in a channel
:param ctx:
:param raw_channel_id:
:return:
"""
if raw_channel_id is None:
channel_id = ctx.message.channel.id
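            # No channel given, so the Twitch Alert of the channel the command was run in is listed.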
else:
channel_id = extract_id(raw_channel_id)
if not is_channel_in_guild(self.bot, ctx.message.guild.id, channel_id):
await ctx.send(embed=error_embed("The channel ID provided is either invalid, or not in this server."))
return
embed = discord.Embed()
embed.title = "Twitch Alerts"
embed.colour = KOALA_GREEN
embed.set_footer(text=f"Channel ID: {channel_id}")
results = self.ta_database_manager.get_users_in_ta(channel_id)
if results:
users = ""
for result in results:
users += f"{result[0]}\n"
embed.add_field(name=":bust_in_silhouette: Users", value=users)
else:
embed.add_field(name=":bust_in_silhouette: Users", value="None")
results = self.ta_database_manager.get_teams_in_ta(channel_id)
if results:
teams = ""
for result in results:
teams += f"{result[0]}\n"
embed.add_field(name=":busts_in_silhouette: Teams", value=teams)
else:
embed.add_field(name=":busts_in_silhouette: Teams", value="None")
await ctx.send(embed=embed)
@commands.Cog.listener()
async def on_ready(self):
"""
When the bot is started up, the loop begins
:return:
"""
if not self.running:
self.start_loops()
def start_loops(self):
self.loop_update_teams.start()
self.loop_check_team_live.start()
self.loop_check_live.start()
self.running = True
def end_loops(self):
self.loop_update_teams.cancel()
self.loop_check_team_live.cancel()
self.loop_check_live.cancel()
self.running = False
@tasks.loop(minutes=LOOP_CHECK_LIVE_DELAY)
async def loop_check_live(self):
"""
A loop that continually checks the live status of users and
sends alerts when online, removing them when offline
:return:
"""
start = time.time()
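        # Each pass: gather tracked usernames whose guild has the TwitchAlert (or All) extension enabled,
        # drop any stored names that no longer match TWITCH_USERNAME_REGEX, ask Twitch which are live,
        # post or keep an embed per live user, then clear message ids for everyone left in `usernames`
        # (i.e. those not reported live) via delete_all_offline_streams.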
# logging.info("TwitchAlert: User Loop Started")
sql_find_users = "SELECT twitch_username " \
"FROM UserInTwitchAlert " \
"JOIN TwitchAlerts TA on UserInTwitchAlert.channel_id = TA.channel_id " \
"JOIN (SELECT extension_id, guild_id FROM GuildExtensions " \
"WHERE extension_id = 'TwitchAlert' OR extension_id = 'All') GE on TA.guild_id = GE.guild_id;"
users = self.ta_database_manager.database_manager.db_execute_select(sql_find_users)
usernames = []
for user in users:
if not re.search(TWITCH_USERNAME_REGEX, user[0]):
sql_remove_invalid_user = "DELETE FROM UserInTwitchAlert WHERE twitch_username = ?"
self.ta_database_manager.database_manager.db_execute_commit(sql_remove_invalid_user, args=[user[0]])
else:
usernames.append(user[0])
# user_streams = self.ta_database_manager.twitch_handler.get_streams_data(usernames)
if not usernames:
return
user_streams = await self.ta_database_manager.twitch_handler.get_streams_data(usernames)
if user_streams is None:
return
# Deals with online streams
for streams_details in user_streams:
try:
if streams_details.get('type') == "live":
current_username = str.lower(streams_details.get("user_name"))
usernames.remove(current_username)
sql_find_message_id = \
"SELECT UserInTwitchAlert.channel_id, message_id, custom_message, default_message " \
"FROM UserInTwitchAlert " \
"JOIN TwitchAlerts TA on UserInTwitchAlert.channel_id = TA.channel_id " \
"JOIN (SELECT extension_id, guild_id FROM GuildExtensions " \
"WHERE extension_id = 'TwitchAlert' " \
" OR extension_id = 'All') GE on TA.guild_id = GE.guild_id " \
"WHERE twitch_username = ?;"
results = self.ta_database_manager.database_manager.db_execute_select(
sql_find_message_id, args=[current_username])
new_message_embed = None
for result in results:
channel_id = result[0]
message_id = result[1]
custom_message = result[2]
channel_default_message = result[3]
channel = self.bot.get_channel(id=channel_id)
try:
# If no Alert is posted
if message_id is None:
if new_message_embed is None:
if custom_message is not None:
message = custom_message
else:
message = channel_default_message
new_message_embed = await self.create_alert_embed(streams_details, message)
if new_message_embed is not None and channel is not None:
new_message = await channel.send(embed=new_message_embed)
sql_update_message_id = """
UPDATE UserInTwitchAlert
SET message_id = ?
WHERE channel_id = ?
AND twitch_username = ?"""
self.ta_database_manager.database_manager.db_execute_commit(
sql_update_message_id, args=[new_message.id, result[0], current_username])
except discord.errors.Forbidden as err:
logging.warning(f"TwitchAlert: {err} Name: {channel} ID: {channel.id}")
sql_remove_invalid_channel = "DELETE FROM TwitchAlerts WHERE channel_id = ?"
self.ta_database_manager.database_manager.db_execute_commit(sql_remove_invalid_channel,
args=[channel.id])
except Exception as err:
logging.error(f"TwitchAlert: User Loop error {err}")
# Deals with remaining offline streams
await self.ta_database_manager.delete_all_offline_streams(False, usernames)
time_diff = time.time() - start
if time_diff > 5:
logging.warning(f"TwitchAlert: User Loop Finished in > 5s | {time_diff}s")
async def create_alert_embed(self, stream_data, message):
"""
        Creates an alert embed for a live stream
        :param stream_data: The twitch stream data to have in the message
        :param message: The custom message to be added as a description
        :return: The embed to be sent for this alert
"""
user_details = await self.ta_database_manager.twitch_handler.get_user_data(
stream_data.get("user_name"))
game_details = await self.ta_database_manager.twitch_handler.get_game_data(
stream_data.get("game_id"))
return create_live_embed(stream_data, user_details, game_details, message)
@tasks.loop(minutes=REFRESH_TEAMS_DELAY)
async def loop_update_teams(self):
start = time.time()
# logging.info("TwitchAlert: Started Update Teams")
await self.ta_database_manager.update_all_teams_members()
time_diff = time.time() - start
if time_diff > 5:
logging.warning(f"TwitchAlert: Teams updated in > 5s | {time_diff}s")
@tasks.loop(minutes=TEAMS_LOOP_CHECK_LIVE_DELAY)
async def loop_check_team_live(self):
"""
A loop to repeatedly send messages if a member of a team is live, and remove it when they are not
:return:
"""
start = time.time()
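        # Same overall flow as loop_check_live, except usernames come from team membership rows
        # (UserInTwitchTeam joined to TeamInTwitchAlert) and message ids are stored per team entry.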
# logging.info("TwitchAlert: Team Loop Started")
sql_select_team_users = "SELECT twitch_username, twitch_team_name " \
"FROM UserInTwitchTeam " \
"JOIN TeamInTwitchAlert TITA " \
" ON UserInTwitchTeam.team_twitch_alert_id = TITA.team_twitch_alert_id " \
"JOIN TwitchAlerts TA on TITA.channel_id = TA.channel_id " \
"JOIN (SELECT extension_id, guild_id FROM GuildExtensions " \
"WHERE extension_id = 'TwitchAlert' " \
" OR extension_id = 'All') GE on TA.guild_id = GE.guild_id "
users_and_teams = self.ta_database_manager.database_manager.db_execute_select(sql_select_team_users)
usernames = []
for user in users_and_teams:
if not re.search(TWITCH_USERNAME_REGEX, user[1]):
sql_remove_invalid_user = "DELETE FROM TeamInTwitchAlert WHERE twitch_team_name = ?"
self.ta_database_manager.database_manager.db_execute_commit(sql_remove_invalid_user, args=[user[1]])
else:
usernames.append(user[0])
if not usernames:
return
streams_data = await self.ta_database_manager.twitch_handler.get_streams_data(usernames)
if streams_data is None:
return
# Deals with online streams
for stream_data in streams_data:
try:
if stream_data.get('type') == "live":
current_username = str.lower(stream_data.get("user_name"))
usernames.remove(current_username)
sql_find_message_id = """
SELECT TITA.channel_id, UserInTwitchTeam.message_id, TITA.team_twitch_alert_id, custom_message,
default_message
FROM UserInTwitchTeam
JOIN TeamInTwitchAlert TITA on UserInTwitchTeam.team_twitch_alert_id = TITA.team_twitch_alert_id
JOIN TwitchAlerts TA on TITA.channel_id = TA.channel_id
JOIN (SELECT extension_id, guild_id
FROM GuildExtensions
WHERE extension_id = 'TwitchAlert' OR extension_id = 'All') GE ON TA.guild_id = GE.guild_id
WHERE twitch_username = ?"""
results = self.ta_database_manager.database_manager.db_execute_select(
sql_find_message_id, args=[current_username])
new_message_embed = None
for result in results:
channel_id = result[0]
message_id = result[1]
team_twitch_alert_id = result[2]
custom_message = result[3]
channel_default_message = result[4]
channel = self.bot.get_channel(id=channel_id)
try:
# If no Alert is posted
if message_id is None:
if new_message_embed is None:
if custom_message is not None:
message = custom_message
else:
message = channel_default_message
new_message_embed = await self.create_alert_embed(stream_data, message)
if new_message_embed is not None and channel is not None:
new_message = await channel.send(embed=new_message_embed)
sql_update_message_id = """
UPDATE UserInTwitchTeam
SET message_id = ?
WHERE team_twitch_alert_id = ?
AND twitch_username = ?"""
self.ta_database_manager.database_manager.db_execute_commit(
sql_update_message_id,
args=[new_message.id, team_twitch_alert_id, current_username])
except discord.errors.Forbidden as err:
logging.warning(f"TwitchAlert: {err} Name: {channel} ID: {channel.id}")
sql_remove_invalid_channel = "DELETE FROM TwitchAlerts WHERE channel_id = ?"
self.ta_database_manager.database_manager.db_execute_commit(sql_remove_invalid_channel,
args=[channel.id])
except Exception as err:
logging.error(f"TwitchAlert: Team Loop error {err}")
# Deals with remaining offline streams
await self.ta_database_manager.delete_all_offline_streams(True, usernames)
time_diff = time.time() - start
if time_diff > 5:
logging.warning(f"TwitchAlert: Teams Loop Finished in > 5s | {time_diff}s")
def create_live_embed(stream_info, user_info, game_info, message):
"""
Creates an embed for the go live announcement
:param stream_info: The stream data from the Twitch API
:param user_info: The user data for this streamer from the Twitch API
:param game_info: The game data for this game from the Twitch API
:param message: The custom message to be added as a description
:return: The embed created
"""
embed = discord.Embed(colour=KOALA_GREEN)
if message is not None and message != "":
embed.description = message
embed.set_author(name=stream_info.get("user_name") + " is now streaming!",
icon_url=TWITCH_ICON)
embed.title = "https://twitch.tv/" + str.lower(stream_info.get("user_name"))
embed.add_field(name="Stream Title", value=stream_info.get("title"))
if game_info is None:
embed.add_field(name="Playing", value="No Category")
else:
embed.add_field(name="Playing", value=game_info.get("name"))
embed.set_thumbnail(url=user_info.get("profile_image_url"))
return embed
class TwitchAPIHandler:
"""
A wrapper to interact with the twitch API
"""
def __init__(self, client_id: str, client_secret: str):
self.client_id = client_id
self.client_secret = client_secret
self.params = {'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'client_credentials'}
self.token = {}
@property
def base_headers(self):
return {
'Authorization': f'Bearer {self.token.get("access_token")}',
'Client-ID': self.client_id
}
async def get_new_twitch_oauth(self):
"""
Get a new OAuth2 token from twitch using client_id and client_secret
:return: The new OAuth2 token
"""
async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(60)) as client:
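            # Client-credentials grant against https://id.twitch.tv/oauth2/token; a successful response is
            # expected to look roughly like {"access_token": "...", "expires_in": 5011271, "token_type": "bearer"}.
            # expires_in is converted below from a relative lifetime into an absolute unix timestamp.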
async with client.post('https://id.twitch.tv/oauth2/token', params=self.params) as response:
if response.status > 399:
logging.critical(f'TwitchAlert: Error {response.status} while getting Oauth token')
self.token = {}
response_json = await response.json()
try:
response_json['expires_in'] += time.time()
except KeyError:
# probably shouldn't need this, but catch just in case
logging.warning('TwitchAlert: Failed to set token expiration time')
self.token = response_json
return self.token
async def requests_get(self, url, headers=None, params=None):
"""
Gets a response from a curl get request to the given url using headers of this object
:param headers: the Headers required for the request, will use self.headers by default
:param url: The URL to send the request to
:param params: The parameters of the request
:return: The response of the request
"""
if self.token.get('expires_in', 0) <= time.time() + 1 or not self.token:
await self.get_new_twitch_oauth()
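        # If Twitch still answers 401 despite the pre-emptive refresh above, a new token is fetched and the
        # request is retried recursively below.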
async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(60)) as client:
async with client.get(url=url, headers=headers if headers else self.base_headers, params=params) as \
response:
if response.status == 401:
logging.info(f"TwitchAlert: {response.status}, getting new oauth and retrying")
await self.get_new_twitch_oauth()
return await self.requests_get(url, headers, params)
elif response.status > 399:
                    logging.warning(f'TwitchAlert: {response.status} while requesting URL: {url}')
return await response.json()
async def get_streams_data(self, usernames):
"""
Gets all stream information from a list of given usernames
:param usernames: The list of usernames
:return: The JSON data of the request
"""
url = 'https://api.twitch.tv/helix/streams?'
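        # The Helix streams endpoint accepts a limited number of user_login parameters per request
        # (100 at the time of writing), so lookups are performed in batches of 100 usernames.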
next_hundred_users = usernames[:100]
usernames = usernames[100:]
result = (await self.requests_get(url + "user_login=" + "&user_login=".join(next_hundred_users))).get("data")
while usernames:
next_hundred_users = usernames[:100]
usernames = usernames[100:]
result += (await self.requests_get(url + "user_login=" + "&user_login=".join(next_hundred_users))).get(
"data")
return result
async def get_user_data(self, username):
"""
Gets the user information of a given user
:param username: The display twitch username of the user
:return: The JSON information of the user's data
"""
url = 'https://api.twitch.tv/helix/users?login=' + username
return (await self.requests_get(url)).get("data")[0]
async def get_game_data(self, game_id):
"""
Gets the game information of a given game
:param game_id: The twitch game ID of a game
:return: The JSON information of the game's data
"""
if game_id != "":
url = 'https://api.twitch.tv/helix/games?id=' + game_id
game_data = await self.requests_get(url)
return game_data.get("data")[0]
else:
return None
async def get_team_users(self, team_id):
"""
Gets the users data about a given team
:param team_id: The team name of the twitch team
:return: the JSON information of the users
"""
url = 'https://api.twitch.tv/helix/teams?name=' + team_id
return (
await self.requests_get(url)).get("data")[0].get("users")
class TwitchAlertDBManager:
"""
A class for interacting with the Koala twitch database
"""
def __init__(self, database_manager: KoalaDBManager.KoalaDBManager, bot_client: discord.client):
"""
Initialises local variables
:param database_manager:
:param bot_client:
"""
self.database_manager = database_manager
self.twitch_handler = TwitchAPIHandler(TWITCH_CLIENT_ID, TWITCH_SECRET)
self.bot = bot_client
def get_parent_database_manager(self):
"""
A getter for the database manager of this object
:return:
"""
return self.database_manager
def create_tables(self):
"""
Creates all the tables associated with the twitch alert extension
:return:
"""
# TwitchAlerts
sql_create_twitch_alerts_table = """
CREATE TABLE IF NOT EXISTS TwitchAlerts (
guild_id integer NOT NULL,
channel_id integer NOT NULL,
default_message text NOT NULL,
PRIMARY KEY (guild_id, channel_id),
CONSTRAINT fk_guild
FOREIGN KEY (guild_id)
REFERENCES GuildExtensions (guild_id)
ON DELETE CASCADE
);"""
# UserInTwitchAlert
sql_create_user_in_twitch_alert_table = """
CREATE TABLE IF NOT EXISTS UserInTwitchAlert (
channel_id integer NOT NULL,
twitch_username text NOT NULL,
custom_message text,
message_id integer,
PRIMARY KEY (channel_id, twitch_username),
CONSTRAINT fk_channel
FOREIGN KEY (channel_id)
REFERENCES TwitchAlerts (channel_id)
ON DELETE CASCADE
);"""
# TeamInTwitchAlert
sql_create_team_in_twitch_alert_table = """
CREATE TABLE IF NOT EXISTS TeamInTwitchAlert (
team_twitch_alert_id integer PRIMARY KEY AUTOINCREMENT,
channel_id integer NOT NULL,
twitch_team_name text NOT NULL,
custom_message text,
CONSTRAINT fk_channel
FOREIGN KEY (channel_id)
REFERENCES TwitchAlerts (channel_id)
ON DELETE CASCADE
);"""
# UserInTwitchTeam
sql_create_user_in_twitch_team_table = """
CREATE TABLE IF NOT EXISTS UserInTwitchTeam (
team_twitch_alert_id text NOT NULL,
twitch_username text NOT NULL,
message_id integer,
PRIMARY KEY (team_twitch_alert_id, twitch_username),
CONSTRAINT fk_twitch_team_alert
FOREIGN KEY (team_twitch_alert_id)
REFERENCES TeamInTwitchAlert (team_twitch_alert_id)
ON DELETE CASCADE
);"""
# Create Tables
self.database_manager.db_execute_commit(sql_create_twitch_alerts_table)
self.database_manager.db_execute_commit(sql_create_user_in_twitch_alert_table)
self.database_manager.db_execute_commit(sql_create_team_in_twitch_alert_table)
self.database_manager.db_execute_commit(sql_create_user_in_twitch_team_table)
def new_ta(self, guild_id, channel_id, default_message=None, replace=False):
"""
Creates a new Twitch Alert and gives the ID associated with it
:param guild_id: The discord guild ID where the Twitch Alert is located
:param channel_id: The discord channel ID of the twitch Alert
:param default_message: The default message of users in the Twitch Alert
:param replace: True if the new ta should replace the current if exists
:return: The new default_message
"""
sql_find_ta = "SELECT default_message FROM TwitchAlerts WHERE channel_id=?"
message = self.database_manager.db_execute_select(sql_find_ta, args=[channel_id])
if message and not replace:
return message[0][0]
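        # An existing alert keeps its stored default message; e.g. (hypothetical IDs) new_ta(1234, 5678)
        # on a fresh channel falls through to the insert below and returns DEFAULT_MESSAGE.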
# Sets the default message if not provided
if default_message is None:
default_message = DEFAULT_MESSAGE
# Insert new Twitch Alert to database
if replace:
sql_insert_twitch_alert = """
REPLACE INTO TwitchAlerts(guild_id, channel_id, default_message)
VALUES(?,?,?)
"""
else:
sql_insert_twitch_alert = """
INSERT INTO TwitchAlerts(guild_id, channel_id, default_message)
VALUES(?,?,?)
"""
self.database_manager.db_execute_commit(sql_insert_twitch_alert, args=[guild_id, channel_id, default_message])
return default_message
def get_default_message(self, channel_id):
"""
Get the set default message for the twitch alert
:param channel_id: The discord channel ID of the twitch Alert
:return: The current default_message
"""
sql_find_ta = "SELECT default_message FROM TwitchAlerts WHERE channel_id= ?"
return self.database_manager.db_execute_select(sql_find_ta, args=[channel_id])
def add_user_to_ta(self, channel_id, twitch_username, custom_message, guild_id=None):
"""
Add a twitch user to a given Twitch Alert
:param channel_id: The discord channel ID of the twitch Alert
:param twitch_username: The Twitch username of the user to be added
:param custom_message: The custom Message of the user's live notification.
None = use default Twitch Alert message
:param guild_id: The guild ID of the channel
:return:
:raises: KeyError if channel ID is not defined in TwitchAlerts and guild_id is not provided
"""
self.new_ta(guild_id, channel_id)
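        # new_ta above ensures the parent TwitchAlerts row exists (creating it with the default message if
        # needed), so the UserInTwitchAlert insert below has a valid channel_id to reference.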
if custom_message:
sql_insert_user_twitch_alert = """
INSERT INTO UserInTwitchAlert(channel_id, twitch_username, custom_message)
VALUES(?, ?, ?)
"""
self.database_manager.db_execute_commit(
sql_insert_user_twitch_alert, args=[channel_id, str.lower(twitch_username), custom_message])
else:
sql_insert_user_twitch_alert = """
INSERT INTO UserInTwitchAlert(channel_id, twitch_username)
VALUES(?, ?)
"""
self.database_manager.db_execute_commit(
sql_insert_user_twitch_alert, args=[channel_id, str.lower(twitch_username)])
async def remove_user_from_ta(self, channel_id, twitch_username):
"""
Removes a user from a given Twitch Alert
:param channel_id: The discord channel ID of the twitch Alert
        :param twitch_username: The Twitch username of the user to be removed
:return:
"""
sql_get_message_id = "SELECT message_id " \
"FROM UserInTwitchAlert " \
"WHERE twitch_username = ? " \
"AND channel_id = ? "
message_id = self.database_manager.db_execute_select(sql_get_message_id,
args=[twitch_username, channel_id])[0][0]
if message_id is not None:
await self.delete_message(message_id, channel_id)
sql_remove_entry = """DELETE FROM UserInTwitchAlert
WHERE twitch_username = ? AND channel_id = ?"""
self.database_manager.db_execute_commit(sql_remove_entry, args=[twitch_username, channel_id])
async def delete_message(self, message_id, channel_id):
"""
Deletes a given discord message
:param message_id: discord message ID of the message to delete
:param channel_id: discord channel ID which has the message
:return:
"""
try:
channel = self.bot.get_channel(int(channel_id))
if channel is None:
logging.warning(f"TwitchAlert: Channel ID {channel_id} does not exist, removing from database")
sql_remove_invalid_channel = "DELETE FROM TwitchAlerts WHERE channel_id = ?"
self.database_manager.db_execute_commit(sql_remove_invalid_channel, args=[channel_id])
return
message = await channel.fetch_message(message_id)
await message.delete()
except discord.errors.NotFound as err:
logging.warning(f"TwitchAlert: Message ID {message_id} does not exist, skipping \nError: {err}")
except discord.errors.Forbidden as err:
logging.warning(f"TwitchAlert: {err} Channel ID: {channel_id}")
sql_remove_invalid_channel = "DELETE FROM TwitchAlerts WHERE channel_id = ?"
self.database_manager.db_execute_commit(sql_remove_invalid_channel, args=[channel_id])
def get_users_in_ta(self, channel_id):
"""
Returns all users in a given Twitch Alert
:param channel_id: The channel ID of the Twitch Alert
:return: The sql results of the users
"""
sql_get_users = "SELECT twitch_username FROM UserInTwitchAlert WHERE channel_id = ?"
return self.database_manager.db_execute_select(sql_get_users, args=[channel_id])
def get_teams_in_ta(self, channel_id):
"""
Returns all teams in a given Twitch Alert
:param channel_id: The channel ID of the Twitch Alert
:return: The sql results of the teams
"""
sql_get_teams = "SELECT twitch_team_name FROM TeamInTwitchAlert WHERE channel_id = ?"
return self.database_manager.db_execute_select(sql_get_teams, args=[channel_id])
def add_team_to_ta(self, channel_id, twitch_team, custom_message, guild_id=None):
"""
Add a twitch team to a given Twitch Alert
:param channel_id: The discord channel ID of the twitch Alert
:param twitch_team: The Twitch team to be added
:param custom_message: The custom Message of the team's live notification.
None = use default Twitch Alert message
:param guild_id: The guild ID of the channel
:return:
:raises: KeyError if channel ID is not defined in TwitchAlerts and guild_id is not provided
"""
self.new_ta(guild_id, channel_id)
if custom_message:
sql_insert_team_twitch_alert = """
INSERT INTO TeamInTwitchAlert(channel_id, twitch_team_name, custom_message)
VALUES(?, ?, ?)
"""
self.database_manager.db_execute_commit(
sql_insert_team_twitch_alert, args=[channel_id, str.lower(twitch_team), custom_message])
else:
sql_insert_team_twitch_alert = """
INSERT INTO TeamInTwitchAlert(channel_id, twitch_team_name)
VALUES(?, ?)
"""
self.database_manager.db_execute_commit(
sql_insert_team_twitch_alert, args=[channel_id, str.lower(twitch_team)])
async def remove_team_from_ta(self, channel_id, team_name):
"""
Removes a team from a given twitch alert
:param channel_id: The channel ID of the Twitch Alert
:param team_name: The team name of the team to be removed
:return:
"""
sql_get_team_alert_id = "SELECT team_twitch_alert_id " \
"FROM TeamInTwitchAlert " \
"WHERE twitch_team_name = ? " \
" AND channel_id = ?"
result = self.database_manager.db_execute_select(sql_get_team_alert_id, args=[team_name, channel_id])
if not result:
raise AttributeError("Team name not found")
team_alert_id = result[0][0]
sql_get_message_id = """SELECT UserInTwitchTeam.message_id
FROM UserInTwitchTeam
WHERE team_twitch_alert_id = ?"""
message_ids = self.database_manager.db_execute_select(sql_get_message_id, args=[team_alert_id])
if message_ids is not None:
for message_id in message_ids:
if message_id[0] is not None:
await self.delete_message(message_id[0], channel_id)
sql_remove_users = """DELETE FROM UserInTwitchTeam WHERE team_twitch_alert_id = ?"""
sql_remove_team = """DELETE FROM TeamInTwitchAlert WHERE team_twitch_alert_id = ?"""
self.database_manager.db_execute_commit(sql_remove_users, args=[team_alert_id])
self.database_manager.db_execute_commit(sql_remove_team, args=[team_alert_id])
async def update_team_members(self, twitch_team_id, team_name):
"""
Users in a team are updated to ensure they are assigned to the correct team
:param twitch_team_id: the team twitch alert id
:param team_name: the name of the team
:return:
"""
if re.search(TWITCH_USERNAME_REGEX, team_name):
users = await self.twitch_handler.get_team_users(team_name)
for user in users:
sql_add_user = """INSERT OR IGNORE INTO UserInTwitchTeam(team_twitch_alert_id, twitch_username)
VALUES(?, ?)"""
try:
self.database_manager.db_execute_commit(sql_add_user, args=[twitch_team_id, user.get("user_login")],
pass_errors=True)
except KoalaDBManager.sqlite3.IntegrityError as err:
logging.error(f"Twitch Alert: 1034: {err}")
async def update_all_teams_members(self):
"""
Updates all teams with the current team members
:return:
"""
sql_get_teams = """SELECT team_twitch_alert_id, twitch_team_name FROM TeamInTwitchAlert"""
teams_info = self.database_manager.db_execute_select(sql_get_teams)
for team_info in teams_info:
await self.update_team_members(team_info[0], team_info[1])
async def delete_all_offline_streams(self, team: bool, usernames):
"""
A method that deletes all currently offline streams
:param team: True if the users are from teams, false if individuals
:param usernames: The usernames of the team members
:return:
"""
if team:
sql_select_offline_streams_with_message_ids = f"""
SELECT channel_id, message_id
FROM UserInTwitchTeam
JOIN TeamInTwitchAlert TITA on UserInTwitchTeam.team_twitch_alert_id = TITA.team_twitch_alert_id
WHERE message_id NOT NULL
AND twitch_username in ({','.join(['?'] * len(usernames))})"""
sql_update_offline_streams = f"""
UPDATE UserInTwitchTeam
SET message_id = NULL
WHERE twitch_username in ({','.join(['?'] * len(usernames))})"""
else:
sql_select_offline_streams_with_message_ids = f"""
SELECT channel_id, message_id
FROM UserInTwitchAlert
WHERE message_id NOT NULL
AND twitch_username in ({','.join(['?'] * len(usernames))})"""
sql_update_offline_streams = f"""
UPDATE UserInTwitchAlert
SET message_id = NULL
WHERE twitch_username in ({','.join(['?'] * len(usernames))})"""
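        # The IN clause is built with one '?' placeholder per username, e.g. three names give
        # "twitch_username in (?,?,?)"; the usernames themselves are still passed as bound parameters.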
results = self.database_manager.db_execute_select(
sql_select_offline_streams_with_message_ids, usernames)
for result in results:
await self.delete_message(result[1], result[0])
self.database_manager.db_execute_commit(sql_update_offline_streams, usernames)
def setup(bot: KoalaBot) -> None:
"""
Load this cog to the KoalaBot.
:param bot: the bot client for KoalaBot
"""
if TWITCH_SECRET is None or TWITCH_CLIENT_ID is None:
logging.error("TwitchAlert not started. API keys not found in environment.")
print("TwitchAlert not started. API keys not found in environment.")
KoalaBot.database_manager.insert_extension("TwitchAlert", 0, False, False)
else:
bot.add_cog(TwitchAlert(bot))
logging.info("TwitchAlert is ready.")
print("TwitchAlert is ready.")
|
import superimport
import itertools
import matplotlib.pyplot as plt
import numpy as np
from scipy.linalg import eigh
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import rbf_kernel
import pyprobml_utils as pml
plt.style.use('classic')
def spectral_clustering_demo():
np.random.seed(0)
num_clusters = 2
for data_type, data in (('circle', sample_circle(num_clusters)),
('spiral', sample_spiral())):
kmeans = KMeans(n_clusters=num_clusters, random_state=0)
kmeans.fit(data)
assignments = kmeans.predict(data)
plot_data(data, assignments, 'k-means clustering', data_type)
sigma = 0.1
gamma = 1 / (2 * sigma ** 2)
W = rbf_kernel(data, gamma=gamma)
d = np.sum(W, 1, keepdims=True)
sqrt_d = np.sqrt(d)
normalized_W = (W / sqrt_d) / sqrt_d.T
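        # Since d[i] = sum_j W[i, j], dividing by sqrt_d along both axes gives
        # normalized_W[i, j] = W[i, j] / (sqrt(d[i]) * sqrt(d[j])), i.e. D^{-1/2} W D^{-1/2},
        # the symmetrically normalized affinity used in normalized spectral clustering.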
paranoid_assert(W, normalized_W, False)
# We select the largest eigen values of normalized_W, rather
# than the smallest eigenvalues of I - normalized_W. The two
# problems are equivalent. The eigen values can be converted
# between the two problems via `1 - eigen_values`. The eigen
# vectors are the same between both problems.
eigen_values, eigen_vectors = eigh(normalized_W,
# Get only the top num_clusters eigenvalues
eigvals=(data.shape[0] - num_clusters, data.shape[0]-1))
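        # Note: newer SciPy releases deprecate the eigvals keyword in favour of subset_by_index;
        # the call above assumes a SciPy version that still accepts eigvals.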
eigen_vectors = eigen_vectors / np.linalg.norm(eigen_vectors, axis=1, keepdims=True)
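        # Each row of the spectral embedding (one per data point) is rescaled to unit length before
        # k-means, as in the Ng-Jordan-Weiss formulation of spectral clustering.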
kmeans.fit(eigen_vectors)
assignments = kmeans.predict(eigen_vectors)
plot_data(data, assignments, 'spectral clustering', data_type)
plt.show()
def paranoid_assert(W, normalized_W, enable):
if not enable:
return
D = np.diag(np.sum(W, 1))
L = D - W
D_inv_sqrt = np.diag(1 / np.diag(np.sqrt(D)))
np.testing.assert_almost_equal(np.sum(L, 1), 0, err_msg="Rows of Laplacian must sum to 0.")
    np.testing.assert_allclose(normalized_W, D_inv_sqrt @ W @ D_inv_sqrt, rtol=0, atol=1e-8)
def sample_circle(num_clusters):
points_per_cluster = 500
bandwidth = 0.1
data = np.zeros((num_clusters * points_per_cluster, 2))
for k, n in itertools.product(range(num_clusters), range(points_per_cluster)):
theta = 2 * np.pi * np.random.uniform()
rho = k + 1 + np.random.randn() * bandwidth
x, y = pol2cart(theta, rho)
idx = k * points_per_cluster + n
data[idx, 0] = x
data[idx, 1] = y
data = data.reshape((num_clusters * points_per_cluster, 2))
return data
def pol2cart(theta, rho):
x = rho * np.cos(theta)
y = rho * np.sin(theta)
return(x, y)
def sample_spiral():
# Only 2 clusters in this case. This is hard-coded.
points_per_cluster = 500
bandwidth = 0.1
data = np.empty((points_per_cluster, 2))
w = np.arange(1, points_per_cluster + 1).astype(np.float32) / points_per_cluster
data[:,0] = (4 * w + 1) * np.cos(2*np.pi * w) + np.random.randn(points_per_cluster) * bandwidth
data[:,1] = (4 * w + 1) * np.sin(2*np.pi * w) + np.random.randn(points_per_cluster) * bandwidth
data = np.vstack((data, -data))
return data
def plot_data(data, assignments, title, data_type):
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(data[assignments == 0, 0], data[assignments == 0, 1], 'o', color='r')
ax.plot(data[assignments == 1, 0], data[assignments == 1, 1], 'o', color='b')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.axis('square')
ax.grid(True)
ax.set_title(title)
plt.tight_layout()
    pml.savefig(f"{data_type}_{title.replace(' ', '_')}.pdf")
if __name__ == '__main__':
spectral_clustering_demo()
|