repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class)
---|---|---|---|---|---|---|
rancher | rancher-master/tests/validation/tests/v3_api/test_bkp_restore_s3_recover.py |
import pytest
from .common import * # NOQA
from .test_rke_cluster_provisioning import HOST_NAME
from .test_rke_cluster_provisioning import create_and_validate_custom_host
from .test_rke_cluster_provisioning import rke_config
namespace = {"p_client": None, "ns": None, "cluster": None, "project": None,
"nodes": []}
backup_info = {"backupname": None, "backup_id": None, "workload": None,
"backupfilename": None, "etcdbackupdata": None}
@if_test_all_snapshot
def test_bkp_restore_s3_recover_validate():
"""
    - This test creates 1 cluster with S3 backups enabled
- 1 ControlPlane/worker node
- 2 worker nodes
- 3 etcd nodes
- Creates an Ingress pointing to a workload
- Snapshots the cluster and checks the backup is in S3
- Stops the etcd nodes in ec2
- Waits for the cluster to go into unavailable state
- Removes all 3 etcd nodes
- Creates 3 new etcd nodes and waits until the cluster
asks to restore from backup.
- Restores from S3 backup
    - Cluster is validated after it gets into Active state.
- Checks the Ingress created before the backup is functional after restore.
- Removes cluster if RANCHER_CLEANUP_CLUSTER=True
"""
cluster = namespace["cluster"]
client = namespace["client"]
ns, b_info = validate_backup_create(namespace, backup_info, "s3")
ips_to_remove = []
etcd_nodes = get_etcd_nodes(client, cluster)
assert len(etcd_nodes) > 0, "Make sure we have etcd nodes in the cluster"
# stop the etcd ec2 instances
[stop_node_from_ec2(etcd_node.externalIpAddress)
for etcd_node in etcd_nodes]
# wait for cluster to get into unavailable state
cluster = wait_for_cluster_unavailable_or_error(client, cluster)
for etcd_node in etcd_nodes:
ips_to_remove.append(etcd_node.customConfig['internalAddress'])
client.delete(etcd_node)
# Also remove the ec2 instances
for ip_to_remove in ips_to_remove:
delete_node_from_ec2(ip_to_remove)
namespace["nodes"] = [node for node
in namespace["nodes"]
if node.private_ip_address != ip_to_remove]
ips_to_remove.clear()
cluster = client.reload(cluster)
wait_for_cluster_node_count(client, cluster, 3)
# Add completely new etcd nodes to the cluster
cluster = add_new_etcd_nodes(client, cluster)
cluster = client.reload(cluster)
wait_for_cluster_node_count(client, cluster, 6, 500)
# This message is expected to appear after we add new etcd nodes
# The cluster will require the user to perform a backup to recover
    # this appears in the cluster object's transitioningMessage field
message = "Please restore your cluster from backup"
cluster = wait_for_cluster_transitioning_message(client, cluster, message)
etcd_nodes = get_etcd_nodes(client, cluster)
assert len(etcd_nodes) == 3, "Make sure the cluster now has 3 etcd nodes"
cluster = restore_backup_to_recover(client, cluster, b_info)
# validate the ingress that was created in the first backup
# after restoring and recovering the cluster
validate_ingress(namespace["p_client"], cluster,
[b_info["workload"]], ns["host"],
"/name.html")
@pytest.fixture(scope='module', autouse=True)
def create_project_client_and_cluster_s3_three_etcd(request):
node_roles = [
["controlplane", "worker"],
["etcd"], ["etcd"], ["etcd"],
["worker"], ["worker"]
]
rke_config["services"]["etcd"]["backupConfig"] = {
"enabled": "true",
"intervalHours": 6,
"retention": 3,
"type": "backupConfig",
"s3BackupConfig": {
"type": "s3BackupConfig",
"accessKey": AWS_ACCESS_KEY_ID,
"secretKey": AWS_SECRET_ACCESS_KEY,
"bucketName": AWS_S3_BUCKET_NAME,
"folder": AWS_S3_BUCKET_FOLDER_NAME,
"region": AWS_REGION,
"endpoint": "s3.amazonaws.com"
}
}
cluster, aws_nodes = create_and_validate_custom_host(
node_roles,
random_cluster_name=True
)
client = get_user_client()
namespace["nodes"].extend(aws_nodes)
p, ns = create_project_and_ns(USER_TOKEN, cluster, "testrecover")
p_client = get_project_client_for_token(p, USER_TOKEN)
c_client = get_cluster_client_for_token(cluster, USER_TOKEN)
namespace["p_client"] = p_client
namespace["ns"] = ns
namespace["cluster"] = cluster
namespace["project"] = p
namespace["c_client"] = c_client
namespace["client"] = client
def fin():
cluster_cleanup(client, cluster, namespace["nodes"])
request.addfinalizer(fin)
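# Wait until the cluster reports either "unavailable" or "error", which is the
# expected state once all of its etcd instances have been stopped.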
def wait_for_cluster_unavailable_or_error(client, cluster):
return wait_for_condition(
client,
cluster,
lambda x: x.state == "unavailable" or x.state == "error",
lambda x: "State is: " + x.state,
timeout=DEFAULT_CLUSTER_STATE_TIMEOUT,
)
def wait_for_cluster_transitioning_message(client, cluster, message):
start = time.time()
while message not in cluster.transitioningMessage:
print(cluster.transitioningMessage)
time.sleep(5)
cluster = client.reload(cluster)
        # wait up to DEFAULT_CLUSTER_STATE_TIMEOUT for the transitioning
        # message to appear; this can be affected by environmental factors
if time.time() - start > DEFAULT_CLUSTER_STATE_TIMEOUT:
raise Exception('Timeout waiting for condition')
return cluster
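# Provision fresh EC2 instances and register each one with the cluster as an
# etcd-only node via the custom host docker run command.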
def add_new_etcd_nodes(client, cluster, no_of_nodes=3):
aws_nodes = AmazonWebServices().create_multiple_nodes(
no_of_nodes, random_test_name(HOST_NAME))
for aws_node in aws_nodes:
docker_run_cmd = \
get_custom_host_registration_cmd(client, cluster, ["etcd"],
aws_node)
print("Docker run command: " + docker_run_cmd)
aws_node.roles.append("etcd")
result = aws_node.execute_command(docker_run_cmd)
namespace["nodes"].append(aws_node)
print(result)
return cluster
def delete_node_from_ec2(internal_ip):
filters = [
{'Name': 'private-ip-address',
'Values': [internal_ip]}
]
aws_node = AmazonWebServices().get_nodes(filters)
if len(aws_node) > 0:
AmazonWebServices().delete_node(aws_node[0])
def stop_node_from_ec2(address):
filters = [
{'Name': 'ip-address',
'Values': [address]}
]
aws_node = AmazonWebServices().get_nodes(filters)
if len(aws_node) > 0:
AmazonWebServices().stop_node(aws_node[0])
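# Trigger restoreFromEtcdBackup with the recorded backup id and wait for the
# cluster to move through "updating" back to an Active, validated state.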
def restore_backup_to_recover(client, cluster, b_info):
cluster.restoreFromEtcdBackup(etcdBackupId=b_info["backup_id"])
return validate_cluster(client, cluster, intermediate_state="updating",
check_intermediate_state=True,
skipIngresscheck=False)
def get_etcd_nodes(client, cluster):
nodes = client.list_node(clusterId=cluster.id).data
return [node for node in nodes if node.etcd is True]
def get_worker_nodes(client, cluster):
nodes = client.list_node(clusterId=cluster.id).data
return [node for node in nodes if node.worker is True]
| 7,295 | 36.22449 | 79 | py |
rancher | rancher-master/tests/validation/tests/v3_api/test_rbac_2.py |
import pytest
import os
from .common import create_kubeconfig
from .common import DATA_SUBDIR
from .common import get_user_client_and_cluster
from .common import rbac_test_file_reader
from .common import validate_cluster_role_rbac
from .common import if_test_rbac_v2
@pytest.fixture(scope='module', autouse="True")
def create_project_client():
client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
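# Each test below is parametrized from a JSON definition file; rbac_test_file_reader
# (imported from common.py) is expected to yield (cluster_role, command,
# authorization, name) tuples matching the parametrize signature.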
@if_test_rbac_v2
@pytest.mark.parametrize("cluster_role, command, authorization, name",
rbac_test_file_reader(os.path.join(
DATA_SUBDIR,
'rbac/monitoring/monitoring_rbac.json')))
def test_monitoring_rbac_v2(cluster_role, command, authorization, name):
validate_cluster_role_rbac(cluster_role, command, authorization, name)
@if_test_rbac_v2
@pytest.mark.parametrize("cluster_role, command, authorization, name",
rbac_test_file_reader(os.path.join(
DATA_SUBDIR,
'rbac/istio/istio_rbac.json')))
def test_istio_rbac_v2(cluster_role, command, authorization, name):
validate_cluster_role_rbac(cluster_role, command, authorization, name)
@if_test_rbac_v2
@pytest.mark.parametrize("cluster_role, command, authorization, name",
rbac_test_file_reader(os.path.join(
DATA_SUBDIR,
'rbac/logging/logging_rbac.json')))
def test_logging_rbac_v2(cluster_role, command, authorization, name):
validate_cluster_role_rbac(cluster_role, command, authorization, name)
@if_test_rbac_v2
@pytest.mark.parametrize("cluster_role, command, authorization, name",
rbac_test_file_reader(os.path.join(
DATA_SUBDIR,
'rbac/cis/cis_rbac.json')))
def test_cis_rbac_v2(cluster_role, command, authorization, name):
validate_cluster_role_rbac(cluster_role, command, authorization, name)
| 2,032 | 38.862745 | 74 | py |
rancher | rancher-master/tests/validation/tests/v3_api/test_cluster_templates.py |
import copy
import os
import pytest
import requests
from rancher import ApiError
from .common import * # NOQA
from .test_monitoring import cluster_query_template
from .test_monitoring import validate_cluster_graph
from .test_monitoring import C_MONITORING_ANSWERS
from .test_monitoring import CLUSTER_MONITORING_APP
from .test_monitoring import MONITORING_OPERATOR_APP
from .test_monitoring import MONITORING_TEMPLATE_ID
from .test_monitoring import MONITORING_VERSION
from .test_monitoring import validate_cluster_monitoring_apps
from .test_rbac import create_user
from .test_rke_cluster_provisioning import engine_install_url
DO_ACCESSKEY = os.environ.get('DO_ACCESSKEY', "None")
RANCHER_S3_BUCKETNAME = os.environ.get('RANCHER_S3_BUCKETNAME', "None")
RANCHER_S3_ENDPOINT = os.environ.get('RANCHER_S3_ENDPOINT', "None")
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID', "None")
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY', "None")
user_token = {"stduser_with_createrketemplate_role": {"user": None,
"token": None},
"standard_user": {"user": None, "token": None}}
@pytest.fixture(scope='module', autouse="True")
def setup(request):
client = get_admin_client()
# create users
user_token["stduser_with_createrketemplate_role"]["user"], \
user_token["stduser_with_createrketemplate_role"]["token"] = \
create_user(client)
user_token["standard_user"]["user"], \
user_token["standard_user"]["token"] = create_user(client)
stduser_with_createrketemplate_role_id = \
user_token["stduser_with_createrketemplate_role"]["user"].id
# Add clustertemplates-create global role binding to the standard user
client.create_global_role_binding(
globalRoleId="clustertemplates-create",
subjectKind="User",
userId=stduser_with_createrketemplate_role_id)
def get_k8s_versionlist():
# Get the list of K8s version supported by the rancher server
headers = {"Content-Type": "application/json",
"Accept": "application/json",
"Authorization": "Bearer " + ADMIN_TOKEN}
json_data = {
'responseType': 'json'
}
settings_url = CATTLE_TEST_URL + "/v3/settings/k8s-versions-current"
response = requests.get(settings_url, json=json_data,
verify=False, headers=headers)
json_response = (json.loads(response.content))
k8sversionstring = json_response['value']
k8sversionlist = k8sversionstring.split(",")
assert len(k8sversionlist) > 1
return k8sversionlist
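# Build a clusterSpecBase payload around the RKE config for the given k8s
# version; cluster monitoring can optionally be enabled.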
def get_cluster_config(k8sversion, enableMonitoring="false"):
rke_config = getRKEConfig(k8sversion)
cluster_config = {
"dockerRootDir": "/var/lib/docker123",
"enableClusterAlerting": "false",
"enableClusterMonitoring": enableMonitoring,
"enableNetworkPolicy": "false",
"type": "clusterSpecBase",
"localClusterAuthEndpoint": {
"enabled": "true",
"type": "localClusterAuthEndpoint"
},
"rancherKubernetesEngineConfig": rke_config
}
return cluster_config
def get_cisscan_enabled_clusterconfig(k8sversion):
rke_config = getRKEConfig(k8sversion)
cluster_config = {
"dockerRootDir": "/var/lib/docker123",
"enableClusterAlerting": "false",
"enableClusterMonitoring": "false",
"enableNetworkPolicy": "false",
"type": "clusterSpecBase",
"localClusterAuthEndpoint": {
"enabled": "true",
"type": "localClusterAuthEndpoint"
},
"scheduledClusterScan": {
"enabled": "true",
"scanConfig": {
"cisScanConfig": {
"debugMaster": "false",
"debugWorker": "false",
"overrideBenchmarkVersion": CIS_SCAN_PROFILE,
"overrideSkip": "None",
"profile": "permissive",
"type": "/v3/schemas/cisScanConfig"
},
"type": "/v3/schemas/clusterScanConfig"
},
"scheduleConfig": {
"cronSchedule": "0 */1 * * *",
"retention": 24,
"type": "/v3/schemas/scheduledClusterScanConfig"
},
"type": "/v3/schemas/scheduledClusterScan"
},
"rancherKubernetesEngineConfig": rke_config
}
return cluster_config
def test_cluster_template_create_with_questions():
# Create a cluster template and revision with questions and create a
# cluster with the revision
k8sversionlist = get_k8s_versionlist()
cluster_config = get_cluster_config(k8sversionlist[0])
questions = [{
"variable": "rancherKubernetesEngineConfig.kubernetesVersion",
"required": "true",
"type": "string",
"default": k8sversionlist[0]
},
{
"variable": "rancherKubernetesEngineConfig.network.plugin",
"required": "true",
"type": "string",
"default": "canal"
},
{
"variable": "rancherKubernetesEngineConfig.services.etcd.backupConfig."
"s3BackupConfig.bucketName",
"required": "true",
"type": "string",
"default": ""
},
{
"variable": "rancherKubernetesEngineConfig.services.etcd.backupConfig."
"s3BackupConfig.endpoint",
"required": "true",
"type": "string",
"default": ""
},
{
"variable": "rancherKubernetesEngineConfig.services.etcd.backupConfig."
"s3BackupConfig.accessKey",
"required": "true",
"type": "string",
"default": ""
},
{
"variable": "rancherKubernetesEngineConfig.services.etcd.backupConfig."
"s3BackupConfig.secretKey",
"required": "true",
"type": "string",
"default": ""
}]
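    # The answers override the question defaults at cluster-creation time; the
    # k8s version and network plugin deliberately differ from the revision's
    # defaults so the overrides can be asserted on the applied spec below.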
answers = {
"values": {
"rancherKubernetesEngineConfig.kubernetesVersion":
k8sversionlist[1],
"rancherKubernetesEngineConfig.network.plugin": "flannel",
"rancherKubernetesEngineConfig.services.etcd.backupConfig."
"s3BackupConfig.bucketName": RANCHER_S3_BUCKETNAME,
"rancherKubernetesEngineConfig.services.etcd.backupConfig."
"s3BackupConfig.endpoint": RANCHER_S3_ENDPOINT,
"rancherKubernetesEngineConfig.services.etcd.backupConfig."
"s3BackupConfig.accessKey": AWS_ACCESS_KEY_ID,
"rancherKubernetesEngineConfig.services.etcd.backupConfig."
"s3BackupConfig.secretKey": AWS_SECRET_ACCESS_KEY
}
}
standard_user_client = \
get_client_for_token(
user_token["stduser_with_createrketemplate_role"]["token"])
cluster_template = \
standard_user_client.create_cluster_template(
name=random_test_name("template"),
description="test-template")
clusterTemplateId = cluster_template.id
revision_name = random_test_name("revision")
cluster_template_revision = \
standard_user_client.create_cluster_template_revision(
name=revision_name,
clusterConfig=cluster_config,
clusterTemplateId=clusterTemplateId,
enabled="true", questions=questions)
time.sleep(2)
cluster_template_revision = standard_user_client.reload(
cluster_template_revision)
userToken = user_token["stduser_with_createrketemplate_role"]["token"]
cluster = create_node_cluster(
standard_user_client, name=random_test_name("test-auto"),
clusterTemplateRevisionId=cluster_template_revision.id,
answers=answers, userToken=userToken)
# Verify that the cluster's applied spec has the parameters set as expected
assert cluster.appliedSpec.dockerRootDir == "/var/lib/docker123"
assert cluster.appliedSpec.localClusterAuthEndpoint.enabled is True
assert cluster.appliedSpec.rancherKubernetesEngineConfig.\
kubernetesVersion == k8sversionlist[1]
assert cluster.appliedSpec.rancherKubernetesEngineConfig.services.etcd.\
backupConfig.s3BackupConfig.bucketName == RANCHER_S3_BUCKETNAME
assert cluster.appliedSpec.rancherKubernetesEngineConfig.services.\
etcd.backupConfig.s3BackupConfig.endpoint == RANCHER_S3_ENDPOINT
assert cluster.appliedSpec.rancherKubernetesEngineConfig.services.etcd.\
backupConfig.s3BackupConfig.accessKey == AWS_ACCESS_KEY_ID
assert cluster.appliedSpec.rancherKubernetesEngineConfig.services.etcd.\
backupConfig.s3BackupConfig.type == "/v3/schemas/s3BackupConfig"
assert cluster.appliedSpec.rancherKubernetesEngineConfig.network.plugin ==\
"flannel"
check_cluster_version(cluster, k8sversionlist[1])
# Verify flannel pod in the kube-system namespace
cmd = "get pods -l k8s-app=flannel --namespace kube-system"
pod_result = execute_kubectl_cmd(cmd)
    assert len(pod_result["items"]) == 1
for pod in pod_result["items"]:
print(pod["metadata"]["name"])
assert "flannel" in (pod["metadata"]["name"])
# Perform Backup
backup = cluster.backupEtcd()
backupname = backup['metadata']['name']
etcdbackups = cluster.etcdBackups(name=backupname)
etcdbackupdata = etcdbackups['data']
s3backupconfig = etcdbackupdata[0]['backupConfig']['s3BackupConfig']
assert s3backupconfig['type'] == '/v3/schemas/s3BackupConfig'
backupId = etcdbackupdata[0]['id']
print("BackupId", backupId)
wait_for_backup_to_active(cluster, backupname)
cluster_cleanup(standard_user_client, cluster)
def test_cluster_template_create_edit_adminuser():
    # Create an admin client. As an admin, create an RKE template and
    # revisions R1 and R2. Create a cluster using R1.
    # Edit and change revision to R2
cluster_template_create_edit(ADMIN_TOKEN)
def test_cluster_template_create_edit_stduser():
    # Create a standard user client. As a standard user, create an RKE
    # template and revisions R1 and R2. Create a cluster using R1.
    # Edit and change revision to R2
userToken = user_token["stduser_with_createrketemplate_role"]["token"]
cluster_template_create_edit(userToken)
def test_cluster_template_add_owner():
# This test case tests the owner member role of the cluster template
k8sversionlist = get_k8s_versionlist()
cluster_config1 = get_cluster_config(k8sversionlist[0])
cluster_config2 = get_cluster_config(k8sversionlist[1])
client = get_admin_client()
# As an Admin, create a cluster template and update the members
# list with the new user as owner
template_name = random_test_name("template")
cluster_template = client.create_cluster_template(
name=template_name, description="test-template")
principalid = user_token["standard_user"]["user"]["principalIds"]
members = [{
"type": "member",
"accessType": "owner",
"userPrincipalId": principalid
}]
cluster_template = client.update(cluster_template,
name=template_name,
members=members)
standard_user_client = \
get_client_for_token(user_token["standard_user"]["token"])
# As an owner of the template, create a revision using the template
# and also create a cluster using the template revision
revision_name = random_test_name("revision1")
cluster_template_revision = \
standard_user_client.create_cluster_template_revision(
name=revision_name,
clusterConfig=cluster_config1,
clusterTemplateId=cluster_template.id)
time.sleep(2)
cluster_template_revision = standard_user_client.reload(
cluster_template_revision)
userToken = user_token["standard_user"]["token"]
cluster = create_node_cluster(
standard_user_client, name=random_test_name("test-auto"),
clusterTemplateRevisionId=cluster_template_revision.id,
userToken=userToken)
# As an admin, create another template and a revision.
cluster_template_new = client.create_cluster_template(
name="new_template", description="newtest-template")
newrevision_name = random_test_name("revision2")
cluster_template_newrevision = \
client.create_cluster_template_revision(
name=newrevision_name,
clusterConfig=cluster_config2,
clusterTemplateId=cluster_template_new.id)
time.sleep(2)
cluster_template_newrevision = client.reload(
cluster_template_newrevision)
# Verify that the existing standard user cannot create a new revision using
# this template
with pytest.raises(ApiError) as e:
standard_user_client.create_cluster_template_revision(
name=random_test_name("userrevision"),
clusterConfig=cluster_config2,
clusterTemplateId=cluster_template_new.id)
print(e.value.error.status)
print(e.value.error.code)
assert e.value.error.status == 404
assert e.value.error.code == "NotFound"
userToken = user_token["standard_user"]["token"]
# Verify that the existing standard user cannot create a cluster
# using the new revision
with pytest.raises(ApiError) as e:
create_node_cluster(
standard_user_client, name=random_test_name("test-auto"),
clusterTemplateRevisionId=cluster_template_newrevision.id,
userToken=userToken)
print(e)
assert e.value.error.status == 404
assert e.value.error.code == "NotFound"
cluster_cleanup(standard_user_client, cluster)
def test_cluster_template_add_readonly_member():
# This test case tests a read-only member role of the cluster template
k8sversionlist = get_k8s_versionlist()
cluster_config1 = get_cluster_config(k8sversionlist[0])
client = get_admin_client()
# As an Admin, create a cluster template and update the members
# list with the new standard user as read-only user
template_name = random_test_name("usertemplate")
cluster_template = client.create_cluster_template(
name=template_name, description="test-template")
principalid = user_token["standard_user"]["user"]["principalIds"]
members = [{
"type": "member",
"accessType": "read-only",
"userPrincipalId": principalid
}]
cluster_template = client.update(cluster_template,
name=template_name, members=members)
revision_name = random_test_name("revision1")
cluster_template_revision1 = client.create_cluster_template_revision(
name=revision_name,
clusterConfig=cluster_config1,
clusterTemplateId=cluster_template.id)
time.sleep(2)
cluster_template_revision1 = client.reload(
cluster_template_revision1)
standard_user_client = \
get_client_for_token(user_token["standard_user"]["token"])
# As a read-only member of the rke template, verify that
# adding another revision to the template fails
revision_name = "userrevision"
with pytest.raises(ApiError) as e:
standard_user_client.create_cluster_template_revision(
name=revision_name,
clusterConfig=cluster_config1,
clusterTemplateId=cluster_template.id)
assert e.value.error.status == 403
assert e.value.error.code == 'PermissionDenied'
userToken = user_token["standard_user"]["token"]
# Verify that the read-only user can create a cluster with the existing
# template revision
cluster = create_node_cluster(
standard_user_client, name=random_test_name("test-auto"),
clusterTemplateRevisionId=cluster_template_revision1.id,
userToken=userToken)
# As an admin, create another template and a revision.
cluster_template_new = client.create_cluster_template(
name="new_template", description="newtest-template")
revision_name = random_test_name("revision2")
cluster_template_newrevision = \
client.create_cluster_template_revision(
name=revision_name,
clusterConfig=cluster_config1,
clusterTemplateId=cluster_template_new.id)
# Verify that the existing standard user cannot create a cluster
# using the new revision
with pytest.raises(ApiError) as e:
create_node_cluster(
standard_user_client, name=random_test_name("test-auto"),
clusterTemplateRevisionId=cluster_template_newrevision.id,
userToken=userToken)
print(e)
assert e.value.error.status == 404
assert e.value.error.code == "NotFound"
cluster_cleanup(standard_user_client, cluster)
def test_cluster_template_export():
# Create a DO cluster using rke config. Save a rketemplate from this
# cluster (with template name and revision V1).
# Create another cluster using the cluster template revision V1
k8sversionlist = get_k8s_versionlist()
standard_user_client = \
get_client_for_token(
user_token["stduser_with_createrketemplate_role"]["token"])
rke_config = getRKEConfig(k8sversionlist[0])
cluster_name = random_test_name("test-auto-export")
userToken = user_token["stduser_with_createrketemplate_role"]["token"]
cluster = create_node_cluster(standard_user_client, cluster_name,
rancherKubernetesEngineConfig=rke_config,
userToken=userToken)
# Export a Template
cluster.saveAsTemplate(clusterTemplateName="testnewrketemplate",
clusterTemplateRevisionName="v1")
cluster = standard_user_client.reload(cluster)
templateid = cluster.clusterTemplateId
revisionid = cluster.clusterTemplateRevisionId
# Create a new cluster using the template revision just exported
newcluster = create_node_cluster(
standard_user_client, name=random_test_name("test-auto"),
clusterTemplateRevisionId=revisionid, userToken=userToken)
newcluster = standard_user_client.reload(newcluster)
assert newcluster.appliedSpec.clusterTemplateId == templateid
assert newcluster.appliedSpec.clusterTemplateRevisionId == revisionid
cluster_cleanup(standard_user_client, cluster)
cluster_cleanup(standard_user_client, newcluster)
def test_cluster_template_enforcement_on_admin(request):
    # As an admin, turn ON enforcement and ensure that an admin can create
    # clusters using rke config and also using an rke template
try:
enforcement_settings_url = CATTLE_TEST_URL + \
"/v3/settings/cluster-template-enforcement"
data_test = {
"name": "cluster-template-enforcement",
"value": "true"
}
headers = {"Content-Type": "application/json",
"Accept": "application/json",
"Authorization": "Bearer " + ADMIN_TOKEN}
response = requests.put(enforcement_settings_url, json=data_test,
verify=False, headers=headers)
print(response.content)
k8sversionlist = get_k8s_versionlist()
cluster_config1 = get_cluster_config(k8sversionlist[0])
rke_config = getRKEConfig(k8sversionlist[0])
# Verify creating cluster using rkeconfig succeeds
client = get_admin_client()
cluster_name = random_test_name("test-auto-rkeconfig")
rkecluster = \
create_node_cluster(client, cluster_name,
rancherKubernetesEngineConfig=rke_config,
userToken=ADMIN_TOKEN)
# Verify creating cluster using rke template succeeds
cluster_template = client.create_cluster_template(
name=random_test_name("template"), description="test-template")
revision_name = random_test_name("revision1")
cluster_template_revision1 = client.create_cluster_template_revision(
name=revision_name,
clusterConfig=cluster_config1,
clusterTemplateId=cluster_template.id)
time.sleep(2)
cluster_template_revision1 = client.reload(
cluster_template_revision1)
cluster_name = random_test_name("test-auto")
cluster = create_node_cluster(
client, name=cluster_name,
clusterTemplateRevisionId=cluster_template_revision1.id,
userToken=ADMIN_TOKEN)
check_cluster_version(cluster, k8sversionlist[0])
# Reset the enforcement flag to false
finally:
data_test = {
"name": "cluster-template-enforcement",
"value": "false"
}
requests.put(enforcement_settings_url, json=data_test,
verify=False, headers=headers)
cluster_cleanup(client, cluster)
cluster_cleanup(client, rkecluster)
def test_cluster_template_enforcement_on_stduser():
    # As an admin, turn ON enforcement and ensure that standard users
    # can create clusters only using an rke template. Creating clusters using
    # a regular rke config should not be allowed
standard_user_client = \
get_client_for_token(
user_token["stduser_with_createrketemplate_role"]["token"])
k8sversionlist = get_k8s_versionlist()
cluster_config1 = get_cluster_config(k8sversionlist[0])
rke_config = getRKEConfig(k8sversionlist[0])
try:
enforcement_settings_url = CATTLE_TEST_URL + \
"/v3/settings/cluster-template-enforcement"
data_test = {
"name": "cluster-template-enforcement",
"value": "true"
}
headers = {"Content-Type": "application/json",
"Accept": "application/json",
"Authorization": "Bearer " + ADMIN_TOKEN}
response = requests.put(enforcement_settings_url, json=data_test,
verify=False, headers=headers)
print(response.content)
# Verify creating cluster using rke template succeeds
cluster_template = standard_user_client.create_cluster_template(
name=random_test_name("template"), description="test-template")
revision_name = random_test_name("revision1")
cluster_template_revision1 = \
standard_user_client.create_cluster_template_revision(
name=revision_name,
clusterConfig=cluster_config1,
clusterTemplateId=cluster_template.id)
time.sleep(2)
cluster_template_revision1 = standard_user_client.reload(
cluster_template_revision1)
cluster_name = random_test_name("test-auto")
userToken = user_token["stduser_with_createrketemplate_role"]["token"]
cluster = create_node_cluster(
standard_user_client, name=cluster_name,
clusterTemplateRevisionId=cluster_template_revision1.id,
userToken=userToken)
check_cluster_version(cluster, k8sversionlist[0])
# Verify creating cluster using rkeconfig fails. API returns error as:
# "MissingRequired : A clusterTemplateRevision to create a cluster"
cluster_name = random_test_name("test-auto-rkeconfig")
with pytest.raises(ApiError) as e:
create_node_cluster(standard_user_client, cluster_name,
rancherKubernetesEngineConfig=rke_config,
userToken=userToken)
print(e)
assert e.value.error.status == 422
assert e.value.error.code == "MissingRequired"
# Reset the enforcement flag to false
finally:
data_test = {
"name": "cluster-template-enforcement",
"value": "false"
}
requests.put(enforcement_settings_url, json=data_test,
verify=False, headers=headers)
cluster_cleanup(standard_user_client, cluster)
def test_cluster_template_create_with_cisscan_enabled():
k8sversionlist = get_k8s_versionlist()
# Obtain cluster config with cisscan enabled
cluster_config = get_cisscan_enabled_clusterconfig(k8sversionlist[0])
standard_user_client = \
get_client_for_token(
user_token["stduser_with_createrketemplate_role"]["token"])
userToken = user_token["stduser_with_createrketemplate_role"]["token"]
# Create a cluster template
cluster_template = standard_user_client.create_cluster_template(
name=random_test_name("template"), description="cis-enabled-template")
revision_name = random_test_name("revision1")
# Create a cluster template revision with the cis enabled cluster config
cluster_template_revision = \
standard_user_client.create_cluster_template_revision(
name=revision_name,
clusterConfig=cluster_config,
clusterTemplateId=cluster_template.id)
time.sleep(2)
cluster_template_revision = standard_user_client.reload(
cluster_template_revision)
cluster_name = random_test_name("test-auto")
# Create a cluster using the cluster template revision
cluster = create_node_cluster(
standard_user_client, name=cluster_name,
clusterTemplateRevisionId=cluster_template_revision.id,
userToken=userToken)
check_cluster_version(cluster, k8sversionlist[0])
# Verify that the cluster's applied spec has the cis scan parameters
# set as expected
assert cluster.appliedSpec. \
scheduledClusterScan.enabled == True
assert cluster.appliedSpec.scheduledClusterScan.\
scanConfig.type == "/v3/schemas/clusterScanConfig"
assert cluster.appliedSpec. \
scheduledClusterScan.scanConfig.\
cisScanConfig.overrideBenchmarkVersion == "rke-cis-1.4"
assert cluster.appliedSpec. \
scheduledClusterScan.scanConfig.cisScanConfig.profile == "permissive"
assert cluster.appliedSpec.scheduledClusterScan.scheduleConfig.\
cronSchedule == "0 */1 * * *"
cluster_cleanup(standard_user_client, cluster)
def test_cluster_template_create_with_monitoring():
k8sversionlist = get_k8s_versionlist()
# Obtain cluster config with monitoring enabled
cluster_config = get_cluster_config(k8sversionlist[0],
enableMonitoring="true")
standard_user_client = \
get_client_for_token(
user_token["stduser_with_createrketemplate_role"]["token"])
userToken = user_token["stduser_with_createrketemplate_role"]["token"]
# Create a cluster template
cluster_template = standard_user_client.\
create_cluster_template(name=random_test_name("template"),
description="test-template")
revision_name = random_test_name("revision1")
# Create cluster template revision with monitoring enabled cluster config
cluster_template_revision = \
standard_user_client.create_cluster_template_revision(
name=revision_name,
clusterConfig=cluster_config,
clusterTemplateId=cluster_template.id)
time.sleep(2)
cluster_template_revision = standard_user_client.reload(
cluster_template_revision)
cluster_name = random_test_name("test-auto")
# Create a cluster using the cluster template revision
cluster = create_node_cluster(
standard_user_client, name=cluster_name, nodecount=3, nodesize="8gb",
clusterTemplateRevisionId=cluster_template_revision.id,
userToken=userToken)
check_cluster_version(cluster, k8sversionlist[0])
assert cluster.appliedSpec.enableClusterMonitoring == True
# Verify the monitoring apps are deployed and active
system_project = \
standard_user_client.list_project(clusterId=cluster.id,
name="System").data[0]
sys_proj_client = get_project_client_for_token(system_project, USER_TOKEN)
wait_for_app_to_active(sys_proj_client, CLUSTER_MONITORING_APP, 1000)
wait_for_app_to_active(sys_proj_client, MONITORING_OPERATOR_APP, 1000)
# wait for all graphs to be available
time.sleep(60 * 3)
cluster_monitoring_obj = standard_user_client.list_clusterMonitorGraph()
# generate the request payload
query1 = copy.deepcopy(cluster_query_template)
query1["obj"] = cluster_monitoring_obj
query1["filters"]["clusterId"] = cluster.id
query1["filters"]["resourceType"] = "cluster"
# Verify graphs are generated
validate_cluster_graph(query1, "cluster")
cluster_cleanup(standard_user_client, cluster)
def test_cluster_template_create_update_with_monitoring():
'''
Create an RKE template/revision T1/R1 with enable_cluster_monitoring:false
Create a cluster using revision R1.
Enable monitoring after cluster is active.
Create another revision R2 and update the cluster using R2 and setting
enableClusterMonitoring=false. The user will be able to upgrade the cluster
to this revision R2 but enable_cluster_monitoring flag from the template
should be ignored. Monitoring should continue to function in the cluster
'''
global MONITORING_VERSION
k8sversionlist = get_k8s_versionlist()
    # Obtain cluster configs with monitoring disabled
cluster_config1 = get_cluster_config(k8sversionlist[0])
cluster_config2 = get_cluster_config(k8sversionlist[1])
standard_user_client = \
get_client_for_token(
user_token["stduser_with_createrketemplate_role"]["token"])
userToken = user_token["stduser_with_createrketemplate_role"]["token"]
# Create a cluster template
cluster_template = standard_user_client.\
create_cluster_template(name=random_test_name("template"),
description="test-template")
revision_name1 = random_test_name("revision1")
revision_name2 = random_test_name("revision2")
# Create cluster template revision without enabling monitoring
cluster_template_revision1 = \
standard_user_client.create_cluster_template_revision(
name=revision_name1,
clusterConfig=cluster_config1,
clusterTemplateId=cluster_template.id)
time.sleep(2)
cluster_template_revision1 = standard_user_client.reload(
cluster_template_revision1)
cluster_name = random_test_name("test-auto")
# Create a cluster using the cluster template revision created
cluster = create_node_cluster(
standard_user_client, name=cluster_name, nodecount=3, nodesize="8gb",
clusterTemplateRevisionId=cluster_template_revision1.id,
userToken=userToken)
check_cluster_version(cluster, k8sversionlist[0])
assert cluster.clusterTemplateRevisionId == cluster_template_revision1.id
monitoring_template = standard_user_client.list_template(
id=MONITORING_TEMPLATE_ID).data[0]
if MONITORING_VERSION == "":
MONITORING_VERSION = monitoring_template.defaultVersion
print("MONITORING_VERSION=" + MONITORING_VERSION)
# Enable cluster monitoring using the standard user client
if cluster["enableClusterMonitoring"] is False:
standard_user_client.action(cluster, "enableMonitoring",
answers=C_MONITORING_ANSWERS,
version=MONITORING_VERSION)
system_project = standard_user_client.list_project(clusterId=cluster.id,
name="System").data[0]
sys_proj_client = get_project_client_for_token(system_project, userToken)
# Validate Cluster Monitoring Apps
validate_cluster_monitoring_apps(client=sys_proj_client)
# Create another cluster template revision with K8s version v2 and having
# the default setting enableClusterMonitoring=false
cluster_template_revision2 = \
standard_user_client.create_cluster_template_revision(
name=revision_name2,
clusterConfig=cluster_config2,
clusterTemplateId=cluster_template.id)
time.sleep(2)
cluster_template_revision2 = standard_user_client.reload(
cluster_template_revision2)
# Update cluster with with Revision R2
cluster = \
standard_user_client.update(
cluster, name=cluster_name,
clusterTemplateRevisionId=cluster_template_revision2.id)
time.sleep(2)
cluster = standard_user_client.reload(cluster)
cluster = validate_cluster_with_template(standard_user_client,
cluster,
intermediate_state="updating",
userToken=userToken)
check_cluster_version(cluster, k8sversionlist[1])
assert cluster.clusterTemplateRevisionId == cluster_template_revision2.id
# Reload cluster object after an update
cluster = standard_user_client.reload(cluster)
# Validate Cluster Monitoring Apps
validate_cluster_monitoring_apps(client=sys_proj_client)
cluster_cleanup(standard_user_client, cluster)
def test_clustertemplate_readonly_member_edit_delete():
# As an admin, create cluster template/revision and provide "user/read-only
# access" to standard user. The user should not be able to edit/delete
# cluster template/revision
k8sversionlist = get_k8s_versionlist()
admin_client = get_admin_client()
cluster_config = get_cluster_config(k8sversionlist[0])
# Obtain the principal ID of the standard user
principalId = user_token["standard_user"]["user"]["principalIds"]
# Create a cluster template and provide standard user "user access" to the
# cluster template
members = [{"type": "member", "accessType": "read-only",
"userPrincipalId": principalId}]
cluster_template_name = random_test_name("template")
cluster_template = \
admin_client.create_cluster_template(name=cluster_template_name,
description="test-template",
members=members)
clusterTemplateId = cluster_template.id
# Create Cluster template revision
revision_name = random_test_name("revision")
cluster_template_revision = \
admin_client.create_cluster_template_revision(
name=revision_name,
clusterConfig=cluster_config,
clusterTemplateId=clusterTemplateId,
enabled="true")
standard_user_client = \
get_client_for_token(
user_token["standard_user"]["token"])
# Verify the standard user does not have permission to edit/update the
# template as he has only user access
members = [{"type": "member", "accessType": "read-only",
"userPrincipalId": principalId},
{"type": "member", "accessType": "read-only",
"groupPrincipalId": "*"}]
with pytest.raises(ApiError) as e:
standard_user_client.update(cluster_template,
name="sjtest",
description="test-template",
members=members)
assert e.value.error.status == 403
assert e.value.error.code == "PermissionDenied"
# Verify the standard user does not have permission to delete the
# revision as he has only user/read-only access
with pytest.raises(ApiError) as e:
standard_user_client.delete(cluster_template_revision)
assert e.value.error.status == 403
assert e.value.error.code == "PermissionDenied"
# Verify the standard user does not have permission to delete the
# template as he has only user access
with pytest.raises(ApiError) as e:
standard_user_client.delete(cluster_template)
assert e.value.error.status == 403
assert e.value.error.code == "PermissionDenied"
def validate_cluster_with_template(client, cluster,
intermediate_state="provisioning",
check_intermediate_state=True,
nodes_not_in_active_state=[],
k8s_version="", userToken=USER_TOKEN):
'''
In this method, we are checking cluster state, verifying state of workloads
in system project. For user workloads, we are just checking the state. We
are skipping the kubectl verification for user workloads because of this
issue: https://github.com/rancher/rancher/issues/27788
Hence this method is introduced locally in test_cluster_templates.py and
we are not using validate_cluster method from common.py
'''
# Allow sometime for the "cluster_owner" CRTB to take effect
time.sleep(5)
cluster = validate_cluster_state(
client, cluster,
check_intermediate_state=check_intermediate_state,
intermediate_state=intermediate_state,
nodes_not_in_active_state=nodes_not_in_active_state)
create_kubeconfig(cluster)
if k8s_version != "":
check_cluster_version(cluster, k8s_version)
if hasattr(cluster, 'rancherKubernetesEngineConfig'):
check_cluster_state(len(get_role_nodes(cluster, "etcd", client)))
# check all workloads under the system project are active
# wait for workloads to be active
# time.sleep(DEFAULT_TIMEOUT)
print("checking if workloads under the system project are active")
sys_project = client.list_project(name='System',
clusterId=cluster.id).data[0]
sys_p_client = get_project_client_for_token(sys_project, userToken)
for wl in sys_p_client.list_workload().data:
wait_for_wl_to_active(sys_p_client, wl,
timeout=DEFAULT_CLUSTER_STATE_TIMEOUT)
# Create Daemon set workload and have an Ingress with Workload
# rule pointing to this daemonSet
project, ns = create_project_and_ns(userToken, cluster)
p_client = get_project_client_for_token(project, userToken)
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
return cluster
def cluster_template_create_edit(userToken):
# Method to create cluster template revisions R1, R2.
# Create a cluster with a RKE template revision R1.
# Then edit the cluster and change the revision to R2
k8sversionlist = get_k8s_versionlist()
cluster_config1 = get_cluster_config(k8sversionlist[0])
cluster_config2 = get_cluster_config(k8sversionlist[1])
client = get_client_for_token(userToken)
cluster_template = client.create_cluster_template(
name=random_test_name("template"), description="test-template")
revision1_name = random_test_name("revision1")
cluster_template_revision1 = client.create_cluster_template_revision(
name=revision1_name,
clusterConfig=cluster_config1,
clusterTemplateId=cluster_template.id)
time.sleep(2)
cluster_template_revision1 = client.reload(
cluster_template_revision1)
cluster_name = random_test_name("test-auto")
cluster = create_node_cluster(
client, name=cluster_name,
clusterTemplateRevisionId=cluster_template_revision1.id,
userToken=userToken)
check_cluster_version(cluster, k8sversionlist[0])
assert cluster.clusterTemplateRevisionId == cluster_template_revision1.id
revision2_name = random_test_name("revision2")
cluster_template_revision2 = client.create_cluster_template_revision(
name=revision2_name,
clusterConfig=cluster_config2,
clusterTemplateId=cluster_template.id)
time.sleep(2)
cluster_template_revision2 = client.reload(
cluster_template_revision2)
cluster = \
client.update(
cluster, name=cluster_name,
clusterTemplateRevisionId=cluster_template_revision2.id)
cluster = validate_cluster_with_template(client,
cluster,
intermediate_state="updating",
userToken=userToken)
check_cluster_version(cluster, k8sversionlist[1])
assert cluster.clusterTemplateRevisionId == cluster_template_revision2.id
cluster_cleanup(client, cluster)
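# Create a DigitalOcean node template backed by a DO cloud credential; used by
# create_node_cluster below to provision node pools.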
def node_template_digocean(userclient, nodesize):
client = userclient
do_cloud_credential_config = {"accessToken": DO_ACCESSKEY}
do_cloud_credential = client.create_cloud_credential(
digitaloceancredentialConfig=do_cloud_credential_config)
time.sleep(3)
node_template = client.create_node_template(
digitaloceanConfig={"region": "nyc3",
"size": nodesize,
"image": "ubuntu-18-04-x64"},
name=random_name(),
driver="digitalocean",
namespaceId="dig",
cloudCredentialId=do_cloud_credential.id,
engineInstallURL=engine_install_url,
useInternalIpAddress=True)
node_template = client.wait_success(node_template)
return node_template
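# Create a cluster either from a raw RKE config or from a cluster template
# revision, attach a single all-role node pool of the requested size, and wait
# for the cluster and its nodes to become active.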
def create_node_cluster(userclient, name, nodecount=1, nodesize="4gb",
clusterTemplateRevisionId=None,
rancherKubernetesEngineConfig=None, answers=None,
userToken=None):
client = userclient
if(rancherKubernetesEngineConfig is not None):
cluster = client.create_cluster(
name=name,
rancherKubernetesEngineConfig=rancherKubernetesEngineConfig)
else:
cluster = \
client.create_cluster(
name=name,
clusterTemplateRevisionId=clusterTemplateRevisionId,
answers=answers)
nodetemplate = node_template_digocean(client, nodesize)
nodes = []
node = {"hostnamePrefix": random_test_name("test-auto"),
"nodeTemplateId": nodetemplate.id,
"requestedHostname": "test-auto-template",
"controlPlane": True,
"etcd": True,
"worker": True,
"quantity": nodecount,
"clusterId": None}
nodes.append(node)
node_pools = []
for node in nodes:
node["clusterId"] = cluster.id
success = False
start = time.time()
while not success:
if time.time() - start > 10:
raise AssertionError(
"Timed out waiting for cluster owner global Roles")
try:
time.sleep(1)
node_pool = client.create_node_pool(**node)
success = True
except ApiError:
success = False
node_pool = client.wait_success(node_pool)
node_pools.append(node_pool)
cluster = validate_cluster(client, cluster, userToken=userToken)
nodes = client.list_node(clusterId=cluster.id).data
    # the cluster should report the number of nodes requested via the node pool
    assert len(nodes) == nodecount
for node in nodes:
assert node.state == "active"
return cluster
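# Baseline RKE config used by the template tests: canal networking, nginx
# ingress, metrics-server monitoring, and S3-backed recurring etcd snapshots.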
def getRKEConfig(k8sversion):
rke_config = {
"addonJobTimeout": 30,
"ignoreDockerVersion": "true",
"sshAgentAuth": "false",
"type": "rancherKubernetesEngineConfig",
"kubernetesVersion": k8sversion,
"authentication": {
"strategy": "x509",
"type": "authnConfig"
},
"network": {
"plugin": "canal",
"type": "networkConfig",
"options": {
"flannel_backend_type": "vxlan"
}
},
"ingress": {
"provider": "nginx",
"type": "ingressConfig"
},
"monitoring": {
"provider": "metrics-server",
"type": "monitoringConfig"
},
"services": {
"type": "rkeConfigServices",
"kubeApi": {
"alwaysPullImages": "false",
"podSecurityPolicy": "false",
"serviceNodePortRange": "30000-32767",
"type": "kubeAPIService"
},
"etcd": {
"creation": "12h",
"extraArgs": {
"heartbeat-interval": 500,
"election-timeout": 5000
},
"retention": "72h",
"snapshot": "false",
"type": "etcdService",
"backupConfig": {
"enabled": "true",
"intervalHours": 12,
"retention": 6,
"type": "backupConfig",
"s3BackupConfig": {
"type": "s3BackupConfig",
"accessKey": AWS_ACCESS_KEY_ID,
"secretKey": AWS_SECRET_ACCESS_KEY,
"bucketName": "test-auto-s3",
"endpoint": "s3.amazonaws.com"
}
}
}
}
}
return rke_config
| 45,280 | 39.32146 | 79 | py |
rancher | rancher-master/tests/validation/tests/v3_api/cli_common.py |
import os
import logging
import sys
import time
from .common import run_command, run_command_with_stderr
logging.basicConfig(stream=sys.stdout,
level=os.environ.get("LOGLEVEL", "INFO"),
format='%(asctime)s - %(filename)s:%(funcName)s'
':%(lineno)d - [%(levelname)5s]: %(message)s',
datefmt="%H:%M:%S")
DEFAULT_TIMEOUT = 60
class BaseCli:
log = logging.getLogger(__name__)
DEFAULT_CONTEXT = os.environ.get('DEFAULT_CONTEXT', None)
@classmethod
def run_command(cls, command, expect_error=False):
command = "rancherctl {}".format(command)
cls.log.debug("run cmd:\t%s", command)
if expect_error:
result = run_command_with_stderr(command, log_out=False)
else:
result = run_command(command, log_out=False)
cls.log.debug("returns:\t%s", result)
return result
def set_log_level(self, level):
self.log.setLevel(level)
def login(self, url, token, **kwargs):
context = kwargs.get("context", self.DEFAULT_CONTEXT)
if context is None:
raise ValueError("No context supplied for rancher login!")
cmd = "login {} --token {} --context {} --skip-verify".format(
url, token, context)
self.run_command(cmd, expect_error=True)
def switch_context(self, project_id):
self.run_command("context switch {}".format(project_id),
expect_error=True)
def get_context(self):
result = self.run_command("context current")
cluster_name = result[8:result.index(" ")].strip()
project_name = result[result.index("Project:") + 8:].strip()
return cluster_name, project_name
def get_cluster_by_name(self, name):
for c in self.get_clusters():
if c["name"] == name:
return c
def get_current_cluster(self):
for c in self.get_clusters():
if c["current"]:
return c
def get_clusters(self):
result = self.run_command("clusters ls --format '{{.Cluster.ID}}"
"|{{.Cluster.Name}}|{{.Current}}|{{.Cluster.UUID}}'")
clusters = []
for c in result.splitlines():
c = c.split("|")
cluster = {
"id": c[0],
"name": c[1],
"current": c[2] == "*",
"uuid": c[3]
}
clusters.append(cluster)
return clusters
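    # 'clusters ls' output is parsed from the pipe-delimited Go template passed
    # above; the entry whose Current column is "*" is the active cluster.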
def inspect(self, resource_type, resource_id, **kwargs):
resource_format = kwargs.get("format", "{{.id}}")
result = self.run_command("inspect --type {} --format '{}' {}".format(
resource_type, resource_format, resource_id))
return result.strip()
def ps(self):
return self.run_command(
"ps --format '{{.NameSpace}}|{{.Name}}|{{.Image}}|{{.Scale}}'")
def kubectl(self, cmd):
return self.run_command("kubectl {}".format(cmd))
def wait_for_ready(self, command, val_to_check, **kwargs):
timeout = kwargs.get("timeout", DEFAULT_TIMEOUT)
condition_func = kwargs.get("condition_func",
lambda val, l: val in l.splitlines())
done = False
start_time = time.time()
while not done and time.time() - start_time < timeout:
result = self.run_command(command)
if condition_func(val_to_check, result):
done = True
return done
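# Example usage (illustrative sketch; the URL, token, and "cluster:project"
# context below are placeholder values that would normally come from the test
# environment):
#
#   cli = BaseCli()
#   cli.login("https://rancher.example.com", "token-xxxxx",
#             context="c-abc123:p-def456")
#   cli.switch_context("c-abc123:p-def456")
#   cluster_name, project_name = cli.get_context()
#   assert cli.wait_for_ready(
#       "clusters ls --format '{{.Cluster.Name}}'", cluster_name)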
| 3,552 | 34.178218 | 87 | py |
rancher | rancher-master/tests/validation/tests/v3_api/test_ebs_volume_backed_instance.py |
import os
import pytest
import time
from lib.aws import AmazonWebServices
from rancher import ApiError
from .common import (
AWS_ACCESS_KEY_ID,
AWS_SECRET_ACCESS_KEY,
AWS_REGION,
AWS_SG,
AWS_SUBNET,
AWS_VPC,
AWS_ZONE,
DEFAULT_TIMEOUT,
cluster_cleanup,
get_user_client,
random_name,
wait_for_cluster_delete,
wait_for_nodes_to_become_active
)
from .test_rke_cluster_provisioning import (
validate_rke_dm_host_1,
engine_install_url
)
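# The fixtures below build an EC2 node template with encryptEbsVolume=True and a
# single-node cluster from it; the test then checks that every EBS volume attached
# to the provisioned instances reports Encrypted=True.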
def test_provision_encrypted_instance(client,
encrypted_cluster_nodepool):
"""
Provisions an EC2 nodepool with encrypted EBS volume backed instances by providing
a flag on the node template and ensures that the provisioned instances are encrypted
"""
cluster, nodepools = encrypted_cluster_nodepool
aws_nodes = get_aws_nodes_from_nodepools(client, cluster, nodepools)
check_if_volumes_are_encrypted(aws_nodes)
def get_aws_nodes_from_nodepools(client, cluster, nodepools):
"""
Retrieves the AWS Nodes related to the nodes in the nodepool so that
methods invoking the AWS CLI defined in aws.py can be called on the nodes
"""
wait_for_nodes_to_become_active(client, cluster)
aws_nodes = []
for nodepool in nodepools:
nodes = nodepool.nodes().data
for node in nodes:
node_ip_address = node['ipAddress']
ip_address_filter = [{
'Name': 'private-ip-address', 'Values': [node_ip_address]}]
            # avoid rebinding the ``nodes`` list that is being iterated over
            matched_nodes = AmazonWebServices().get_nodes(ip_address_filter)
            assert len(matched_nodes) == 1, \
                "Multiple aws_nodes seem to have private-ip-address %s" \
                % node_ip_address
            aws_nodes.append(matched_nodes[0])
return aws_nodes
def check_if_volumes_are_encrypted(aws_nodes):
"""
Given a set of AWS Nodes, return whether the nodes have encrypted EBS volumes
"""
for aws_node in aws_nodes:
provider_node_id = aws_node.provider_node_id
volumes = AmazonWebServices().get_ebs_volumes(provider_node_id)
for volume in volumes:
assert volume['Encrypted']
@pytest.fixture(scope='module')
def client():
"""
A user client to be used in tests
"""
return get_user_client()
@pytest.fixture(scope='module')
def node_template_ec2_with_encryption(client):
"""
A node template that defines a set of encrypted EC2 volume backed instances
"""
def _attempt_delete_node_template(client, node_template,
timeout=DEFAULT_TIMEOUT,
sleep_time=.5):
start = time.time()
while node_template:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for node template %s to get deleted"
% node_template["name"])
time.sleep(sleep_time)
client.reload(node_template)
try:
client.delete(node_template)
break
except ApiError:
pass
except Exception as e:
raise e
ec2_cloud_credential_config = {"accessKey": AWS_ACCESS_KEY_ID,
"secretKey": AWS_SECRET_ACCESS_KEY}
ec2_cloud_credential = client.create_cloud_credential(
amazonec2credentialConfig=ec2_cloud_credential_config
)
amazonec2Config = {
"instanceType": "t3a.medium",
"region": AWS_REGION,
"rootSize": "16",
"securityGroup": [AWS_SG],
"sshUser": "ubuntu",
"subnetId": AWS_SUBNET,
"usePrivateAddress": False,
"volumeType": "gp2",
"vpcId": AWS_VPC,
"zone": AWS_ZONE,
"encryptEbsVolume": True
}
node_template = client.create_node_template(
amazonec2Config=amazonec2Config,
name=random_name(),
useInternalIpAddress=True,
driver="amazonec2",
engineInstallURL=engine_install_url,
cloudCredentialId=ec2_cloud_credential.id
)
node_template = client.wait_success(node_template)
yield node_template
_attempt_delete_node_template(client, node_template)
@pytest.fixture(scope='module')
def encrypted_cluster_nodepool(client, node_template_ec2_with_encryption):
"""
Returns a cluster with a single nodepool of encrypted EBS volume backed EC2 instances
"""
cluster, nodepools = validate_rke_dm_host_1(
node_template_ec2_with_encryption,
attemptDelete=False)
yield (cluster, nodepools)
cluster_cleanup(client, cluster)
wait_for_cluster_delete(client, cluster["name"])
| 4,698 | 30.965986 | 89 | py |
rancher | rancher-master/tests/validation/tests/v3_api/test_node_template.py |
import os
import pytest
from rancher import ApiError
from .common import * # NOQA
from .test_rke_cluster_provisioning import rke_config
from .test_rke_cluster_provisioning import random_node_name
from .test_rke_cluster_provisioning import create_and_validate_cluster
DO_ACCESSKEY = os.environ.get('DO_ACCESSKEY', "None")
engine_install_url = "https://releases.rancher.com/install-docker/20.10.sh"
user_clients = {"admin": None, "standard_user_1": None,
"standard_user_2": None}
# --------------------- rbac tests for node template -----------------------
@if_test_rbac
def test_rbac_node_template_create(remove_resource):
# As std user, create a node template
node_template = create_node_template_do(user_clients["standard_user_1"])
remove_resource(node_template)
templates = user_clients["standard_user_1"].list_node_template(
name=node_template.name)
assert len(templates) == 1
@if_test_rbac
def test_rbac_node_template_list(remove_resource):
# User client should be able to list template it has created
node_template = create_node_template_do(user_clients["standard_user_1"])
templates = user_clients["standard_user_1"].list_node_template(
name=node_template.name)
remove_resource(node_template)
assert len(templates) == 1
# Admin should be able to list template
templates = user_clients["admin"].list_node_template(
name=node_template.name)
assert len(templates) == 1
# User 2 should not be able to list templates
templates2 = user_clients["standard_user_2"].list_node_template(
name=node_template.name)
assert len(templates2) == 0
@if_test_rbac
def test_rbac_node_template_delete(remove_resource):
# User client should be able to delete template it has created
node_template = create_node_template_do(user_clients["standard_user_1"])
# User1 should be able to delete own template
user_clients["standard_user_1"].delete(node_template)
templates = user_clients["standard_user_1"].list_node_template(
name=node_template.name)
assert len(templates) == 0
# Admin should be able to delete template created by user1
node_template2 = create_node_template_do(user_clients["standard_user_1"])
user_clients["admin"].delete(node_template2)
templates = user_clients["standard_user_1"].list_node_template(
name=node_template2.name)
assert len(templates) == 0
# User 2 should not be able to delete template created by user1
node_template3 = create_node_template_do(user_clients["standard_user_1"])
remove_resource(node_template3)
with pytest.raises(ApiError) as e:
user_clients["standard_user_2"].delete(node_template3)
assert e.value.error.status == 403
@if_test_rbac
def test_rbac_node_template_edit(remove_resource):
# User client should be able to edit template it has created
node_template = create_node_template_do(user_clients["standard_user_1"])
remove_resource(node_template)
# User1 should be able to edit own template
    name_edit = random_name()
user_clients["standard_user_1"].update(node_template, name=name_edit,
digitaloceanConfig=
{"region": "nyc3",
"size": "2gb",
"image": "ubuntu-16-04-x64"})
templates = user_clients["standard_user_1"].list_node_template(
name=name_edit)
assert len(templates) == 1
# Admin should be able to edit template created by user1
    name_edit = random_name()
user_clients["admin"].update(node_template, name=name_edit,
digitaloceanConfig=
{"region": "nyc3",
"size": "2gb",
"image": "ubuntu-16-04-x64"})
templates = user_clients["standard_user_1"].list_node_template(
name=name_edit)
assert len(templates) == 1
# User 2 should not be able to edit template created by user1
with pytest.raises(ApiError) as e:
user_clients["standard_user_2"].update(node_template,
name=random_name(),
digitaloceanConfig=
{"region": "nyc3",
"size": "2gb",
"image": "ubuntu-16-04-x64"})
assert e.value.error.status == 403
@if_test_rbac
def test_rbac_node_template_deploy_cluster(remove_resource):
# Admin should be able to use template to create cluster
node_template = create_node_template_do(user_clients["standard_user_1"])
create_and_validate_do_cluster(node_template)
# -------------- rbac tests for cloud credentials --------------
@if_test_rbac
def test_rbac_cloud_credential_create(remove_resource):
# As std user, create a node template
cloud_credential = create_cloud_credential_do(user_clients[
"standard_user_1"])
remove_resource(cloud_credential)
credentials = user_clients["standard_user_1"].list_cloud_credential(
name=cloud_credential.name)
assert len(credentials) == 1
@if_test_rbac
def test_rbac_cloud_credential_list(remove_resource):
# User client should be able to list credential it has created
cloud_credential = create_cloud_credential_do(user_clients[
"standard_user_1"])
remove_resource(cloud_credential)
credentials = user_clients["standard_user_1"].list_cloud_credential(
name=cloud_credential.name)
assert len(credentials) == 1
# Admin should be able to list credential
credentials = user_clients["admin"].list_cloud_credential(
name=cloud_credential.name)
assert len(credentials) == 1
# User 2 should not be able to list credential
credentials2 = user_clients["standard_user_2"].list_cloud_credential(
name=cloud_credential.name)
assert len(credentials2) == 0
@if_test_rbac
def test_rbac_cloud_credential_delete(remove_resource):
# User client should be able to delete credential it has created
cloud_credential = create_cloud_credential_do(user_clients[
"standard_user_1"])
# User1 should be able to delete own credential
user_clients["standard_user_1"].delete(cloud_credential)
credentials = user_clients["standard_user_1"].list_cloud_credential(
name=cloud_credential.name)
assert len(credentials) == 0
# Admin should be able to delete credential created by user1
cloud_credential2 = create_cloud_credential_do(user_clients[
"standard_user_1"])
user_clients["admin"].delete(cloud_credential2)
credentials = user_clients["standard_user_1"].list_cloud_credential(
name=cloud_credential2.name)
assert len(credentials) == 0
# User 2 should not be able to delete credential created by user1
cloud_credential3 = create_cloud_credential_do(user_clients[
"standard_user_1"])
remove_resource(cloud_credential3)
with pytest.raises(ApiError) as e:
user_clients["standard_user_2"].delete(cloud_credential3)
assert e.value.error.status == 403
@if_test_rbac
def test_rbac_cloud_credential_edit(remove_resource):
# User client should be able to edit credential it has created
cloud_credential = create_cloud_credential_do(user_clients[
"standard_user_1"])
remove_resource(cloud_credential)
# User1 should be able to edit own credential
do_cloud_credential_config = {"name": "testName1"}
user_clients["standard_user_1"].update(cloud_credential,
digitaloceancredentialConfig=
do_cloud_credential_config)
# Admin should be able to edit credential created by user1
do_cloud_credential_config = {"name": "testname2"}
user_clients["admin"].update(cloud_credential,
digitaloceancredentialConfig=
do_cloud_credential_config)
# User 2 should not be able to edit credential created by user1
with pytest.raises(ApiError) as e:
do_cloud_credential_config = {"name": "testname3"}
user_clients["standard_user_2"].update(cloud_credential,
digitaloceancredentialConfig=
do_cloud_credential_config)
assert e.value.error.status == 403
@if_test_rbac
def test_rbac_cloud_credential_deploy_cluster(remove_resource):
# Admin should be able to use credential created by user1
# to create a cluster using a node template
cloud_credential = create_cloud_credential_do(user_clients[
"standard_user_1"])
node_template = create_node_template_do(user_clients["standard_user_1"],
cloud_credential)
create_and_validate_do_cluster(node_template)
# --------------------- helper functions -----------------------
def create_node_template_do(client, cloud_credential=None):
if cloud_credential:
do_cloud_credential = cloud_credential
else:
do_cloud_credential_config = {"accessToken": DO_ACCESSKEY}
do_cloud_credential = client.create_cloud_credential(
digitaloceancredentialConfig=do_cloud_credential_config
)
node_template = client.create_node_template(
digitaloceanConfig={"region": "nyc3",
"size": "2gb",
"image": "ubuntu-18-04-x64"},
name=random_name(),
driver="digitalocean",
cloudCredentialId=do_cloud_credential.id,
engineInstallURL=engine_install_url,
useInternalIpAddress=True)
node_template = client.wait_success(node_template)
return node_template
def create_cloud_credential_do(client):
do_cloud_credential_config = {"accessToken": DO_ACCESSKEY}
do_cloud_credential = client.create_cloud_credential(
digitaloceancredentialConfig=do_cloud_credential_config
)
return do_cloud_credential
def create_and_validate_do_cluster(node_template,
rancherKubernetesEngineConfig=rke_config,
attemptDelete=True):
nodes = []
node_name = random_node_name()
node = {"hostnamePrefix": node_name,
"nodeTemplateId": node_template.id,
"controlPlane": True,
"etcd": True,
"worker": True,
"quantity": 1,
"clusterId": None}
nodes.append(node)
cluster, node_pools = create_and_validate_cluster(
user_clients["admin"], nodes, rancherKubernetesEngineConfig,
clusterName=random_name())
if attemptDelete:
cluster_cleanup(user_clients["admin"], cluster)
else:
return cluster, node_pools
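# A minimal sketch of how the helpers above compose (not an extra test, just
# the intended call order once the create_project_client fixture below has
# populated user_clients and DO_ACCESSKEY is set):
#
#   cred = create_cloud_credential_do(user_clients["standard_user_1"])
#   template = create_node_template_do(user_clients["standard_user_1"], cred)
#   create_and_validate_do_cluster(template)  # provisions, validates, cleans up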
@pytest.fixture(autouse=True)
def create_project_client(request):
user_clients["standard_user_1"] = get_user_client()
user_clients["admin"] = get_admin_client()
user1, user1_token = create_user(user_clients["admin"])
user_clients["standard_user_2"] = get_client_for_token(user1_token)
def fin():
user_clients["admin"].delete(user1)
request.addfinalizer(fin)
| 12,187 | 44.140741 | 78 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_websocket.py
|
import base64
import pytest
import time
import urllib
from .common import CATTLE_TEST_URL
from .common import USER_TOKEN
from .common import TEST_IMAGE
from .common import create_kubeconfig
from .common import create_connection
from .common import create_project_and_ns
from .common import get_user_client_and_cluster
from .common import get_project_client_for_token
from .common import random_test_name
from .common import validate_workload
from .common import WebsocketLogParse
namespace = {"cluster": None, "shell_url": None, "pod": None, "ns": ""}
def test_websocket_launch_kubectl():
ws = create_connection(namespace["shell_url"], ["base64.channel.k8s.io"])
logparse = WebsocketLogParse()
logparse.start_thread(target=logparse.receiver, args=(ws, True))
cmd = "kubectl version"
checks = ["Client Version", "Server Version"]
validate_command_execution(ws, cmd, logparse, checks)
logparse.last_message = ''
cmd = "kubectl get ns -o name"
checks = ["namespace/kube-system"]
validate_command_execution(ws, cmd, logparse, checks)
logparse.last_message = ''
ws.close()
def test_websocket_exec_shell():
url_base = 'wss://' + CATTLE_TEST_URL[8:] + \
'/k8s/clusters/' + namespace["cluster"].id + \
'/api/v1/namespaces/' + namespace["ns"] + \
'/pods/' + namespace["pod"].name + \
'/exec?container=' + namespace["pod"].containers[0].name
params_dict = {
"stdout": 1,
"stdin": 1,
"stderr": 1,
"tty": 1,
"command": [
'/bin/sh',
'-c',
'TERM=xterm-256color; export TERM; [ -x /bin/bash ] && ([ -x '
'/usr/bin/script ] && /usr/bin/script -q -c "/bin/bash" '
'/dev/null || exec /bin/bash) || exec /bin/sh '
]
}
params = urllib.parse.urlencode(params_dict, doseq=True,
quote_via=urllib.parse.quote, safe='()')
url = url_base + "&" + params
ws = create_connection(url, ["base64.channel.k8s.io"])
logparse = WebsocketLogParse()
logparse.start_thread(target=logparse.receiver, args=(ws, True))
cmd = "ls"
checks = ["bin", "boot", "dev"]
validate_command_execution(ws, cmd, logparse, checks)
logparse.last_message = ''
ws.close()
def test_websocket_view_logs():
url_base = 'wss://' + CATTLE_TEST_URL[8:] + \
'/k8s/clusters/' + namespace["cluster"].id + \
'/api/v1/namespaces/' + namespace["ns"] + \
'/pods/' + namespace["pod"].name + \
'/log?container=' + namespace["pod"].containers[0].name
params_dict = {
"tailLines": 500,
"follow": True,
"timestamps": True,
"previous": False,
}
params = urllib.parse.urlencode(params_dict, doseq=True,
quote_via=urllib.parse.quote, safe='()')
url = url_base + "&" + params
ws = create_connection(url, ["base64.binary.k8s.io"])
logparse = WebsocketLogParse()
logparse.start_thread(target=logparse.receiver, args=(ws, False))
print('\noutput:\n' + logparse.last_message + '\n')
assert 'websocket' in logparse.last_message, \
"failed to view logs"
logparse.last_message = ''
ws.close()
@pytest.fixture(scope='module', autouse=True)
def create_project_client(request):
client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
project, ns = create_project_and_ns(USER_TOKEN,
cluster,
random_test_name("websocket"))
p_client = get_project_client_for_token(project, USER_TOKEN)
con = [{"name": random_test_name(),
"image": TEST_IMAGE,
"entrypoint": ["/bin/sh"],
"command": ["-c",
"while true; do echo websocket; sleep 1s; done;"
],
}]
wl = p_client.create_workload(name=random_test_name(),
containers=con,
namespaceId=ns.id)
validate_workload(p_client, wl, "deployment", ns.name)
pod = p_client.list_pod(workloadId=wl.id).data[0]
namespace["ns"] = ns.name
namespace["pod"] = pod
namespace["cluster"] = cluster
namespace["shell_url"] = cluster.get("links").get("shell")
def fin():
client.delete(project)
request.addfinalizer(fin)
def send_a_command(ws_connection, command):
cmd_enc = base64.b64encode(command.encode('utf-8')).decode('utf-8')
ws_connection.send('0' + cmd_enc)
    # send a carriage return so the shell executes the command typed above
ws_connection.send('0DQ==')
time.sleep(5)
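# For reference, the "base64.channel.k8s.io" framing used above: each message
# is a single channel digit followed by base64-encoded bytes, with channel '0'
# being stdin. An illustrative exchange:
#
#   base64.b64encode("ls".encode()).decode()  # -> "bHM="
#   ws.send("0" + "bHM=")                     # type "ls" on stdin
#   ws.send("0DQ==")                          # "DQ==" is base64("\r"): submit it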
def validate_command_execution(websocket, command, log_obj, checking):
"""
    validate that a command is sent via the websocket
    and that the response contains the expected results
    :param websocket: the websocket object
    :param command: the command to run
    :param log_obj: the logparse object that receives the message
    :param checking: the list of strings to check for in the response message
:return:
"""
send_a_command(websocket, command)
print('\nshell command and output:\n' + log_obj.last_message + '\n')
for i in checking:
assert i in log_obj.last_message, "failed to run the command"
| 5,369 | 34.098039 | 77 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_project_quota.py
|
import os
import pytest
from .common import * # NOQA
namespace = {"p_client": None, "ns": None, "cluster": None, "project": None}
CLUSTER_NAME = os.environ.get("CLUSTER_NAME", "")
RANCHER_CLEANUP_PROJECT = os.environ.get("RANCHER_CLEANUP_PROJECT", "True")
def ns_default_quota():
    return {"limit": {"pods": "5",
                      "requestsCpu": "500m"}}
def default_project_quota():
    return {"limit": {"pods": "20",
                      "requestsCpu": "2000m"}}
def ns_quota():
return {"limit": {"pods": "10",
"requestsCpu": "500m"}}
def test_create_project_quota():
    # Create a project resource quota and verify the quota is created
    # successfully. Verify the namespace default resource quota is set
cluster = namespace["cluster"]
client = get_user_client()
c_client = namespace["c_client"]
quota = default_project_quota()
nsquota = ns_default_quota()
proj = client.create_project(name='test-' + random_str(),
clusterId=cluster.id,
resourceQuota=quota,
namespaceDefaultResourceQuota=nsquota)
proj = client.wait_success(proj)
assert proj.resourceQuota is not None
assert proj.resourceQuota.limit.pods == quota["limit"]["pods"]
assert proj.resourceQuota.limit.requestsCpu == \
quota["limit"]["requestsCpu"]
assert proj.namespaceDefaultResourceQuota is not None
assert proj.namespaceDefaultResourceQuota.limit.pods == \
nsquota["limit"]["pods"]
ns = create_ns(c_client, cluster, proj)
print(ns)
assert ns is not None
assert ns.resourceQuota is not None
assert ns.resourceQuota.limit.pods == nsquota["limit"]["pods"]
assert ns.resourceQuota.limit.requestsCpu == \
nsquota["limit"]["requestsCpu"]
validate_resoucequota_thru_kubectl(ns)
def test_resource_quota_create_namespace_with_ns_quota():
    # Create project quota and namespaces and verify
# namespace creation is allowed within the quota
cluster = namespace["cluster"]
client = get_user_client()
c_client = namespace["c_client"]
quota = default_project_quota()
nsquota = ns_quota()
proj = client.create_project(name='test-' + random_str(),
clusterId=cluster.id,
resourceQuota=quota,
namespaceDefaultResourceQuota=quota)
proj = client.wait_success(proj)
assert proj.resourceQuota is not None
# Create a namespace with namespace resource quota
ns_name = random_str()
ns = c_client.create_namespace(name=ns_name,
projectId=proj.id,
resourceQuota=ns_quota())
ns = c_client.wait_success(ns)
assert ns is not None
assert ns.resourceQuota is not None
assert ns.resourceQuota.limit.pods == nsquota["limit"]["pods"]
assert ns.resourceQuota.limit.requestsCpu == \
nsquota["limit"]["requestsCpu"]
validate_resoucequota_thru_kubectl(ns)
# Create another namespace with quota and it should succeed
ns1 = c_client.create_namespace(name=random_str(),
projectId=proj.id,
resourceQuota=nsquota)
ns1 = c_client.wait_success(ns1)
print(ns1)
assert ns1 is not None
assert ns1.resourceQuota is not None
assert ns1.resourceQuota.limit.pods == nsquota["limit"]["pods"]
assert ns1.resourceQuota.limit.requestsCpu == \
nsquota["limit"]["requestsCpu"]
validate_resoucequota_thru_kubectl(ns1)
# Creating another namespace should fail as it exceeds the allotted limit
try:
c_client.create_namespace(name=random_str(),
projectId=proj.id,
resourceQuota=ns_quota())
except Exception as e:
errorstring = str(e)
print(str(e))
assert "MaxLimitExceeded" in errorstring
def test_namespace_quota_edit(remove_resource):
client, cluster = get_global_admin_client_and_cluster()
pj1 = client.create_project(name=random_str(),
clusterId=cluster.id,
resourceQuota={"limit": {"limitsCpu": "20m"}},
namespaceDefaultResourceQuota={
"limit": {"limitsCpu": "10m"}})
pj2 = client.create_project(name=random_str(),
clusterId=cluster.id,
resourceQuota={"limit": {"limitsCpu": "15m"}},
namespaceDefaultResourceQuota={
"limit": {"limitsCpu": "15m"}})
p_client = get_cluster_client_for_token(cluster, ADMIN_TOKEN)
ns1 = p_client.create_namespace(name=random_str(),
clusterId=cluster.id,
projectId=pj1.id)
ns2 = p_client.create_namespace(name=random_str(),
clusterId=cluster.id,
projectId=pj2.id)
ns1 = p_client.wait_success(ns1)
ns2 = p_client.wait_success(ns2)
p_client.action(obj=ns2,
action_name="move",
projectId=None)
ns1 = p_client.update(ns1,
resourceQuota={"limit": {"limitsCpu": "11m"}})
ns1_limitsCpu = ns1.resourceQuota.limit.limitsCpu
assert ns1_limitsCpu == "11m"
remove_resource(pj1)
remove_resource(pj2)
remove_resource(ns2)
def validate_resoucequota_thru_kubectl(namespace):
    # This method executes `kubectl get quota` and verifies that the
    # resource quota reported by kubectl matches the quota assigned to the
    # namespace through the API
command = "get quota --namespace " + namespace['id']
print(command)
result = execute_kubectl_cmd(command, json_out=True)
print("Kubectl command result")
print(result)
testdict = namespace['resourceQuota']
response = result["items"]
assert "spec" in response[0]
quotadict = (response[0]["spec"])
assert quotadict['hard']['pods'] == testdict['limit']['pods']
assert \
quotadict['hard']['requests.cpu'] == testdict['limit']['requestsCpu']
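# For reference, the kubectl JSON parsed above has roughly this shape, trimmed
# to the fields the assertions read (an illustration, not captured output):
#
#   {"items": [{"spec": {"hard": {"pods": "5", "requests.cpu": "500m"}}}]}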
@pytest.fixture(scope='module', autouse=True)
def create_project_client(request):
client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
p, ns = create_project_and_ns(USER_TOKEN, cluster, "testworkload")
p_client = get_project_client_for_token(p, USER_TOKEN)
c_client = get_cluster_client_for_token(cluster, USER_TOKEN)
namespace["p_client"] = p_client
namespace["ns"] = ns
namespace["cluster"] = cluster
namespace["project"] = p
namespace["c_client"] = c_client
def fin():
client = get_user_client()
client.delete(namespace["project"])
request.addfinalizer(fin)
| 7,019 | 33.07767 | 79 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_oke_cluster.py
|
import pytest
from .common import * # NOQA
OCI_TENANCY_OCID = os.environ.get('RANCHER_OCI_TENANCY_OCID', "")
OCI_COMPARTMENT_OCID = os.environ.get('RANCHER_OCI_COMPARTMENT_OCID', "")
OCI_USER_OCID = os.environ.get('RANCHER_OCI_USER_OCID', "")
OCI_FINGERPRINT = os.environ.get('RANCHER_OCI_FINGERPRINT', "")
OCI_PRIVATE_KEY_PATH = os.environ.get('RANCHER_OCI_PRIVATE_KEY_PATH', "")
OCI_PRIVATE_KEY_PASSPHRASE = os.environ.get('RANCHER_OCI_PRIVATE_KEY_PASSPHRASE', "")
OCI_REGION = os.environ.get('RANCHER_OCI_REGION', None)
OKE_VERSION = os.environ.get('RANCHER_OKE_VERSION', None)
OKE_NODE_SHAPE = os.environ.get('RANCHER_OKE_NODE_SHAPE', None)
OKE_NODE_IMAGE = os.environ.get('RANCHER_OKE_NODE_IMAGE', None)
okecredential = pytest.mark.skipif(not (
OCI_TENANCY_OCID and OCI_COMPARTMENT_OCID and OCI_USER_OCID and OCI_FINGERPRINT and OCI_PRIVATE_KEY_PATH),
reason='OKE Credentials not provided, '
'cannot create cluster')
def test_error_get_oke_latest_versions_missing_key():
oci_cred_body = {
"tenancyOCID": "ocid1.tenancy.oc1..aaaaaa",
"userOCID": "ocid1.user.oc1..aaaaaaaa",
"region": "us-ashburn-1",
"fingerprint": "xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx",
}
response = get_oci_meta_response("/meta/oci/okeVersions", oci_cred_body)
assert response.content is not None
json_response = json.loads(response.content)
print(json_response)
assert response.status_code == 422
assert json_response['message'] == "OCI API private key is required"
def test_error_get_oke_images_missing_tenancy():
oci_cred_body = {
"userOCID": "ocid1.user.oc1..aaaaaaaa",
"region": "us-phoenix-1",
"fingerprint": "xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx",
"privateKey": "-----BEGIN RSA PRIVATE KEY-----\nMIIE...ewBQ==\n-----END RSA PRIVATE KEY-----"
}
response = get_oci_meta_response("/meta/oci/nodeOkeImages", oci_cred_body)
assert response.content is not None
json_response = json.loads(response.content)
print(json_response)
assert response.status_code == 422
assert json_response['message'] == "OCI tenancy is required"
def test_error_get_invalid_endpoint():
oci_cred_body = {
"tenancyOCID": "ocid1.tenancy.oc1..aaaaaa",
"userOCID": "ocid1.user.oc1..aaaaaaaa",
"region": "ap-tokyo-1",
"fingerprint": "xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx",
"privateKey": "-----BEGIN RSA PRIVATE KEY-----\nMIIE...ewBQ==\n-----END RSA PRIVATE KEY-----"
}
response = get_oci_meta_response("/meta/oci/dne", oci_cred_body)
assert response.status_code == 404
assert response.content is not None
json_response = json.loads(response.content)
print(json_response)
assert "invalid endpoint" in json_response['message']
@okecredential
def test_get_oke_latest_versions():
oci_cred_body = {
"tenancyOCID": OCI_TENANCY_OCID,
"userOCID": OCI_USER_OCID,
"region": OCI_REGION,
"fingerprint": OCI_FINGERPRINT,
"privateKey": get_ssh_key_contents(OCI_PRIVATE_KEY_PATH)
}
response = get_oci_meta_response("/meta/oci/okeVersions", oci_cred_body)
assert response.status_code == 200
assert response.content is not None
json_response = json.loads(response.content)
latest_oke_version = json_response[-1]
print(latest_oke_version)
@okecredential
def test_create_oke_cluster():
client, cluster = create_and_validate_oke_cluster("test-auto-oke")
cluster_cleanup(client, cluster)
def create_and_validate_oke_cluster(name):
oci_cred_body = {
"tenancyOCID": OCI_TENANCY_OCID,
"userOCID": OCI_USER_OCID,
"region": OCI_REGION,
"fingerprint": OCI_FINGERPRINT,
"privateKey": get_ssh_key_contents(OCI_PRIVATE_KEY_PATH)
}
client = get_user_client()
print("Cluster creation")
# Get the region
if OCI_REGION is None:
region = "us-phoenix-1"
else:
region = OCI_REGION
# Get the node shape
if OKE_NODE_SHAPE is None:
response = get_oci_meta_response("/meta/oci/nodeShapes", oci_cred_body)
print(response.content)
assert response.status_code == 200
assert response.content is not None
json_response = json.loads(response.content)
print(json_response)
shape = json_response[-1]
else:
shape = OKE_NODE_SHAPE
# Get the node image
if OKE_NODE_IMAGE is None:
response = get_oci_meta_response("/meta/oci/nodeOkeImages", oci_cred_body)
assert response.status_code == 200
assert response.content is not None
json_response = json.loads(response.content)
print(json_response)
latest_oke_image = json_response[-1]
else:
latest_oke_image = OKE_NODE_IMAGE
# Get the OKE version
if OKE_VERSION is None:
response = get_oci_meta_response("/meta/oci/okeVersions", oci_cred_body)
assert response.status_code == 200
assert response.content is not None
json_response = json.loads(response.content)
print(json_response)
latest_oke_version = json_response[-1]
else:
latest_oke_version = OKE_VERSION
cluster = client.create_cluster(
name=name,
okeEngineConfig={
"availabilityDomain": "",
"compartmentId": OCI_COMPARTMENT_OCID,
"displayName": name,
"driverName": "oraclecontainerengine",
"enableKubernetesDashboard": False,
"enablePrivateNodes": False,
"enableTiller": False,
"fingerprint": OCI_FINGERPRINT,
"kubernetesVersion": latest_oke_version,
"loadBalancerSubnetName1": "",
"loadBalancerSubnetName2": "",
"name": name,
"nodeImage": latest_oke_image,
"nodePoolDnsDomainName": "nodedns",
"nodePoolSecurityListName": "Node Security List",
"nodePoolSubnetName": "nodedns",
"nodePublicKeyContents": "",
"nodePublicKeyPath": "",
"nodeShape": shape,
"privateKeyContents": get_ssh_key_contents(OCI_PRIVATE_KEY_PATH),
"privateKeyPassphrase": OCI_PRIVATE_KEY_PASSPHRASE,
"privateKeyPath": "",
"quantityOfNodeSubnets": 1,
"quantityPerSubnet": 1,
"region": region,
"serviceDnsDomainName": "svcdns",
"serviceSecurityListName": "Service Security List",
"skipVcnDelete": False,
"tenancyId": OCI_TENANCY_OCID,
"tenancyName": "",
"type": "okeEngineConfig",
"userOcid": OCI_USER_OCID,
"vcnName": "",
"waitNodesActive": 0,
"workerNodeIngressCidr": ""
})
print(cluster)
cluster = validate_cluster(client, cluster, check_intermediate_state=True,
skipIngresscheck=True)
return client, cluster
def get_oci_meta_response(endpoint, oci_cred_body):
headers = {"Content-Type": "application/json",
"Accept": "application/json",
"Authorization": "Bearer " + USER_TOKEN}
oke_version_url = CATTLE_TEST_URL + endpoint
response = requests.post(oke_version_url, json=oci_cred_body,
verify=False, headers=headers)
return response
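# Illustrative call with placeholder values (not real credentials): the meta
# endpoints accept the OCI credential body as JSON and return a JSON list,
# and the tests above treat the last element as the newest entry:
#
#   body = {"tenancyOCID": "ocid1.tenancy.oc1..aaa",
#           "userOCID": "ocid1.user.oc1..bbb",
#           "region": "us-phoenix-1",
#           "fingerprint": "xx:xx:...",
#           "privateKey": "-----BEGIN RSA PRIVATE KEY-----..."}
#   resp = get_oci_meta_response("/meta/oci/okeVersions", body)
#   versions = json.loads(resp.content)  # list of versions; latest is [-1]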
def get_ssh_key_contents(path):
if os.path.exists(path):
with open(path, 'r') as f:
ssh_key = f.read()
return ssh_key
| 7,633 | 36.239024 | 114 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_proxy.py
|
import os
import time
from lib.aws import AWS_USER
from .common import (
AmazonWebServices, run_command
)
from .test_airgap import get_bastion_node
from .test_custom_host_reg import (
random_test_name, RANCHER_SERVER_VERSION, HOST_NAME, AGENT_REG_CMD
)
BASTION_ID = os.environ.get("RANCHER_BASTION_ID", "")
NUMBER_OF_INSTANCES = int(os.environ.get("RANCHER_AIRGAP_INSTANCE_COUNT", "1"))
PROXY_HOST_NAME = random_test_name(HOST_NAME)
RANCHER_PROXY_INTERNAL_HOSTNAME = \
PROXY_HOST_NAME + "-internal.qa.rancher.space"
RANCHER_PROXY_HOSTNAME = PROXY_HOST_NAME + ".qa.rancher.space"
RESOURCE_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'resource')
SSH_KEY_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'.ssh')
RANCHER_PROXY_PORT = os.environ.get("RANCHER_PROXY_PORT", "3131")
def deploy_proxy_server():
node_name = PROXY_HOST_NAME + "-proxy"
proxy_node = AmazonWebServices().create_node(node_name)
# Copy SSH Key to proxy and local dir and give it proper permissions
write_key_command = "cat <<EOT >> {}.pem\n{}\nEOT".format(
proxy_node.ssh_key_name, proxy_node.ssh_key)
proxy_node.execute_command(write_key_command)
local_write_key_command = \
"mkdir -p {} && cat <<EOT >> {}/{}.pem\n{}\nEOT".format(
SSH_KEY_DIR, SSH_KEY_DIR,
proxy_node.ssh_key_name, proxy_node.ssh_key)
run_command(local_write_key_command, log_out=False)
set_key_permissions_command = "chmod 400 {}.pem".format(
proxy_node.ssh_key_name)
proxy_node.execute_command(set_key_permissions_command)
local_set_key_permissions_command = "chmod 400 {}/{}.pem".format(
SSH_KEY_DIR, proxy_node.ssh_key_name)
run_command(local_set_key_permissions_command, log_out=False)
# Write the proxy config to the node and run the proxy
proxy_node.execute_command("mkdir -p /home/ubuntu/squid/")
copy_cfg_command = \
'scp -q -i {}/{}.pem -o StrictHostKeyChecking=no ' \
'-o UserKnownHostsFile=/dev/null {}/squid/squid.conf ' \
'{}@{}:~/squid/squid.conf'.format(
SSH_KEY_DIR, proxy_node.ssh_key_name, RESOURCE_DIR,
AWS_USER, proxy_node.host_name)
run_command(copy_cfg_command, log_out=True)
squid_cmd = "sudo docker run -d " \
"-v /home/ubuntu/squid/squid.conf:/etc/squid/squid.conf " \
"-p {}:3128 wernight/squid".format(RANCHER_PROXY_PORT)
proxy_node.execute_command(squid_cmd)
print("Proxy Server Details:\nNAME: {}\nHOST NAME: {}\n"
"INSTANCE ID: {}\n".format(node_name, proxy_node.host_name,
proxy_node.provider_node_id))
return proxy_node
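# A quick manual sanity check for the proxy (not part of the automated flow):
# from a host that can reach the proxy node, request a page through squid with
#
#   curl -sI -x http://<proxy-host>:3131 https://rancher.com
#
# where 3131 is the default RANCHER_PROXY_PORT above; any HTTP response means
# squid is forwarding traffic.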
def run_command_on_proxy_node(bastion_node, ag_node, cmd, log_out=False):
ag_command = \
'ssh -i "{}.pem" -o StrictHostKeyChecking=no {}@{} ' \
'"{}"'.format(
bastion_node.ssh_key_name, AWS_USER,
ag_node.private_ip_address, cmd)
result = bastion_node.execute_command(ag_command)
if log_out:
print("Running command: {}".format(ag_command))
print("Result: {}".format(result))
return result
def prepare_airgap_proxy_node(bastion_node, number_of_nodes):
node_name = PROXY_HOST_NAME + "-agproxy"
ag_nodes = AmazonWebServices().create_multiple_nodes(
number_of_nodes, node_name, public_ip=False)
for num, ag_node in enumerate(ag_nodes):
ag_node_update_docker = \
'ssh -i "{}.pem" -o StrictHostKeyChecking=no {}@{} ' \
'"sudo usermod -aG docker {}"'.format(
bastion_node.ssh_key_name, AWS_USER,
ag_node.private_ip_address, AWS_USER)
bastion_node.execute_command(ag_node_update_docker)
proxy_url = bastion_node.host_name + ":" + RANCHER_PROXY_PORT
proxy_info = '[Service]\nEnvironment=\"HTTP_PROXY={}\" ' \
'\"HTTPS_PROXY={}\" ' \
'\"NO_PROXY=localhost,127.0.0.1,0.0.0.0,10.0.0.0/8,' \
'cattle-system.svc\"' \
.format(proxy_url, proxy_url)
bastion_node.execute_command('echo "{}" > http-proxy.conf'
.format(proxy_info))
ag_node_create_dir = \
'ssh -i "{}.pem" -o StrictHostKeyChecking=no {}@{} ' \
'"sudo mkdir -p /etc/systemd/system/docker.service.d"'.format(
bastion_node.ssh_key_name, AWS_USER,
ag_node.private_ip_address)
bastion_node.execute_command(ag_node_create_dir)
copy_conf_cmd = \
'scp -q -i "{}".pem -o StrictHostKeyChecking=no -o ' \
'UserKnownHostsFile=/dev/null ~/http-proxy.conf ' \
'{}@{}:~/'.format(bastion_node.ssh_key_name, AWS_USER,
ag_node.private_ip_address)
bastion_node.execute_command(copy_conf_cmd)
ag_node_mv_conf = \
'ssh -i "{}.pem" -o StrictHostKeyChecking=no ' \
'-o UserKnownHostsFile=/dev/null {}@{} ' \
'"sudo mv http-proxy.conf /etc/systemd/system/docker.service.d/ ' \
'&& sudo systemctl daemon-reload && ' \
'sudo systemctl restart docker"'.format(
bastion_node.ssh_key_name, AWS_USER,
ag_node.private_ip_address)
bastion_node.execute_command(ag_node_mv_conf)
print("Airgapped Proxy Instance Details:\n"
"NAME: {}-{}\nPRIVATE IP: {}\n"
"".format(node_name, num, ag_node.private_ip_address))
return ag_nodes
def deploy_proxy_rancher(bastion_node):
ag_node = prepare_airgap_proxy_node(bastion_node, 1)[0]
proxy_url = bastion_node.host_name + ":" + RANCHER_PROXY_PORT
deploy_rancher_command = \
'sudo docker run -d --privileged --restart=unless-stopped ' \
'-p 80:80 -p 443:443 ' \
'-e HTTP_PROXY={} ' \
'-e HTTPS_PROXY={} ' \
'-e NO_PROXY="localhost,127.0.0.1,0.0.0.0,10.0.0.0/8,' \
'cattle-system.svc" ' \
'rancher/rancher:{} --trace'.format(
proxy_url, proxy_url,
RANCHER_SERVER_VERSION)
deploy_result = run_command_on_proxy_node(bastion_node, ag_node,
deploy_rancher_command,
log_out=True)
assert "Downloaded newer image for rancher/rancher:{}".format(
RANCHER_SERVER_VERSION) in deploy_result[1]
return ag_node
def register_cluster_nodes(bastion_node, ag_nodes):
results = []
for ag_node in ag_nodes:
deploy_result = run_command_on_proxy_node(bastion_node, ag_node,
AGENT_REG_CMD)
results.append(deploy_result)
return results
def create_nlb_and_add_targets(aws_nodes):
# Create internet-facing nlb and grab ARN & dns name
lb = AmazonWebServices().create_network_lb(name=PROXY_HOST_NAME + "-nlb")
lb_arn = lb["LoadBalancers"][0]["LoadBalancerArn"]
public_dns = lb["LoadBalancers"][0]["DNSName"]
# Create internal nlb and grab ARN & dns name
internal_lb = AmazonWebServices().create_network_lb(
name=PROXY_HOST_NAME + "-internal-nlb", scheme='internal')
internal_lb_arn = internal_lb["LoadBalancers"][0]["LoadBalancerArn"]
internal_lb_dns = internal_lb["LoadBalancers"][0]["DNSName"]
# Upsert the route53 record -- if it exists, update, if not, insert
AmazonWebServices().upsert_route_53_record_cname(
RANCHER_PROXY_INTERNAL_HOSTNAME, internal_lb_dns)
AmazonWebServices().upsert_route_53_record_cname(
RANCHER_PROXY_HOSTNAME, public_dns)
public_dns = RANCHER_PROXY_HOSTNAME
# Create the target groups
tg80 = AmazonWebServices(). \
create_ha_target_group(80, PROXY_HOST_NAME + "-tg-80")
tg443 = AmazonWebServices(). \
create_ha_target_group(443, PROXY_HOST_NAME + "-tg-443")
tg80_arn = tg80["TargetGroups"][0]["TargetGroupArn"]
tg443_arn = tg443["TargetGroups"][0]["TargetGroupArn"]
# Create the internal target groups
internal_tg80 = AmazonWebServices(). \
create_ha_target_group(80, PROXY_HOST_NAME + "-internal-tg-80")
internal_tg443 = AmazonWebServices(). \
create_ha_target_group(443, PROXY_HOST_NAME + "-internal-tg-443")
internal_tg80_arn = internal_tg80["TargetGroups"][0]["TargetGroupArn"]
internal_tg443_arn = internal_tg443["TargetGroups"][0]["TargetGroupArn"]
# Create listeners for the load balancers, to forward to the target groups
AmazonWebServices().create_ha_nlb_listener(
loadBalancerARN=lb_arn, port=80, targetGroupARN=tg80_arn)
AmazonWebServices().create_ha_nlb_listener(
loadBalancerARN=lb_arn, port=443, targetGroupARN=tg443_arn)
AmazonWebServices().create_ha_nlb_listener(
loadBalancerARN=internal_lb_arn, port=80,
targetGroupARN=internal_tg80_arn)
AmazonWebServices().create_ha_nlb_listener(
loadBalancerARN=internal_lb_arn, port=443,
targetGroupARN=internal_tg443_arn)
targets = []
for aws_node in aws_nodes:
targets.append(aws_node.provider_node_id)
# Register the nodes to the internet-facing targets
targets_list = [dict(Id=target_id, Port=80) for target_id in targets]
AmazonWebServices().register_targets(targets_list, tg80_arn)
targets_list = [dict(Id=target_id, Port=443) for target_id in targets]
AmazonWebServices().register_targets(targets_list, tg443_arn)
# Wait up to approx. 5 minutes for targets to begin health checks
for i in range(300):
health80 = AmazonWebServices().describe_target_health(
tg80_arn)['TargetHealthDescriptions'][0]['TargetHealth']['State']
health443 = AmazonWebServices().describe_target_health(
tg443_arn)['TargetHealthDescriptions'][0]['TargetHealth']['State']
if health80 in ['initial', 'healthy'] \
and health443 in ['initial', 'healthy']:
break
time.sleep(1)
# Register the nodes to the internal targets
targets_list = [dict(Id=target_id, Port=80) for target_id in targets]
AmazonWebServices().register_targets(targets_list, internal_tg80_arn)
targets_list = [dict(Id=target_id, Port=443) for target_id in targets]
AmazonWebServices().register_targets(targets_list, internal_tg443_arn)
# Wait up to approx. 5 minutes for targets to begin health checks
for i in range(300):
try:
health80 = AmazonWebServices().describe_target_health(
internal_tg80_arn)[
'TargetHealthDescriptions'][0]['TargetHealth']['State']
health443 = AmazonWebServices().describe_target_health(
internal_tg443_arn)[
'TargetHealthDescriptions'][0]['TargetHealth']['State']
if health80 in ['initial', 'healthy'] \
and health443 in ['initial', 'healthy']:
break
except Exception:
print("Target group healthchecks unavailable...")
time.sleep(1)
return public_dns
def test_deploy_proxied_rancher():
proxy_node = deploy_proxy_server()
proxy_rancher_node = deploy_proxy_rancher(proxy_node)
public_dns = create_nlb_and_add_targets([proxy_rancher_node])
print(
"\nConnect to bastion node with:\nssh -i {}.pem {}@{}\n"
"Connect to rancher node by connecting to bastion, then run:\n"
"ssh -i {}.pem {}@{}\n\nOpen the Rancher UI with: https://{}\n"
"".format(
proxy_node.ssh_key_name, AWS_USER,
proxy_node.host_name,
proxy_node.ssh_key_name, AWS_USER,
proxy_rancher_node.private_ip_address,
public_dns))
def test_deploy_proxy_nodes():
bastion_node = get_bastion_node(BASTION_ID)
ag_nodes = prepare_airgap_proxy_node(bastion_node, NUMBER_OF_INSTANCES)
assert len(ag_nodes) == NUMBER_OF_INSTANCES
print(
'{} airgapped instance(s) created.\n'
'Connect to these and run commands by connecting to bastion node, '
'then running the following command (with the quotes):\n'
'ssh -i {}.pem {}@NODE_PRIVATE_IP '.format(
NUMBER_OF_INSTANCES, bastion_node.ssh_key_name,
AWS_USER))
for ag_node in ag_nodes:
assert ag_node.private_ip_address is not None
assert ag_node.public_ip_address is None
results = register_cluster_nodes(bastion_node, ag_nodes)
for result in results:
assert "Downloaded newer image for rancher/rancher-agent" in result[1]
| 12,636 | 41.837288 | 79 |
py
|
rancher
|
rancher-master/tests/validation/lib/digital_ocean.py
|
import digitalocean
import os
import time
from .cloud_provider import CloudProviderBase
from .node import Node
PRIVATE_IMAGES = {
"ubuntu-16.04-docker-1.12.6": {
'image': 30447985, 'ssh_user': 'ubuntu'},
"ubuntu-16.04-docker-17.03": {
'image': 30473722, 'ssh_user': 'ubuntu'},
"ubuntu-16.04-docker-1.13.1": {
'image': 30473815, 'ssh_user': 'ubuntu'}}
DO_TOKEN = os.environ.get("DO_TOKEN")
DO_SSH_KEY_ID = os.environ.get("DO_SSH_KEY_ID")
DO_SSH_KEY_NAME = os.environ.get("DO_SSH_KEY_NAME")
class DigitalOcean(CloudProviderBase):
DROPLET_STATE_MAP = {
'running': 'create',
'stopped': 'shutdown',
'terminated': 'destroy'
}
def __init__(self):
self._manager = digitalocean.Manager(token=DO_TOKEN)
self._token = DO_TOKEN
if DO_SSH_KEY_NAME:
self.master_ssh_private_key = self.get_ssh_key(DO_SSH_KEY_NAME)
self.master_ssh_public_key = self.get_ssh_key(
DO_SSH_KEY_NAME + '.pub')
self.master_ssh_private_key_path = self.get_ssh_key_path(
DO_SSH_KEY_NAME)
def _select_ami(self, os_version=None, docker_version=None):
os_version = os_version or self.OS_VERSION
docker_version = docker_version or self.DOCKER_VERSION
image = PRIVATE_IMAGES[
"{}-docker-{}".format(os_version, docker_version)]
return image['image'], image['ssh_user']
def create_node(
self, node_name, key_name=None, os_version=None, docker_version=None,
wait_for_ready=True):
os_version = os_version or self.OS_VERSION
docker_version = docker_version or self.DOCKER_VERSION
image, ssh_user = self._select_ami(os_version, docker_version)
if key_name:
# get private key
ssh_private_key_name = key_name.replace('.pub', '')
ssh_private_key = self.get_ssh_key(ssh_private_key_name)
ssh_private_key_path = self.get_ssh_key_path(ssh_private_key_name)
ssh_key_id = self._get_ssh_key_id(key_name)
droplet = digitalocean.Droplet(
token=self._token,
name=node_name,
region='sfo1',
image=image,
size_slug='2gb',
ssh_keys=[ssh_key_id],
backups=False)
droplet.create()
node = Node(
provider_node_id=droplet.id,
state=droplet.status,
ssh_user=ssh_user,
ssh_key_name=ssh_private_key_name,
ssh_key_path=ssh_private_key_path,
ssh_key=ssh_private_key,
os_version=os_version,
docker_version=docker_version)
if wait_for_ready:
self.wait_for_node_state(node)
node.wait_for_ssh_ready()
return node
def create_multiple_nodes(
self, number_of_nodes, node_name_prefix, os_version=None,
docker_version=None, key_name=None, wait_for_ready=True):
nodes = []
for i in range(number_of_nodes):
node_name = "{}_{}".format(node_name_prefix, i)
nodes.append(self.create_node(
node_name, key_name=key_name, os_version=os_version,
docker_version=docker_version, wait_for_ready=False))
if wait_for_ready:
nodes = self.wait_for_nodes_state(nodes)
for node in nodes:
node.wait_for_ssh_ready()
return nodes
def get_node(self, provider_id):
droplet = self._manager.get_droplet(provider_id)
node = Node(
provider_node_id=droplet.id,
node_name=droplet.name,
ip_address=droplet.ip_address,
state=droplet.status,
labels=droplet.tags)
return node
def stop_node(self, node, wait_for_stopped=False):
droplet = self._manager.get_droplet(node.provider_node_id)
droplet.shutdown()
if wait_for_stopped:
self.wait_for_node_state(node, 'stopped')
def delete_node(self, node, wait_for_deleted=False):
droplet = self._manager.get_droplet(node.provider_node_id)
droplet.destroy()
if wait_for_deleted:
self.wait_for_node_state(node, 'terminated')
def wait_for_node_state(self, node, state='running'):
action_type = self.DROPLET_STATE_MAP[state]
droplet = self._manager.get_droplet(node.provider_node_id)
actions = droplet.get_actions()
action = None
for item in actions:
if item.type == action_type:
action = item
break
else:
raise Exception(
"Unable to find action for {0}: {1}".format(
state, action_type))
timeout = 300
        start_time = time.time()
while action.status != "completed":
if time.time() - start_time > timeout:
raise Exception("{0} node timed out".format(state))
action.load()
if action_type == "create":
droplet.load()
node.host_name = droplet.name
node.ip_address = droplet.ip_address
node.labels = droplet.tags
node.state = action_type
    def _get_ssh_key_id(self, key_name):
        # Stub: SSH key lookup by name is not implemented yet, so this
        # currently returns None
        pass
| 5,305 | 31.956522 | 78 |
py
|
rancher
|
rancher-master/tests/validation/lib/aws.py
|
import base64
import boto3
import logging
import os
import rsa
import time
from boto3.exceptions import Boto3Error
from botocore.exceptions import ClientError
from .cloud_provider import CloudProviderBase
from .node import Node
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
AWS_REGION = os.environ.get("AWS_REGION", "us-east-2")
AWS_REGION_AZ = os.environ.get("AWS_REGION_AZ", "us-east-2a")
AWS_SECURITY_GROUP = os.environ.get("AWS_SECURITY_GROUPS",
'sg-0e753fd5550206e55')
AWS_SUBNET = os.environ.get("AWS_SUBNET", "subnet-ee8cac86")
AWS_HOSTED_ZONE_ID = os.environ.get("AWS_HOSTED_ZONE_ID", "")
AWS_VPC_ID = os.environ.get("AWS_VPC_ID", "vpc-bfccf4d7")
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
AWS_SSH_KEY_NAME = os.environ.get("AWS_SSH_KEY_NAME")
AWS_CICD_INSTANCE_TAG = os.environ.get("AWS_CICD_INSTANCE_TAG",
'rancher-validation')
AWS_IAM_PROFILE = os.environ.get("AWS_IAM_PROFILE", "")
# by default the public Ubuntu 18.04 AMI is used
AWS_DEFAULT_AMI = "ami-0d5d9d301c853a04a"
AWS_DEFAULT_USER = "ubuntu"
AWS_AMI = os.environ.get("AWS_AMI", AWS_DEFAULT_AMI)
AWS_USER = os.environ.get("AWS_USER", AWS_DEFAULT_USER)
AWS_VOLUME_SIZE = os.environ.get("AWS_VOLUME_SIZE", "50")
AWS_INSTANCE_TYPE = os.environ.get("AWS_INSTANCE_TYPE", 't3a.medium')
AWS_WINDOWS_VOLUME_SIZE = os.environ.get("AWS_WINDOWS_VOLUME_SIZE", "100")
AWS_WINDOWS_INSTANCE_TYPE = 't3.xlarge'
EKS_VERSION = os.environ.get("RANCHER_EKS_K8S_VERSION")
EKS_ROLE_ARN = os.environ.get("RANCHER_EKS_ROLE_ARN")
EKS_WORKER_ROLE_ARN = os.environ.get("RANCHER_EKS_WORKER_ROLE_ARN")
AWS_SUBNETS = []
if ',' in AWS_SUBNET:
AWS_SUBNETS = AWS_SUBNET.split(',')
else:
AWS_SUBNETS = [AWS_SUBNET]
class AmazonWebServices(CloudProviderBase):
def __init__(self):
self._client = boto3.client(
'ec2',
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
region_name=AWS_REGION)
self._elbv2_client = boto3.client(
'elbv2',
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
region_name=AWS_REGION)
self._route53_client = boto3.client(
'route53',
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
self._db_client = boto3.client(
'rds',
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
region_name=AWS_REGION)
self._eks_client = boto3.client(
'eks',
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
region_name=AWS_REGION)
self.master_ssh_key = None
self.master_ssh_key_path = None
if AWS_SSH_KEY_NAME:
self.master_ssh_key = self.get_ssh_key(AWS_SSH_KEY_NAME)
self.master_ssh_key_path = self.get_ssh_key_path(AWS_SSH_KEY_NAME)
# Used for cleanup
self.created_node = []
self.created_keys = []
def create_node(self, node_name, ami=AWS_AMI, ssh_user=AWS_USER,
key_name=None, wait_for_ready=True, public_ip=True):
volume_size = AWS_VOLUME_SIZE
instance_type = AWS_INSTANCE_TYPE
if ssh_user == "Administrator":
volume_size = AWS_WINDOWS_VOLUME_SIZE
instance_type = AWS_WINDOWS_INSTANCE_TYPE
if key_name:
# if cert private key
if key_name.endswith('.pem'):
ssh_private_key_name = key_name
ssh_private_key = self.get_ssh_key(key_name)
ssh_private_key_path = self.get_ssh_key_path(key_name)
else:
# get private key
ssh_private_key_name = key_name.replace('.pub', '')
ssh_private_key = self.get_ssh_key(ssh_private_key_name)
ssh_private_key_path = self.get_ssh_key_path(
ssh_private_key_name)
else:
key_name = AWS_SSH_KEY_NAME.replace('.pem', '')
ssh_private_key_name = key_name
ssh_private_key = self.master_ssh_key
ssh_private_key_path = self.master_ssh_key_path
args = {"ImageId": ami,
"InstanceType": instance_type,
"MinCount": 1,
"MaxCount": 1,
"TagSpecifications": [
{'ResourceType': 'instance',
'Tags': [
{'Key': 'Name', 'Value': node_name},
{'Key': 'CICD', 'Value': AWS_CICD_INSTANCE_TAG}
]}
],
"KeyName": key_name,
"NetworkInterfaces": [
{'DeviceIndex': 0,
'AssociatePublicIpAddress': public_ip,
'Groups': [AWS_SECURITY_GROUP]
}
],
"Placement": {'AvailabilityZone': AWS_REGION_AZ},
"BlockDeviceMappings":
[{"DeviceName": "/dev/sda1",
"Ebs": {"VolumeSize": int(volume_size)}
}]
}
if len(AWS_IAM_PROFILE) > 0:
args["IamInstanceProfile"] = {'Name': AWS_IAM_PROFILE}
args["TagSpecifications"][0]["Tags"].append(
{'Key': 'kubernetes.io/cluster/c-abcde', 'Value': "owned"}
)
instance = self._client.run_instances(**args)
node = Node(
provider_node_id=instance['Instances'][0]['InstanceId'],
state=instance['Instances'][0]['State']['Name'],
ssh_user=ssh_user,
ssh_key_name=ssh_private_key_name,
ssh_key_path=ssh_private_key_path,
ssh_key=ssh_private_key,
docker_version=self.DOCKER_VERSION,
docker_installed=self.DOCKER_INSTALLED)
# mark for clean up at the end
self.created_node.append(node.provider_node_id)
if wait_for_ready:
node = self.wait_for_node_state(node)
if public_ip:
node.ready_node()
else:
time.sleep(60)
return node
def create_multiple_nodes(self, number_of_nodes, node_name_prefix,
ami=AWS_AMI, ssh_user=AWS_USER,
key_name=None, wait_for_ready=True,
public_ip=True):
nodes = []
for i in range(number_of_nodes):
node_name = "{}-{}".format(node_name_prefix, i)
nodes.append(self.create_node(node_name,
ami=ami, ssh_user=ssh_user,
key_name=key_name,
wait_for_ready=False,
public_ip=public_ip))
if wait_for_ready:
nodes = self.wait_for_nodes_state(nodes)
            # wait for Windows nodes to come up so we can decrypt the password
if ssh_user == "Administrator":
time.sleep(60 * 6)
for node in nodes:
node.ssh_password = \
self.decrypt_windows_password(node.provider_node_id)
if public_ip:
for node in nodes:
node.ready_node()
else:
time.sleep(60)
return nodes
def get_node(self, provider_id, ssh_access=False):
node_filter = [{
'Name': 'instance-id', 'Values': [provider_id]}]
try:
response = self._client.describe_instances(Filters=node_filter)
nodes = response.get('Reservations', [])
if len(nodes) == 0:
return None # no node found
aws_node = nodes[0]['Instances'][0]
node = Node(
provider_node_id=provider_id,
# node_name= aws_node tags?,
host_name=aws_node.get('PublicDnsName'),
public_ip_address=aws_node.get('PublicIpAddress'),
private_ip_address=aws_node.get('PrivateIpAddress'),
state=aws_node['State']['Name'])
if ssh_access:
node.ssh_user = AWS_USER
node.ssh_key_name = AWS_SSH_KEY_NAME.replace('.pem', '')
node.ssh_key_path = self.master_ssh_key_path
node.ssh_key = self.master_ssh_key
return node
except Boto3Error as e:
msg = "Failed while querying instance '{}' state!: {}".format(
node.node_id, str(e))
raise RuntimeError(msg)
def update_node(self, node):
node_filter = [{
'Name': 'instance-id', 'Values': [node.provider_node_id]}]
try:
response = self._client.describe_instances(Filters=node_filter)
nodes = response.get('Reservations', [])
if len(nodes) == 0 or len(nodes[0]['Instances']) == 0:
return node
aws_node = nodes[0]['Instances'][0]
node.state = aws_node['State']['Name']
node.host_name = aws_node.get('PublicDnsName')
node.public_ip_address = aws_node.get('PublicIpAddress')
node.private_ip_address = aws_node.get('PrivateIpAddress')
return node
except Boto3Error as e:
msg = "Failed while querying instance '{}' state!: {}".format(
node.node_id, str(e))
raise RuntimeError(msg)
def start_node(self, node, wait_for_start=True):
self._client.start_instances(
InstanceIds=[node.provider_node_id])
if wait_for_start:
node = self.wait_for_node_state(node)
return node
def reboot_nodes(self, nodes):
instances = [node.provider_node_id for node in nodes]
self._client.reboot_instances(
InstanceIds=instances)
return
def stop_node(self, node, wait_for_stopped=False):
self._client.stop_instances(
InstanceIds=[node.provider_node_id])
if wait_for_stopped:
node = self.wait_for_node_state(node, 'stopped')
return node
def delete_node(self, node, wait_for_deleted=False):
self._client.terminate_instances(
InstanceIds=[node.provider_node_id])
if wait_for_deleted:
node = self.wait_for_node_state(node, 'terminated')
return node
def delete_eks_cluster(self, cluster_name):
ng_names = self._eks_client.list_nodegroups(clusterName=cluster_name)
for node_group in ng_names['nodegroups']:
print("Deleting node group: " + node_group)
delete_ng_response = self._eks_client.delete_nodegroup(
clusterName=cluster_name,
nodegroupName=node_group)
waiter_ng = self._eks_client.get_waiter('nodegroup_deleted')
for node_group in ng_names['nodegroups']:
print("Waiting for deletion of: " + node_group)
waiter_ng.wait(clusterName=cluster_name, nodegroupName=node_group)
print("Deleting cluster: "+ cluster_name)
delete_response = self._eks_client.delete_cluster(name=cluster_name)
return delete_response
def wait_for_node_state(self, node, state='running'):
# 'running', 'stopped', 'terminated'
timeout = 300
start_time = time.time()
while time.time() - start_time < timeout:
node = self.update_node(node)
if node.state == state:
return node
time.sleep(5)
def wait_for_nodes_state(self, nodes, state='running'):
# 'running', 'stopped', 'terminated'
timeout = 300
start_time = time.time()
completed_nodes = []
while time.time() - start_time < timeout:
for node in nodes:
if len(completed_nodes) == len(nodes):
time.sleep(20) # Give the node some extra time
return completed_nodes
if node in completed_nodes:
continue
node = self.update_node(node)
if node.state == state:
completed_nodes.append(node)
time.sleep(1)
time.sleep(4)
def import_ssh_key(self, ssh_key_name, public_ssh_key):
self._client.delete_key_pair(KeyName=ssh_key_name)
self._client.import_key_pair(
KeyName=ssh_key_name,
PublicKeyMaterial=public_ssh_key)
# mark keys for cleanup
self.created_keys.append(ssh_key_name)
def delete_ssh_key(self, ssh_key_name):
self._client.delete_key_pair(KeyName=ssh_key_name)
def get_nodes(self, filters):
try:
response = self._client.describe_instances(Filters=filters)
nodes = response.get('Reservations', [])
if len(nodes) == 0:
return None # no node found
ret_nodes = []
for aws_node_i in nodes:
aws_node = aws_node_i['Instances'][0]
node = Node(
provider_node_id=aws_node.get('InstanceId'),
# node_name= aws_node tags?,
host_name=aws_node.get('PublicDnsName'),
public_ip_address=aws_node.get('PublicIpAddress'),
private_ip_address=aws_node.get('PrivateIpAddress'),
state=aws_node['State']['Name'])
ret_nodes.append(node)
return ret_nodes
except Boto3Error as e:
msg = "Failed while getting instances: {}".format(str(e))
raise RuntimeError(msg)
def delete_nodes(self, nodes, wait_for_deleted=False):
instance_ids = [node.provider_node_id for node in nodes]
self._client.terminate_instances(InstanceIds=instance_ids)
if wait_for_deleted:
for node in nodes:
node = self.wait_for_node_state(node, 'terminated')
def delete_keypairs(self, name_prefix):
if len(name_prefix) > 0:
key_pairs = self._client.describe_key_pairs()
print(key_pairs["KeyPairs"])
key_pair_list = key_pairs["KeyPairs"]
print(len(key_pair_list))
for key in key_pair_list:
keyName = key["KeyName"]
if keyName.startswith(name_prefix):
print(keyName)
self._client.delete_key_pair(KeyName=keyName)
def _s3_list_files(self, client):
"""List files in specific S3 URL"""
response = client.list_objects(
Bucket=os.environ.get("AWS_S3_BUCKET_NAME", ""),
Prefix=os.environ.get("AWS_S3_BUCKET_FOLDER_NAME", ""))
for content in response.get('Contents', []):
yield content.get('Key')
def s3_backup_check(self, filename=""):
print(AWS_REGION)
print(AWS_REGION_AZ)
client = boto3.client(
's3',
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
region_name=AWS_REGION)
file_list = self._s3_list_files(client)
found = False
for file in file_list:
print(file)
if filename in file:
found = True
break
return found
def register_targets(self, targets, target_group_arn):
self._elbv2_client.register_targets(
TargetGroupArn=target_group_arn,
Targets=targets)
def describe_target_health(self, target_group_arn):
return self._elbv2_client.describe_target_health(
TargetGroupArn=target_group_arn)
def deregister_all_targets(self, target_group_arn):
target_health_descriptions = \
self.describe_target_health(target_group_arn)
if len(target_health_descriptions["TargetHealthDescriptions"]) > 0:
targets = []
for target in \
target_health_descriptions["TargetHealthDescriptions"]:
target_obj = target["Target"]
targets.append(target_obj)
self._elbv2_client.deregister_targets(
TargetGroupArn=target_group_arn,
Targets=targets)
def create_network_lb(self, name, scheme='internet-facing'):
return self._elbv2_client.create_load_balancer(
Name=name, Subnets=[AWS_SUBNET], Scheme=scheme, Type='network'
)
def delete_lb(self, loadBalancerARN):
self._elbv2_client.delete_load_balancer(
LoadBalancerArn=loadBalancerARN
)
def create_ha_target_group(self, port, name):
return self._elbv2_client.create_target_group(
Name=name,
Protocol='TCP',
Port=port,
VpcId=AWS_VPC_ID,
HealthCheckProtocol='HTTP',
HealthCheckPort='80',
HealthCheckEnabled=True,
HealthCheckPath='/healthz',
HealthCheckIntervalSeconds=10,
HealthCheckTimeoutSeconds=6,
HealthyThresholdCount=3,
UnhealthyThresholdCount=3,
Matcher={
'HttpCode': '200-399'
},
TargetType='instance'
)
def delete_target_group(self, targetGroupARN):
self._elbv2_client.delete_target_group(
TargetGroupArn=targetGroupARN
)
def create_ha_nlb_listener(self, loadBalancerARN, port, targetGroupARN):
return self._elbv2_client.create_listener(
LoadBalancerArn=loadBalancerARN,
Protocol='TCP',
Port=port,
DefaultActions=[{'Type': 'forward',
'TargetGroupArn': targetGroupARN}]
)
def upsert_route_53_record_cname(
self, record_name, record_value, action='UPSERT',
record_type='CNAME', record_ttl=300):
return self._route53_client.change_resource_record_sets(
HostedZoneId=AWS_HOSTED_ZONE_ID,
ChangeBatch={
'Comment': 'Record created or updated for automation',
'Changes': [{
'Action': action,
'ResourceRecordSet': {
'Name': record_name,
'Type': record_type,
'TTL': record_ttl,
'ResourceRecords': [{
'Value': record_value
}]
}
}]
}
)
def delete_route_53_record(self, record_name):
record = None
try:
res = self._route53_client.list_resource_record_sets(
HostedZoneId=AWS_HOSTED_ZONE_ID,
StartRecordName=record_name,
MaxItems='1')
if len(res["ResourceRecordSets"]) > 0:
record = res["ResourceRecordSets"][0]
except ClientError as e:
print(e.response)
if record is not None and record["Name"] == record_name:
self._route53_client.change_resource_record_sets(
HostedZoneId=AWS_HOSTED_ZONE_ID,
ChangeBatch={
'Comment': 'delete record',
'Changes': [{
'Action': 'DELETE',
'ResourceRecordSet': record}]
}
)
def decrypt_windows_password(self, instance_id):
password = ""
password_data = self._client. \
get_password_data(InstanceId=instance_id)['PasswordData']
if password_data:
password = base64.b64decode(password_data)
with open(self.get_ssh_key_path(AWS_SSH_KEY_NAME), 'r') \
as privkeyfile:
priv = rsa.PrivateKey.load_pkcs1(privkeyfile.read())
password = rsa.decrypt(password, priv).decode('utf-8')
return password
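    # Background for decrypt_windows_password(): EC2 returns the Windows
    # Administrator password encrypted with the key pair's public key, so it
    # can only be recovered with the matching private key (AWS_SSH_KEY_NAME).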
def get_ebs_volumes(self, provider_node_id):
node_filter = [{
'Name': 'attachment.instance-id', 'Values': [provider_node_id]}]
try:
response = self._client.describe_volumes(Filters=node_filter)
volumes = response.get('Volumes', [])
return volumes
except (Boto3Error, RuntimeError) as e:
msg = "Failed while querying instance '{}' volumes!: {}".format(
provider_node_id, str(e))
raise RuntimeError(msg)
def get_security_group_name(self, security_group_id):
sg_filter = [{
'Name': 'group-id', 'Values': [security_group_id]}]
try:
response = self._client.describe_security_groups(Filters=sg_filter)
security_groups = response.get('SecurityGroups', [])
if len(security_groups) > 0:
return security_groups[0]['GroupName']
except Boto3Error as e:
msg = "Failed while querying security group name for '{}' " \
"in region {}: {}".format(security_group_id,
AWS_REGION, str(e))
raise RuntimeError(msg)
def get_target_groups(self, lb_arn):
tg_list = []
try:
res = self._elbv2_client.describe_listeners(
LoadBalancerArn=lb_arn)
except ClientError:
return tg_list
if res is not None:
for item in res["Listeners"]:
tg_arn = item["DefaultActions"][0]["TargetGroupArn"]
tg_list.append(tg_arn)
return tg_list
def get_lb(self, name):
try:
res = self._elbv2_client.describe_load_balancers(Names=[name])
return res['LoadBalancers'][0]['LoadBalancerArn']
except ClientError:
return None
def get_db(self, db_id):
try:
res = self._db_client.\
describe_db_instances(DBInstanceIdentifier=db_id)
return res['DBInstances'][0]['DBInstanceIdentifier']
except ClientError:
return None
def delete_db(self, db_id):
try:
self._db_client.delete_db_instance(DBInstanceIdentifier=db_id,
SkipFinalSnapshot=True,
DeleteAutomatedBackups=True)
except ClientError:
return None
def create_eks_cluster(self, name):
kubeconfig_path = self.create_eks_controlplane(name)
self.create_eks_nodegroup(name, '{}-ng'.format(name))
return kubeconfig_path
def create_eks_controlplane(self, name):
vpcConfiguration = {
"subnetIds": AWS_SUBNETS,
"securityGroupIds": [AWS_SECURITY_GROUP],
"endpointPublicAccess": True,
"endpointPrivateAccess": False
}
self._eks_client.\
create_cluster(name=name,
version=EKS_VERSION,
roleArn=EKS_ROLE_ARN,
resourcesVpcConfig=vpcConfiguration)
return self.wait_for_eks_cluster_state(name, "ACTIVE")
def create_eks_nodegroup(self, cluster_name, name):
scaling_config = {
"minSize": 3,
"maxSize": 3,
"desiredSize": 3
}
remote_access = {
"ec2SshKey": AWS_SSH_KEY_NAME.replace('.pem', '')
}
ng = self._eks_client.\
create_nodegroup(clusterName=cluster_name,
nodegroupName=name,
scalingConfig=scaling_config,
diskSize=20,
subnets=AWS_SUBNETS,
instanceTypes=[AWS_INSTANCE_TYPE],
nodeRole=EKS_WORKER_ROLE_ARN,
remoteAccess=remote_access)
waiter_ng = self._eks_client.get_waiter('nodegroup_active')
waiter_ng.wait(clusterName=cluster_name, nodegroupName=name)
return ng
def describe_eks_cluster(self, name):
try:
return self._eks_client.describe_cluster(name=name)
except ClientError:
return None
def describe_eks_nodegroup(self, cluster_name, nodegroup_name):
try:
return self._eks_client.describe_nodegroup(
clusterName=cluster_name,
nodegroupName=nodegroup_name
)
except ClientError:
return None
def wait_for_eks_cluster_state(self, name, target_state, timeout=1200):
start = time.time()
cluster = self.describe_eks_cluster(name)['cluster']
status = cluster['status']
while status != target_state:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to " + target_state)
time.sleep(5)
cluster = self.describe_eks_cluster(name)['cluster']
status = cluster['status']
print(status)
return cluster
def wait_for_delete_eks_cluster(self, cluster_name):
ng_names = self._eks_client.list_nodegroups(clusterName=cluster_name)
waiter_ng = self._eks_client.get_waiter('nodegroup_deleted')
for node_group in ng_names['nodegroups']:
print ("Waiting for deletion of nodegroup: {}".format(node_group))
waiter_ng.wait(clusterName=cluster_name, nodegroupName=node_group)
print ("Waiting for deletion of cluster: {}".format(cluster_name))
waiter = self._eks_client.get_waiter('cluster_deleted')
waiter.wait(name=cluster_name)
def disable_source_dest_check(self, instance_id):
response = self._client.modify_instance_attribute(
SourceDestCheck={'Value': False},
InstanceId=instance_id)
return response
| 26,266 | 37.856509 | 79 |
py
|
rancher
|
rancher-master/tests/validation/lib/node.py
|
import json
import logging
import paramiko
import time
logging.getLogger("paramiko").setLevel(logging.CRITICAL)
DOCKER_INSTALL_CMD = (
"curl https://releases.rancher.com/install-docker/{0}.sh | sh")
class Node(object):
def __init__(self, provider_node_id=None, host_name=None, node_name=None,
public_ip_address=None, private_ip_address=None, state=None,
labels=None, host_name_override=None, ssh_key=None,
ssh_key_name=None, ssh_key_path=None, ssh_user=None,
docker_version=None, docker_installed="false"):
self.provider_node_id = provider_node_id
        # node name given to the k8s node (the hostname override)
self.node_name = node_name
# Depending on the RKE config, this can be updated to be
# either the internal IP, external IP address or FQDN name
self.node_address = None
self.host_name = host_name
self.host_name_override = host_name_override
self.public_ip_address = public_ip_address
self.private_ip_address = private_ip_address
self.ssh_user = ssh_user
self.ssh_key = ssh_key
self.ssh_key_name = ssh_key_name
self.ssh_key_path = ssh_key_path
self.docker_version = docker_version
self.docker_installed = docker_installed
self._roles = []
self.labels = labels or {}
self.state = state
self._ssh_client = paramiko.SSHClient()
self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh_port = '22'
self._ssh_password = None
@property
def ssh_password(self):
return self._ssh_password
@ssh_password.setter
def ssh_password(self, password):
self._ssh_password = password
@property
def roles(self):
return self._roles
@roles.setter
def roles(self, r):
self._roles = r
def wait_for_ssh_ready(self):
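        # Retry the SSH connection for up to 100 seconds (3s between
        # attempts), collecting the errors seen so they can be reported
        # if the node never becomes reachable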
command = 'whoami'
start_time = int(time.time())
logs_while_waiting = ''
while int(time.time()) - start_time < 100:
try:
self._ssh_client.connect(
self.public_ip_address, username=self.ssh_user,
key_filename=self.ssh_key_path, port=int(self.ssh_port))
result = self._ssh_client.exec_command(command)
if result and len(result) == 3 and result[1].readable():
result = [result[1].read(), result[2].read()]
self._ssh_client.close()
return True
except Exception as e:
self._ssh_client.close()
time.sleep(3)
logs_while_waiting += str(e) + '\n'
continue
raise Exception(
"Unable to connect to node '{0}' by SSH: {1}".format(
self.public_ip_address, logs_while_waiting))
def execute_command(self, command):
result = None
try:
if self.ssh_password is not None:
self._ssh_client.connect(
self.public_ip_address, username=self.ssh_user,
password=self.ssh_password, port=int(self.ssh_port))
else:
self._ssh_client.connect(
self.public_ip_address, username=self.ssh_user,
key_filename=self.ssh_key_path, port=int(self.ssh_port))
result = self._ssh_client.exec_command(command)
if result and len(result) == 3 and result[1].readable():
result = [str(result[1].read(), 'utf-8'),
str(result[2].read(), 'utf-8')]
finally:
self._ssh_client.close()
return result
def install_docker(self):
# TODO: Fix to install native on RHEL 7.4
command = (
"{} && sudo usermod -aG docker {} && sudo systemctl enable docker"
.format(
DOCKER_INSTALL_CMD.format(self.docker_version),
self.ssh_user))
return self.execute_command(command)
def ready_node(self):
# ignore Windows node
if self.ssh_user == "Administrator":
return
self.wait_for_ssh_ready()
if self.docker_installed.lower() == 'false':
self.install_docker()
def docker_ps(self, all=False, includeall=False):
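        # Returns a dict mapping container name -> image; pass
        # includeall=True to also list stopped containers (docker ps -a)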
result = self.execute_command(
'docker ps --format "{{.Names}}\t{{.Image}}"')
if includeall:
print("Docker ps including all containers")
result = self.execute_command(
'docker ps -a --format "{{.Names}}\t{{.Image}}"')
if result[1] != '':
raise Exception(
"Error:'docker ps' command received this stderr output: "
"{0}".format(result[1]))
        parse_out = result[0].strip('\n').split('\n')
        ret_dict = {}
        # str.split() always returns a non-empty list, so an empty
        # 'docker ps' output shows up as [''] rather than ''
        if parse_out == ['']:
            return ret_dict
for item in parse_out:
item0, item1 = item.split('\t')
ret_dict[item0] = item1
return ret_dict
def docker_inspect(self, container_name, output_format=None):
if output_format:
command = 'docker inspect --format \'{0}\' {1}'.format(
output_format, container_name)
else:
command = 'docker inspect {0}'.format(container_name)
result = self.execute_command(command)
if result[1] != '':
raise Exception(
"Error:'docker inspect' command received this stderr output: "
"{0}".format(result[1]))
result = json.loads(result[0])
return result
def docker_exec(self, container_name, cmd):
command = 'docker exec {0} {1}'.format(container_name, cmd)
result = self.execute_command(command)
print(result)
if result[1] != '':
raise Exception(
"Error:'docker exec' command received this stderr output: "
"{0}".format(result[1]))
return result[0]
def get_public_ip(self):
return self.public_ip_address
| 6,138 | 35.981928 | 78 |
py
|
rancher
|
rancher-master/tests/validation/lib/kubectl_client.py
|
import os
import json
import time
import subprocess
DEBUG = os.environ.get('DEBUG', 'false')
CONFORMANCE_YAML = ("tests/kubernetes_conformance/resources/k8s_ymls/"
"sonobuoy-conformance.yaml")
class KubectlClient(object):
def __init__(self):
self._kube_config_path = None
self._hide = False if DEBUG.lower() == 'true' else True
@property
def kube_config_path(self):
return self._kube_config_path
@kube_config_path.setter
def kube_config_path(self, value):
self._kube_config_path = value
@staticmethod
def _load_json(output):
if output == '':
return None
return json.loads(output)
def _default_output_json(self, **cli_options):
"""
Adds --output=json to options
Does not override if output is passed in!
"""
if 'output' not in list(cli_options.keys()):
cli_options['output'] = 'json'
return cli_options
def _cli_options(self, **kwargs):
"""
Pass through any kubectl option
A couple of exceptions for the keyword args mapping to the
cli options names:
1) if option flag has a '-', replace with '_'
        e.g. '--all-namespaces' can be passed in as all_namespaces=True
2) reserved words:
For cli option: 'as' => 'as_user'
"""
command_options = ""
for k, v in kwargs.items():
# Do not include values that are none
if v is None:
continue
# reserved word
k = 'as' if k == 'as_user' else k
# k = 'all' if k == 'all_' else k
if v is False or v is True:
value = str(v).lower()
else:
value = v
command_options += " --{}={}".format(k.replace('_', '-'), value)
return command_options
def execute_kubectl_cmd(self, cmd, json_out=True):
command = 'kubectl --kubeconfig {0} {1}'.format(
self.kube_config_path, cmd)
if json_out:
command += ' -o json'
print("Running kubectl command: {}".format(command))
start_time = time.time()
result = self.run_command(command)
end_time = time.time()
print('Run time for command {0}: {1} seconds'.format(
command, end_time - start_time))
return result
def execute_kubectl(self, cmd, **cli_options):
# always add kubeconfig
cli_options['kubeconfig'] = self.kube_config_path
command = 'kubectl {0}{1}'.format(
cmd, self._cli_options(**cli_options))
print("Running kubectl command: {}".format(command))
start_time = time.time()
result = self.run_command_with_stderr(command)
end_time = time.time()
print('Run time for command {0}: {1} seconds'.format(
command, end_time - start_time))
return result
def exec_cmd(self, pod, cmd, namespace):
result = self.execute_kubectl_cmd(
'exec {0} --namespace={1} -- {2}'.format(pod, namespace, cmd),
json_out=False)
return result
def logs(self, pod='', **cli_options):
command = 'logs {0}'.format(pod) if pod else "logs"
result = self.execute_kubectl(command, **cli_options)
return result
def cp_from_pod(self, pod, namespace, path_in_pod, local_path):
command = "cp {}/{}:{} {}".format(
namespace, pod, path_in_pod, local_path)
return self.execute_kubectl(command)
def list_namespaces(self):
ns = self.get_resource("namespace")
return [n['metadata']['name'] for n in ns['items']]
def get_nodes(self):
nodes = self.get_resource("nodes")
return nodes
def create_ns(self, namespace):
self.create_resource("namespace", namespace)
# Verify namespace is created
ns = self.get_resource("namespace", name=namespace)
assert ns["metadata"]["name"] == namespace
assert ns["status"]["phase"] == "Active"
return ns
def run(self, name, **cli_options):
command = "run {0}".format(name)
result = self.execute_kubectl(command, **cli_options)
return result
def create_resourse_from_yml(self, file_yml, namespace=None):
cmd = "create -f {0}".format(file_yml)
if namespace:
cmd += ' --namespace={0}'.format(namespace)
return self.execute_kubectl_cmd(cmd)
def delete_resourse_from_yml(self, file_yml, namespace=None):
cmd = "delete -f {0}".format(file_yml)
if namespace:
cmd += ' --namespace={0}'.format(namespace)
return self.execute_kubectl_cmd(cmd, json_out=False)
def create_resource(self, resource, name=None, **cli_options):
cli_options = self._default_output_json(**cli_options)
command = "create {0}".format(resource)
if name:
command += ' {0}'.format(name)
result = self.execute_kubectl(command, **cli_options)
return self._load_json(result)
def get_resource(self, resource, name=None, **cli_options):
cli_options = self._default_output_json(**cli_options)
command = "get {0}".format(resource)
if name:
command += ' {0}'.format(name)
result = self.execute_kubectl(command, **cli_options)
return self._load_json(result)
def delete_resourse(self, resource, name=None, **cli_options):
command = "delete {0}".format(resource)
if name:
command += ' {0}'.format(name)
return self.execute_kubectl(command, **cli_options)
def wait_for_pods(self, number_of_pods=1, state='Running', **cli_options):
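        # Poll 'kubectl get pods' every 5 seconds for up to 300 seconds
        # until exactly number_of_pods matching pods reach the given state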
start_time = int(time.time())
while True:
pods = self.get_resource('pods', **cli_options)
print("pods:")
print(pods)
print (len(pods['items']))
if len(pods['items']) == number_of_pods:
running_pods = 0
for pod in pods['items']:
print (pod['status']['phase'])
if pod['status']['phase'] != state:
print("Pod '{0}' not {1} is {2}!".format(
pod['metadata']['name'], state,
pod['status']['phase']))
break
else:
running_pods += 1
if running_pods == number_of_pods:
return pods
if int(time.time()) - start_time > 300:
pod_states = {}
for p in pods.get('items', []):
pod_states[p['metadata']['name']] = p['status']['phase']
raise Exception(
'Timeout Exception: pods did not start\n'
'Expect number of pods {0} vs number of pods found {1}:\n'
'Pod states: {2}'.format(
number_of_pods, len(pod_states), pod_states))
time.sleep(5)
def wait_for_pod(self, name, state='Running', **cli_options):
"""
If a pod name is known, wait for pod to start
"""
start_time = int(time.time())
while True:
pod = self.get_resource('pod', name=name, **cli_options)
if pod['status']['phase'] != state:
print("Pod '{0}' not {1} is {2}!".format(
pod['metadata']['name'], state, pod['status']['phase']))
else:
time.sleep(15)
return pod
if int(time.time()) - start_time > 300:
raise Exception(
'Timeout Exception: pod {} did not start\n'.format(name))
time.sleep(5)
    def apply_conformance_tests(self):
        command = "apply -f {0}".format(CONFORMANCE_YAML)
        # execute_kubectl_cmd returns the raw stdout string and
        # run_command raises CalledProcessError if the apply fails,
        # so there is no .ok/.stdout/.stderr result object to inspect
        result = self.execute_kubectl_cmd(command)
        assert result, (
            "Failed to apply sonobuoy-conformance.yaml.\n"
            "Command: '{0}'\n".format(command))
        return result
def run_command(self, command):
return subprocess.check_output(command, shell=True, text=True)
    def run_command_with_stderr(self, command):
        try:
            output = subprocess.check_output(command, shell=True,
                                             stderr=subprocess.PIPE)
        except subprocess.CalledProcessError as e:
            # Log the failure but still hand the captured output back
            # to the caller instead of implicitly returning None
            print(e.output)
            print(e.stderr)
            output = e.output
            print(e.returncode)
        return output
| 8,702 | 36.351931 | 78 |
py
|
rancher
|
rancher-master/tests/validation/lib/__init__.py
| 0 | 0 | 0 |
py
|
|
rancher
|
rancher-master/tests/validation/lib/cloud_provider.py
|
import abc
import os
from invoke import run
class CloudProviderBase(object, metaclass=abc.ABCMeta):
DOCKER_VERSION = os.environ.get("DOCKER_VERSION", '20.10')
DOCKER_INSTALLED = os.environ.get("DOCKER_INSTALLED", "false")
@abc.abstractmethod
def create_node(self, node_name, wait_for_ready=False):
raise NotImplementedError
@abc.abstractmethod
def stop_node(self, node, wait_for_stop=False):
raise NotImplementedError
@abc.abstractmethod
def delete_node(self, wait_for_delete=False):
raise NotImplementedError
@abc.abstractmethod
def import_ssh_key(self, ssh_key_name, public_ssh_key):
raise NotImplementedError
@abc.abstractmethod
def delete_ssh_key(self, ssh_key_name):
raise NotImplementedError
def save_master_key(self, ssh_key_name, ssh_key):
if not os.path.isfile('.ssh/{}'.format(ssh_key_name)):
run('mkdir -p .ssh')
with open('.ssh/{}'.format(ssh_key_name), 'w') as f:
f.write(ssh_key)
run("chmod 0600 .ssh/{0}".format(ssh_key_name))
run("cat .ssh/{}".format(ssh_key_name))
def generate_ssh_key(self, ssh_key_name, ssh_key_passphrase=''):
try:
if not os.path.isfile('.ssh/{}'.format(ssh_key_name)):
run('mkdir -p .ssh && rm -rf .ssh/{}'.format(ssh_key_name))
run("ssh-keygen -N '{1}' -C '{0}' -f .ssh/{0}".format(
ssh_key_name, ssh_key_passphrase))
run("chmod 0600 .ssh/{0}".format(ssh_key_name))
public_ssh_key = self.get_ssh_key(ssh_key_name + '.pub')
except Exception as e:
raise Exception("Failed to generate ssh key: {0}".format(e))
return public_ssh_key
def get_ssh_key(self, ssh_key_name):
with open(self.get_ssh_key_path(ssh_key_name), 'r') as f:
ssh_key = f.read()
return ssh_key
def get_ssh_key_path(self, ssh_key_name):
key_path = os.path.abspath('.ssh/{}'.format(ssh_key_name))
return key_path
| 2,074 | 34.169492 | 75 |
py
|
rancher
|
rancher-master/tests/validation/lib/rke_client.py
|
import os
import jinja2
import logging
import tempfile
import time
import subprocess
from yaml import safe_load
logging.getLogger('invoke').setLevel(logging.WARNING)
DEBUG = os.environ.get('DEBUG', 'false')
DEFAULT_CONFIG_NAME = 'cluster.yml'
DEFAULT_NETWORK_PLUGIN = os.environ.get('DEFAULT_NETWORK_PLUGIN', 'canal')
K8S_VERSION = os.environ.get('RANCHER_K8S_VERSION', "")
class RKEClient(object):
"""
Wrapper to interact with the RKE cli
"""
def __init__(self, master_ssh_key_path, template_path):
self.master_ssh_key_path = master_ssh_key_path
self.template_path = template_path
self._working_dir = tempfile.mkdtemp()
self._hide = False if DEBUG.lower() == 'true' else True
def _run(self, command):
print('Running command: {}'.format(command))
start_time = time.time()
result = self.run_command('cd {0} && {1}'.format(self._working_dir,
command))
end_time = time.time()
print('Run time for command {0}: {1} seconds'.format(
command, end_time - start_time))
return result
def up(self, config_yml, config=None):
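        # Write the rendered cluster.yml into the temporary working
        # directory and run 'rke up' against it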
yml_name = config if config else DEFAULT_CONFIG_NAME
self._save_cluster_yml(yml_name, config_yml)
cli_args = '' if config is None else ' --config {0}'.format(config)
result = self._run("rke up {0}".format(cli_args))
print(
"RKE kube_config:\n{0}".format(self.get_kube_config_for_config()))
return result
def remove(self, config=None):
result = self._run("rke remove --force")
return result
def build_rke_template(self, template, nodes, **kwargs):
"""
This method builds RKE cluster.yml from a template,
and updates the list of nodes in update_nodes
"""
render_dict = {
'master_ssh_key_path': self.master_ssh_key_path,
'network_plugin': DEFAULT_NETWORK_PLUGIN,
'k8s_version': K8S_VERSION
}
        render_dict.update(kwargs)  # kwargs override defaults if passed in
node_index = 0
for node in nodes:
node_dict = {
'ssh_user_{}'.format(node_index): node.ssh_user,
'ip_address_{}'.format(node_index): node.public_ip_address,
'dns_hostname_{}'.format(node_index): node.host_name,
'ssh_key_path_{}'.format(node_index): node.ssh_key_path,
'ssh_key_{}'.format(node_index): node.ssh_key,
'internal_address_{}'.format(node_index):
node.private_ip_address,
'hostname_override_{}'.format(node_index):
node.node_name
}
render_dict.update(node_dict)
node_index += 1
yml_contents = jinja2.Environment(
loader=jinja2.FileSystemLoader(self.template_path)
).get_template(template).render(render_dict)
print("Generated cluster.yml contents:\n", yml_contents)
nodes = self.update_nodes(yml_contents, nodes)
return yml_contents, nodes
@staticmethod
def convert_to_dict(yml_contents):
        return safe_load(yml_contents)
def update_nodes(self, yml_contents, nodes):
"""
        This maps some of the rke logic for how the k8s nodes are configured
        onto the nodes created by the cloud provider, so that the nodes list
        is the source of truth to validate against kubectl calls
"""
yml_dict = self.convert_to_dict(yml_contents)
for dict_node in yml_dict['nodes']:
for node in nodes:
if node.public_ip_address == dict_node['address'] or \
node.host_name == dict_node['address']:
# dep
node.host_name = dict_node['address']
if dict_node.get('hostname_override'):
node.node_name = dict_node['hostname_override']
else:
node.node_name = node.host_name
node.roles = dict_node['role']
# if internal_address is given, used to communicate
# this is the expected ip/value in nginx.conf
node.node_address = node.host_name
if dict_node.get('internal_address'):
node.node_address = dict_node['internal_address']
break
return nodes
def _save_cluster_yml(self, yml_name, yml_contents):
file_path = "{}/{}".format(self._working_dir, yml_name)
with open(file_path, 'w') as f:
f.write(yml_contents)
def get_kube_config_for_config(self, yml_name=DEFAULT_CONFIG_NAME):
file_path = "{}/kube_config_{}".format(self._working_dir, yml_name)
with open(file_path, 'r') as f:
kube_config = f.read()
return kube_config
def kube_config_path(self, yml_name=DEFAULT_CONFIG_NAME):
return os.path.abspath(
"{}/kube_config_{}".format(self._working_dir, yml_name))
def save_kube_config_locally(self, yml_name=DEFAULT_CONFIG_NAME):
file_name = 'kube_config_{}'.format(yml_name)
contents = self.get_kube_config_for_config(yml_name)
with open(file_name, 'w') as f:
f.write(contents)
def run_command(self, command):
return subprocess.check_output(command, shell=True, text=True)
def run_command_with_stderr(self, command):
try:
output = subprocess.check_output(command, shell=True,
stderr=subprocess.PIPE)
returncode = 0
except subprocess.CalledProcessError as e:
output = e.output
returncode = e.returncode
print(returncode)
| 5,848 | 38.52027 | 78 |
py
|
rancher
|
rancher-master/tests/validation/images/container-utils/app.py
|
from flask import Flask, request
import os
import random
import requests
import socket
from string import ascii_letters, digits
from subprocess import call
TEMP_DIR = os.path.dirname(os.path.realpath(__file__)) + '/temp'
app = Flask(__name__)
def generate_random_file_name():
name = ''.join(random.choice(ascii_letters + digits) for _ in list(range(35)))
return "{0}/{1}.txt".format(TEMP_DIR, name)
@app.route('/')
def home():
return "welcome to container-utils"
@app.route('/metadata/<path:path>', methods=['GET'])
def get_metadata(path):
accept_type = request.headers.get('Accept')
headers = {'Accept': accept_type} if accept_type else None
url = "http://rancher-metadata/%s" % path
try:
response = requests.get(url=url, headers=headers)
except Exception as e:
return "Error: {0}".format(e), 400
if not response.ok:
return response.content, response.status_code
return response.content, 200
@app.route('/hostname', methods=['GET'])
def get_hostname():
return str(socket.gethostname())
@app.route('/env', methods=['GET'])
def get_environment_variable():
if 'var' not in request.args:
return "Required param 'var' is missing", 400
var = request.args['var']
if var not in os.environ:
return "Not found '{0}' in environment variables".format(var), 404
return str(os.environ[var])
@app.route('/proxy', methods=['GET'])
def proxy():
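    # Two modes: either an explicit 'url' query parameter, or
    # 'link' + 'port' + 'path', which are resolved through the
    # <LINK>_PORT_<port>_TCP_ADDR/_TCP_PORT environment variables
    # that Docker-style links inject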
url = request.args.get('url')
link = request.args.get('link')
port = request.args.get('port')
path = request.args.get('path')
if link is not None and port is not None and path is not None:
link = link.upper()
dest_port = os.environ.get(link + "_PORT_" + port + "_TCP_PORT")
dest_host = os.environ.get(link + "_PORT_" + port + "_TCP_ADDR")
err_msg = "Not found '{0}' in environment variables"
        if dest_port is None:
            return err_msg.format(link + "_PORT_" + port + "_TCP_PORT"), 404
        if dest_host is None:
            return err_msg.format(link + "_PORT_" + port + "_TCP_ADDR"), 404
url = 'http://{0}:{1}/{2}'.format(dest_host, dest_port, path)
if url is None:
return ("Required param missing: Either 'url', or all params "
"'link', 'port' and 'path' are required"), 400
try:
response = requests.get(url=url)
except Exception as e:
return "Error: {0}".format(e), 400
if not response.ok:
return response.content, response.status_code
return response.content, 200
@app.route('/dig', methods=['GET'])
def get_dig_info():
if 'host' not in request.args:
return "Required param 'host' is missing", 400
host = request.args['host']
temp_file = generate_random_file_name()
try:
with open(temp_file, 'w') as f:
call(['dig', host, '+short'], stdout=f)
with open(temp_file, 'r') as f:
content = f.read()
except Exception as e:
content = "Error: {0}".format(e)
finally:
if os.path.isfile(temp_file):
os.remove(temp_file)
return content
@app.route('/ping', methods=['GET'])
def health_check():
return 'ping'
if __name__ == '__main__':
if not os.path.isdir(TEMP_DIR):
os.makedirs(TEMP_DIR)
app.run(debug=True, host='0.0.0.0')
| 3,284 | 28.070796 | 82 |
py
|
semanticilp-aaai
|
semanticilp-aaai/other/generateHashForEachQuestion.py
|
## I used this because I need to convert each question to a hash value
import csv
import hashlib
fourth = True
if fourth:
questions_file = "/Users/daniel/ideaProjects/TextILP/other/questionSets/Public-Feb2016-Elementary-NDMC-Test.tsv"
lucene_predictions = "/Users/daniel/ideaProjects/TextILP/other/lucene-public-test-4th.csv"
textilp_predictions = "/Users/daniel/ideaProjects/TextILP/other/predictionPerQuestion-public-8thgrade-test-40-questions.txt"
else:
questions_file = "/Users/daniel/ideaProjects/TextILP/other/questionSets/Public-Gr08-Test.tsv"
lucene_predictions = "/Users/daniel/ideaProjects/TextILP/other/lucene-public-test-8th3.csv"
textilp_predictions = "/Users/daniel/ideaProjects/TextILP/other/predictionPerQuestion-public-4thgrade-test-40-questions.txt"
lucene_selected_per_hash = {}
lucene_score_per_hash = {}
question_to_hash_map = {}
hash_to_questin_map = {}
textilp_score_per_question = {}
textilp_selected_per_question = {}
list_of_questions = []
def get_complete_question(q_only):
print(len(list_of_questions))
for q in list_of_questions:
if q_only in q:
return q
print("didn't find .. . . " + q_only)
return ""
def main():
with open(questions_file) as tsvfile:
csvreader = csv.reader(tsvfile, delimiter="\t")
for line in csvreader:
#print(line[0])
list_of_questions.append(line[0])
with open(lucene_predictions) as csvfile:
csvreader = csv.reader(csvfile, delimiter=",")
for line in csvreader:
print(line)
print(line[2])
print(line[8])
if(fourth):
if(line[2] == '1.0'):
lucene_selected_per_hash[line[0]] = line[1]
lucene_score_per_hash[line[0]] = (line[2] == '1.0' and line[8] == '1')
else:
if(line[2] == '1'):
lucene_selected_per_hash[line[0]] = line[1]
lucene_score_per_hash[line[0]] = (line[2] == '1' and line[3] == '1')
with open(textilp_predictions) as tsvfile:
tsvreader = csv.reader(tsvfile, delimiter="\t")
for line in tsvreader:
#print(line)
full_question = get_complete_question(line[0]) # .encode("utf-8")
#print(full_question)
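            # The first 8 hex chars of the SHA-1 of the full question text
            # are used as the join key against the Lucene prediction file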
hash = hashlib.sha1(full_question.encode("utf-8")).hexdigest()[:8]
question_to_hash_map[line[0]] = hash
hash_to_questin_map[hash] = line[0]
textilp_selected_per_question[line[0]] = line[1]
textilp_score_per_question[line[0]] = line[1]==line[2]
#print(hashlib.sha1(line[0].encode("utf-8")).hexdigest()[:8])
print(len(lucene_selected_per_hash))
print(len(lucene_score_per_hash))
print(len(textilp_score_per_question))
print(len(hash_to_questin_map))
print(len(question_to_hash_map))
# for each question, check whether the hash for lucene prediction exists or not, and if so, calculate the overlap
both_correct = 0
both_incorrect = 0
textilp_correct_only = 0
lucene_correct_only = 0
textilp_correct = 0
lucene_correct = 0
total = 0
f1 = open('/Users/daniel/ideaProjects/TextILP/other/anlaysis/SilpCorrectButLuceneIncorrect.txt', 'w+')
f2 = open('/Users/daniel/ideaProjects/TextILP/other/anlaysis/SilpInorrectButLuceneCorrect.txt', 'w+')
print(lucene_score_per_hash)
for q, hash in question_to_hash_map.items():
if hash in lucene_score_per_hash:
total = total + 1
textilp_score = textilp_score_per_question[q]
lucene_score = lucene_score_per_hash[hash]
print(textilp_score)
print(lucene_score)
if lucene_score:
lucene_correct = lucene_correct + 1
if textilp_score:
textilp_correct = textilp_correct + 1
if textilp_score and lucene_score:
both_correct = both_correct + 1
elif not textilp_score and lucene_score:
f2.write(q + "\n")
lucene_correct_only = lucene_correct_only + 1
elif not lucene_score and textilp_score:
f1.write(q + "\n")
textilp_correct_only = textilp_correct_only + 1
else:
both_incorrect = both_incorrect + 1
else:
print("hash does not exist in the map ")
print(hash)
f2.close()
f1.close()
print(both_correct)
print(lucene_correct_only)
print(textilp_correct_only)
print(both_incorrect)
print(total)
print("textilp: " + str(textilp_correct / total))
print("lucene: " + str(lucene_correct / total))
import matplotlib.pyplot as plt
#
# Data to plot
labels = 'Both\nCorrect', 'Lucene\n correct only', 'SemanticILP\ncorrect only', 'None correct'
sizes = [both_correct, lucene_correct_only, textilp_correct_only, both_incorrect]
colors = ['gold', 'yellowgreen','lightskyblue', 'lightcoral']
explode = (0.0, 0, 0, 0) # explode 1st slice
# Plot
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=False, startangle=0)
plt.axis('equal')
if(fourth):
plt.xlabel('Overlap of the predictions, on AI2Public-4th')
else:
plt.xlabel('Overlap of the predictions, on AI2Public-8th')
plt.show()
if __name__ == '__main__':
main()
| 5,448 | 37.373239 | 128 |
py
|
prifi-experiments
|
prifi-experiments-master/remove-prefix.py
|
import sys
import re
def processFile(inFile, outFile):
linesOut = []
with open(inFile) as file:
rawData = file.read()
lines = rawData.split("\n")
for line in lines:
line2 = ""
if re.search("1 : time_statistics.go:87 \\(log.\\(\\*TimeStatistics\\)\\.ReportWithInfo\\)(\\s+)\\- ", line) is not None:
line2 = re.sub("1 : time_statistics.go:87 \\(log.\\(\\*TimeStatistics\\)\\.ReportWithInfo\\)(\\s+)\\- ", "", line)
elif re.search("1 : bw_statistics.go:107 \\(log.\\(\\*BitrateStatistics\\)\\.ReportWithInfo\\)(\\s+)\\- ", line) is not None:
line2 = re.sub("1 : bw_statistics.go:107 \\(log.\\(\\*BitrateStatistics\\)\\.ReportWithInfo\\)(\\s+)\\- ", "", line)
elif "ReportWithInfo: 114) - " in line:
line2 = line[line.find("ReportWithInfo: 114) - ")+"ReportWithInfo: 114) - ".__len__():]
elif "ReportWithInfo: 114) - " in line:
line2 = line[line.find("ReportWithInfo: 114) - ")+"ReportWithInfo: 114) - ".__len__():]
elif "StopMeasureAndLogWithInfo: 68) - " in line:
line2 = line[line.find("StopMeasureAndLogWithInfo: 68) - ")+"StopMeasureAndLogWithInfo: 68) - ".__len__():]
elif "log.(*BitrateStatistics).ReportWithInfo: 107) - " in line:
line2 = line[line.find("log.(*BitrateStatistics).ReportWithInfo: 107) - ")+"log.(*BitrateStatistics).ReportWithInfo: 107) - ".__len__():]
elif "ReceivedPcap: 60) - " in line:
line2 = line[line.find("ReceivedPcap: 60) - ")+"ReceivedPcap: 60) - ".__len__():]
if line2 != "":
linesOut.append(line2)
with open(outFile, "w") as file:
for line in linesOut:
file.write(line+"\n")
if len(sys.argv) < 2:
print "Argument 1 must be the input file"
sys.exit(1)
if len(sys.argv) < 3:
print "Argument 2 must be the output file"
sys.exit(1)
a = str(sys.argv[1])
b = str(sys.argv[2])
processFile(a, b)
| 2,040 | 46.465116 | 153 |
py
|
prifi-experiments
|
prifi-experiments-master/pcap-individuals.py
|
import sys
import re
import json
import collections
regex1str = "{\"pcap_time_diff\": \"([-\d]+)\""
regex1 = re.compile(regex1str)
json = []
pcapsNames = []
nClientsVals = []
nActiveClientsVals = []
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def parseGreppedToObject(inFile):
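    # Each grepped line is '<logfile>: ... PCAPLog-individuals: v1;v2;...'
    # and the experiment parameters (pcap name, #clients, #active clients,
    # repeat) are encoded in the log file name, separated by '_'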
data = []
with open(inFile) as file:
rawData = file.read()
lines = rawData.split("\n")
# parse the data
for line in lines:
if line.strip() == "":
continue
lineout = {}
line = line.replace('.txt:[0m[0;33m1 : ( utils.(*PCAPLog).Print: 115) - PCAPLog-individuals: ', '');
parts = line.split(' ');
filename = parts[0];
if "logs/" in filename:
filename = filename.replace('logs/', '')
filename.strip()
data = parts[1].strip()
filenamedata = filename.split('_')
lineout['pcap'] = filenamedata[1].replace('.pcap', '')
lineout['nclients'] = filenamedata[2]
lineout['nactiveclients'] = filenamedata[3]
lineout['repeat'] = filenamedata[4]
lineout['data'] = data.split(';')
json.append(lineout)
if lineout['pcap'] not in pcapsNames:
pcapsNames.append(lineout['pcap'])
if lineout['nclients'] not in nClientsVals:
nClientsVals.append(lineout['nclients'])
if lineout['nactiveclients'] not in nActiveClientsVals:
nActiveClientsVals.append(lineout['nactiveclients'])
#print lineout
if len(sys.argv) < 2:
print "Argument 1 must be the input file"
sys.exit(1)
a = str(sys.argv[1])
parseGreppedToObject(a)
for pcap in pcapsNames:
for nactiveclient in nActiveClientsVals:
mergedData = {}
for line in json:
if line['pcap'] == pcap and line['nactiveclients'] == nactiveclient:
if not line['nclients'] in mergedData:
mergedData[line['nclients']] = []
mergedData[line['nclients']].extend(filter(is_number, line['data']))
od = collections.OrderedDict(sorted(mergedData.items()))
for nclients, data in od.items():
with open("individualpcaps_"+pcap+"_"+nactiveclient+"_"+nclients+".gnudata", "w") as file:
for v in data:
file.write(v+",\n")
| 2,517 | 27.942529 | 149 |
py
|
prifi-experiments
|
prifi-experiments-master/fetch-info-dual.py
|
#!/usr/bin/python2
import os
import sys
import json
data = []
if len(sys.argv) < 2:
print "Argument 1 must be the features to extract"
sys.exit(1)
feature = sys.argv[1]
outLines = []
def processFile(inFile):
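    # Keep every JSON object from this log that mentions the requested
    # feature, tagging it with the numeric keys and repeat index parsed
    # from the experiment file name so the output can be sorted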
global logFolder
data = []
with open(logFolder + inFile) as file:
rawData = file.read()
rawData = rawData.replace("},\n]", "}]")
j = json.loads(rawData)
for obj in j:
take=False
for key in obj:
if feature in key or feature in obj[key]:
take=True
if take:
key = inFile.replace("experiment_", "").replace(".json", "")
if "_" in key :
parts = key.split("_")
p1 = int(parts[0])
p2 = int(parts[1])
repeat = int(parts[2])
obj["_key2"] = int(p1)
obj["_key"] = int(p2)
obj["_repeat"] = int(repeat)
else:
obj["_key"] = int(key)
outLines.append(obj)
logFolder = "logs/"
# list all files in dir
files = []
for (dirpath, dirnames, filenames) in os.walk(logFolder):
files = filenames
break
for file in files:
if ".json" not in file:
continue
processFile(file)
outLines.sort(key=lambda x: (x["_key"], x["_key2"]))
for line in outLines:
print json.dumps(line, sort_keys=True)+","
| 1,462 | 22.596774 | 76 |
py
|
prifi-experiments
|
prifi-experiments-master/remove-prefix-old-format.py
|
import sys
def processFile(inFile, outFile):
linesOut = []
with open(inFile) as file:
rawData = file.read()
lines = rawData.split("\n")
for line in lines:
line2 = ""
if "ReportWithInfo: 78) - " in line:
line2 = line[line.find("ReportWithInfo: 78) - ")+"ReportWithInfo: 78) - ".__len__():]
elif "ReportWithInfo: 114) - " in line:
line2 = line[line.find("ReportWithInfo: 114) - ")+"ReportWithInfo: 114) - ".__len__():]
elif "StopMeasureAndLogWithInfo: 68) - " in line:
line2 = line[line.find("StopMeasureAndLogWithInfo: 68) - ")+"StopMeasureAndLogWithInfo: 68) - ".__len__():]
elif "log.(*BitrateStatistics).ReportWithInfo: 107) - " in line:
line2 = line[line.find("log.(*BitrateStatistics).ReportWithInfo: 107) - ")+"log.(*BitrateStatistics).ReportWithInfo: 107) - ".__len__():]
elif "ReceivedPcap: 60) - " in line:
line2 = line[line.find("ReceivedPcap: 60) - ")+"ReceivedPcap: 60) - ".__len__():]
if line2 != "":
linesOut.append(line2)
with open(outFile, "w") as file:
for line in linesOut:
file.write(line+"\n")
if len(sys.argv) < 2:
print "Argument 1 must be the input file"
sys.exit(1)
if len(sys.argv) < 3:
print "Argument 2 must be the output file"
sys.exit(1)
a = str(sys.argv[1])
b = str(sys.argv[2])
processFile(a, b)
| 1,492 | 38.289474 | 153 |
py
|
prifi-experiments
|
prifi-experiments-master/convert-json.py
|
import sys
import re
import json
regex1str = "\[([\d]+)\] ([\d\.]+) round\/sec, ([\d\.]+) kB\/s up, ([\d\.]+) kB\/s down, ([\d\.]+) kB\/s down\(udp\), ([\d\.]+) kB\/s down\(re-udp\)"
regex1 = re.compile(regex1str)
regex2str = "\[([-\d]+)\] ([-\d\.]+) ms \+- ([-\d\.]+) \(over ([-\d\.]+), happened ([-\d\.]+)\)\. Info: ([\w\-_]+)"
regex2 = re.compile(regex2str)
regex3str = "\[StopMeasureAndLog\] measured time for ([\w\-_]+) : ([\d\.]+) ns, info: ([\w\-_]+)"
regex3 = re.compile(regex3str)
regex4str = "Received pcap ([\d\.]+) ([\d\.]+) ([\d\.]+)"
regex4 = re.compile(regex4str)
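# regex1: throughput report, regex2: latency report (mean +- conf),
# regex3: named timing measurement, regex4: replayed pcap packet record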
def processFile(inFile, outFile):
data = []
with open(inFile) as file:
rawData = file.read()
lines = rawData.split("\n")
#find the last report, i.e. the steady state
maxReportId = -1
for line in lines:
reportIdMatches = re.findall("\[([\d]+)\]", line)
if len(reportIdMatches) == 0:
continue
reportId = reportIdMatches[0]
if int(reportId) > maxReportId:
maxReportId = int(reportId)
#print "Max report id is "+str(maxReportId)
# filter by latest report (most stable)
interestingData = []
for line in lines:
if "[0]" not in line and "[1]" not in line:
interestingData.append(line)
# parse the data
for line in interestingData:
parsed = regex1.findall(line)
if len(parsed) > 0:
parsed = parsed[0]
throughputData = {}
throughputData["reportId"] = parsed[0]
throughputData["round_s"] = parsed[1]
throughputData["tp_up"] = parsed[2]
throughputData["tp_down"] = parsed[3]
throughputData["tp_udp_down"] = parsed[4]
throughputData["tp_udp_re_down"] = parsed[5]
data.append(throughputData)
parsed = regex2.findall(line)
if len(parsed) > 0:
parsed = parsed[0]
durationData = {}
durationData["reportId"] = parsed[0]
durationData["duration_mean"] = parsed[1]
durationData["duration_conf"] = parsed[2]
durationData["nsamples"] = parsed[3]
durationData["popsize"] = parsed[4]
durationData["text"] = parsed[5]
data.append(durationData)
parsed = regex3.findall(line)
if len(parsed) > 0:
parsed = parsed[0]
timingData = {}
timingData["measure_name"] = parsed[0]
timingData["duration_ns"] = parsed[1]
timingData["info"] = parsed[2]
data.append(timingData)
parsed = regex4.findall(line)
if len(parsed) > 0:
parsed = parsed[0]
timingData = {}
timingData["pcap_id"] = parsed[0]
timingData["pcap_time_diff"] = parsed[1]
timingData["pcap_size"] = parsed[2]
data.append(timingData)
with open(outFile, "w") as file:
file.write("[")
for line in data:
file.write(json.dumps(line, sort_keys=False)+",\n")
file.write("]")
if len(sys.argv) < 2:
print "Argument 1 must be the input file"
sys.exit(1)
if len(sys.argv) < 3:
print "Argument 2 must be the output file"
sys.exit(1)
a = str(sys.argv[1])
b = str(sys.argv[2])
processFile(a, b)
| 3,530 | 35.402062 | 149 |
py
|
prifi-experiments
|
prifi-experiments-master/fetch-info.py
|
#!/usr/bin/python2
import os
import sys
import json
data = []
if len(sys.argv) < 2:
print "Argument 1 must be the features to extract"
sys.exit(1)
feature = sys.argv[1]
outLines = []
def processFile(inFile):
global logFolder
data = []
with open(logFolder + inFile) as file:
rawData = file.read()
rawData = rawData.replace("},\n]", "}]")
j = json.loads(rawData)
for obj in j:
take=False
for key in obj:
if feature in key or feature in obj[key]:
take=True
if take:
key = inFile.replace("experiment_", "").replace(".json", "")
if "_" in key :
p1 = key[0:key.find("_")]
p2 = key[key.find("_")+1:]
try:
obj["_key"] = int(p1)
except:
obj["_key"] = p1
obj["_key2"] = int(p2)
else:
obj["_key"] = int(key)
outLines.append(obj)
logFolder = "logs/"
# list all files in dir
files = []
for (dirpath, dirnames, filenames) in os.walk(logFolder):
files = filenames
break
for file in files:
if ".json" not in file:
continue
processFile(file)
outLines.sort(key=lambda x:x["_key"])
for line in outLines:
print json.dumps(line, sort_keys=True)+","
| 1,425 | 22 | 76 |
py
|
prifi-experiments
|
prifi-experiments-master/fig7b-churn-anonymity-set/anon_set_size_relative.py
|
#!/usr/bin/python3
import sys
import csv
import math
import numpy
MIN_DIFF = 0.1
assoc = []
disassoc = []
with open('cafe_association.csv', 'r') as csvfile:
f = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in f:
assoc.append(row)
with open('cafe_disassociation.csv', 'r') as csvfile:
f = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in f:
disassoc.append(row)
# print assoc
assocFiltered = []
lastIndexInOut = 0
assocFiltered.append((float(assoc[1][1]), assoc[1][2]))
i=2
while i<len(assoc):
diff = float(assoc[i][1]) - assocFiltered[lastIndexInOut][0]
if diff > MIN_DIFF:
assocFiltered.append( (float(assoc[i][1]), assoc[i][2]) )
lastIndexInOut += 1
i+=1
# print disassoc
disassocFiltered = []
lastIndexInOut = 0
disassocFiltered.append((float(disassoc[1][1]), disassoc[1][2]))
i=2
while i<len(disassoc):
diff = float(disassoc[i][1]) - disassocFiltered[lastIndexInOut][0]
if diff > MIN_DIFF:
disassocFiltered.append( (float(disassoc[i][1]), disassoc[i][2]) )
lastIndexInOut += 1
i+=1
# compute up-down time
bothFiltered = []
for a in assocFiltered:
bothFiltered.append((a[0], a[1], 'a'))
for a in disassocFiltered:
bothFiltered.append((a[0], a[1], 'd'))
bothFiltered = sorted(bothFiltered)
anonSet = [ '40:d6:43:bb:3d:c7' ] #this one device was already there, but disconnects at some point
# from _first10min.pcap
anonSet.append("00:0f:34:bd:4f:0c")
anonSet.append("00:16:6f:4d:0d:10")
anonSet.append("00:07:e9:86:11:c0")
anonSet.append("00:13:ce:aa:71:9b")
anonSet.append("00:13:ce:b1:94:84")
anonSet.append("00:11:24:86:30:d6")
anonSet.append("00:13:ce:67:a1:16")
anonSet.append("00:90:96:36:4b:a5")
anonSet.append("00:15:00:5d:a1:0c")
anonSet.append("00:11:24:fe:1c:7b")
anonSet.append("00:0e:9b:e2:68:ee")
anonSet.append("00:12:f0:10:1b:6d")
anonSet.append("00:14:a5:13:4e:83")
anonSet.append("00:0b:7d:ac:e9:bc")
anonSet.append("00:16:ce:1f:e1:f0")
anonSet.append("00:10:c6:3b:78:36")
anonSet.append("00:16:6f:34:f7:25")
anonSet.append("00:0f:f7:14:77:76")
anonSet.append("00:14:6c:f5:52:2b")
anonSet.append("00:90:4b:b6:6e:b1")
anonSet.append("00:90:4b:c8:cc:d4")
anonSet.append("00:11:85:17:ce:4c")
anonSet.append("00:16:6f:a4:5d:36")
anonSet.append("00:0f:34:64:23:ec")
anonSet.append("3c:da:92:6f:ce:f3")
anonSet.append("ff:ff:df:ed:ff:ff")
anonSet.append("ff:ff:ff:fd:ce:fe")
anonSet.append("01:00:0c:c1:a5:2d")
anonSet.append("00:0d:0b:5a:f6:3e")
anonSet.append("00:13:46:4d:85:68")
anonSet.append("00:16:b6:c9:67:b5")
anonSet.append("01:00:5e:e3:29:4f")
anonSet.append("00:12:f0:f4:82:e5")
anonSet.append("00:11:f5:6e:fa:bd")
anonSet.append("00:0c:41:42:eb:a9")
anonSet.append("00:14:a4:02:6d:3a")
anonSet.append("00:90:96:65:f4:fc")
anonSet.append("00:15:00:7b:f5:fb")
anonSet.append("00:11:24:40:20:07")
x=[]
y=[]
#print
anonSetOverTime = []
for e in bothFiltered:
if e[2] == 'a':
anonSet.append(e[1])
else:
if e[1] not in anonSet:
print(e[1], "Disconnected twice, ignoring")
else:
anonSet.remove(e[1])
anonSet = list(set(anonSet))
#print(e[0], len(anonSet))
x.append(e[0])
y.append(len(anonSet))
#find regression with numpy
regression = numpy.polyfit(x, y, 1)
f = open('cafe_anonymity_set_size_reg.txt', 'w')
f.write("%s %s\n" % (0, float(regression[1])))
f.write("%s %s\n" % (14400, float(14400*regression[0]+regression[1])))
f.close()
x=[]
y=[]
anonSet = [ '40:d6:43:bb:3d:c7' ] #this one device was already there, but disconnects at some point
# from _first10min.pcap
anonSet.append("00:0f:34:bd:4f:0c")
anonSet.append("00:16:6f:4d:0d:10")
anonSet.append("00:07:e9:86:11:c0")
anonSet.append("00:13:ce:aa:71:9b")
anonSet.append("00:13:ce:b1:94:84")
anonSet.append("00:11:24:86:30:d6")
anonSet.append("00:13:ce:67:a1:16")
anonSet.append("00:90:96:36:4b:a5")
anonSet.append("00:15:00:5d:a1:0c")
anonSet.append("00:11:24:fe:1c:7b")
anonSet.append("00:0e:9b:e2:68:ee")
anonSet.append("00:12:f0:10:1b:6d")
anonSet.append("00:14:a5:13:4e:83")
anonSet.append("00:0b:7d:ac:e9:bc")
anonSet.append("00:16:ce:1f:e1:f0")
anonSet.append("00:10:c6:3b:78:36")
anonSet.append("00:16:6f:34:f7:25")
anonSet.append("00:0f:f7:14:77:76")
anonSet.append("00:14:6c:f5:52:2b")
anonSet.append("00:90:4b:b6:6e:b1")
anonSet.append("00:90:4b:c8:cc:d4")
anonSet.append("00:11:85:17:ce:4c")
anonSet.append("00:16:6f:a4:5d:36")
anonSet.append("00:0f:34:64:23:ec")
anonSet.append("3c:da:92:6f:ce:f3")
anonSet.append("ff:ff:df:ed:ff:ff")
anonSet.append("ff:ff:ff:fd:ce:fe")
anonSet.append("01:00:0c:c1:a5:2d")
anonSet.append("00:0d:0b:5a:f6:3e")
anonSet.append("00:13:46:4d:85:68")
anonSet.append("00:16:b6:c9:67:b5")
anonSet.append("01:00:5e:e3:29:4f")
anonSet.append("00:12:f0:f4:82:e5")
anonSet.append("00:11:f5:6e:fa:bd")
anonSet.append("00:0c:41:42:eb:a9")
anonSet.append("00:14:a4:02:6d:3a")
anonSet.append("00:90:96:65:f4:fc")
anonSet.append("00:15:00:7b:f5:fb")
anonSet.append("00:11:24:40:20:07")
#print
f = open('cafe_anonymity_set_size_relative.txt', 'w')
anonSetOverTime = []
differenceWith100 = None
for e in bothFiltered:
time, mac = e[0], e[1]
association = (e[2] == 'a')
if association:
anonSet.append(mac)
else:
if mac not in anonSet:
print(mac, "Disconnected twice, ignoring")
else:
anonSet.remove(mac)
anonSet = list(set(anonSet))
newSize = len(anonSet)
# remove linear trend
regressionPoint = float(time)*regression[0] + regression[1]
# remove origin diff
if differenceWith100 is None:
differenceWith100 = 100 - newSize/regressionPoint * 100
diff = newSize/regressionPoint * 100 + differenceWith100
f.write("%s %s %s %s\n" % (e[0], regressionPoint, newSize, diff))
f.close()
| 5,821 | 27.125604 | 99 |
py
|
prifi-experiments
|
prifi-experiments-master/fig7b-churn-anonymity-set/anon_set_size.py
|
#!/usr/bin/python3
import sys
import csv
import math
import numpy
MIN_DIFF = 0.1
assoc = []
disassoc = []
with open('cafe_association.csv', 'r') as csvfile:
f = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in f:
assoc.append(row)
with open('cafe_disassociation.csv', 'r') as csvfile:
f = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in f:
disassoc.append(row)
# print assoc
assocFiltered = []
lastIndexInOut = 0
assocFiltered.append((float(assoc[1][1]), assoc[1][2]))
i=2
while i<len(assoc):
diff = float(assoc[i][1]) - assocFiltered[lastIndexInOut][0]
if diff > MIN_DIFF:
assocFiltered.append( (float(assoc[i][1]), assoc[i][2]) )
lastIndexInOut += 1
i+=1
# print disassoc
disassocFiltered = []
lastIndexInOut = 0
disassocFiltered.append((float(disassoc[1][1]), disassoc[1][2]))
i=2
while i<len(disassoc):
diff = float(disassoc[i][1]) - disassocFiltered[lastIndexInOut][0]
if diff > MIN_DIFF:
disassocFiltered.append( (float(disassoc[i][1]), disassoc[i][2]) )
lastIndexInOut += 1
i+=1
# compute up-down time
bothFiltered = []
for a in assocFiltered:
bothFiltered.append((a[0], a[1], 'a'))
for a in disassocFiltered:
bothFiltered.append((a[0], a[1], 'd'))
bothFiltered = sorted(bothFiltered)
anonSet = [ '40:d6:43:bb:3d:c7' ] #this one device was already there, but disconnects at some point
# from _first10min.pcap
anonSet.append("00:0f:34:bd:4f:0c")
anonSet.append("00:16:6f:4d:0d:10")
anonSet.append("00:07:e9:86:11:c0")
anonSet.append("00:13:ce:aa:71:9b")
anonSet.append("00:13:ce:b1:94:84")
anonSet.append("00:11:24:86:30:d6")
anonSet.append("00:13:ce:67:a1:16")
anonSet.append("00:90:96:36:4b:a5")
anonSet.append("00:15:00:5d:a1:0c")
anonSet.append("00:11:24:fe:1c:7b")
anonSet.append("00:0e:9b:e2:68:ee")
anonSet.append("00:12:f0:10:1b:6d")
anonSet.append("00:14:a5:13:4e:83")
anonSet.append("00:0b:7d:ac:e9:bc")
anonSet.append("00:16:ce:1f:e1:f0")
anonSet.append("00:10:c6:3b:78:36")
anonSet.append("00:16:6f:34:f7:25")
anonSet.append("00:0f:f7:14:77:76")
anonSet.append("00:14:6c:f5:52:2b")
anonSet.append("00:90:4b:b6:6e:b1")
anonSet.append("00:90:4b:c8:cc:d4")
anonSet.append("00:11:85:17:ce:4c")
anonSet.append("00:16:6f:a4:5d:36")
anonSet.append("00:0f:34:64:23:ec")
anonSet.append("3c:da:92:6f:ce:f3")
anonSet.append("ff:ff:df:ed:ff:ff")
anonSet.append("ff:ff:ff:fd:ce:fe")
anonSet.append("01:00:0c:c1:a5:2d")
anonSet.append("00:0d:0b:5a:f6:3e")
anonSet.append("00:13:46:4d:85:68")
anonSet.append("00:16:b6:c9:67:b5")
anonSet.append("01:00:5e:e3:29:4f")
anonSet.append("00:12:f0:f4:82:e5")
anonSet.append("00:11:f5:6e:fa:bd")
anonSet.append("00:0c:41:42:eb:a9")
anonSet.append("00:14:a4:02:6d:3a")
anonSet.append("00:90:96:65:f4:fc")
anonSet.append("00:15:00:7b:f5:fb")
anonSet.append("00:11:24:40:20:07")
x=[]
y=[]
#print
f = open('cafe_anonymity_set_size.txt', 'w')
anonSetOverTime = []
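# Replay the association/disassociation events in chronological order,
# tracking the size of the connected set (the anonymity set) over time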
for e in bothFiltered:
if e[2] == 'a':
anonSet.append(e[1])
else:
if e[1] not in anonSet:
print(e[1], "Disconnected twice, ignoring")
else:
anonSet.remove(e[1])
anonSet = list(set(anonSet))
#print(e[0], len(anonSet))
x.append(e[0])
y.append(len(anonSet))
anonSetOverTime.append((e[0], len(anonSet)))
f.write("%s %s\n" % (e[0], len(anonSet)))
f.close()
#find regression with numpy
regression = numpy.polyfit(x, y, 1)
f = open('cafe_anonymity_set_size_reg.txt', 'w')
f.write("%s %s\n" % (0, float(regression[1])))
f.write("%s %s\n" % (14400, float(14400*regression[0]+regression[1])))
f.close()
#print
maxIncreaseCurveVal = -1
maxIncreaseDiff = -1
maxIncreasePos = -1
maxDecreaseCurveVal = -1
maxDecreaseDiff = -1
maxDecreasePos = -1
deltas = []
for e in anonSetOverTime:
regressionPoint = float(e[0]*regression[0]+regression[1])
delta = e[1] - regressionPoint
if delta > maxIncreaseDiff:
maxIncreaseDiff = delta
maxIncreasePos = e[0]
maxIncreaseCurveVal = e[1]
if delta < maxDecreaseDiff:
maxDecreaseDiff = delta
maxDecreasePos = e[0]
maxDecreaseCurveVal = e[1]
deltas.append(delta)
print("")
print("Max up/down points")
print(maxIncreasePos, maxIncreaseDiff)
print(maxDecreasePos, maxDecreaseDiff)
f = open('cafe_anonymity_set_size_max_up.txt', 'w')
f.write("%s %s\n" % (maxIncreasePos, maxIncreaseCurveVal))
f.write("%s %s\n" % (maxIncreasePos, maxIncreaseCurveVal - maxIncreaseDiff))
f.close()
f = open('cafe_anonymity_set_size_max_down.txt', 'w')
f.write("%s %s\n" % (maxDecreasePos, maxDecreaseCurveVal))
f.write("%s %s\n" % (maxDecreasePos, maxDecreaseCurveVal - maxDecreaseDiff))
f.close()
mean = 0
meanOfSquares = 0
for d in deltas:
mean += abs(d)
meanOfSquares += d*d
print("")
nsamples = float(len(deltas))
print("Mean of deltas:", float(mean) / nsamples)
print("Variance of deltas:", float(meanOfSquares) / nsamples)
print("Std dev of deltas:", math.sqrt(float(meanOfSquares) / nsamples))
| 5,020 | 26.587912 | 99 |
py
|
prifi-experiments
|
prifi-experiments-master/fig5a-hangouts-pcap-replay/bitratestats.py
|
import sys
import re
import json
import collections
import os
regex1str = "{\"pcap_time_diff\": \"([-\d]+)\""
regex1 = re.compile(regex1str)
json = []
pcapsNames = []
nClientsVals = []
nActiveClientsVals = []
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def parseGreppedToObject(inFile):
data = []
with open(inFile) as file:
rawData = file.read()
lines = rawData.split("\n")
# parse the data
for line in lines:
if line.strip() == "":
continue
lineout = {}
line = line.replace('.txt:[0m[0;33m1 : ( log.(*BitrateStatistics).ReportWithInfo: 107) -', '').replace(' ): ', ' ');
parts = line.split(' ');
filename = parts[0];
if "logs/" in filename:
filename = filename.replace('logs/', '')
filename.strip()
kbps_up = parts[4].strip()
filenamedata = filename.split('_')
lineout['pcap'] = filenamedata[1].replace('.pcap', '')
lineout['nclients'] = filenamedata[2]
lineout['nactiveclients'] = filenamedata[3]
lineout['repeat'] = filenamedata[4]
lineout['kbps_up'] = kbps_up
json.append(lineout)
if lineout['pcap'] not in pcapsNames:
pcapsNames.append(lineout['pcap'])
if lineout['nclients'] not in nClientsVals:
nClientsVals.append(lineout['nclients'])
if lineout['nactiveclients'] not in nActiveClientsVals:
nActiveClientsVals.append(lineout['nactiveclients'])
#print lineout
if len(sys.argv) < 2:
print "Argument 1 must be the input file"
sys.exit(1)
a = str(sys.argv[1])
parseGreppedToObject(a)
for pcap in pcapsNames:
f = "bitratestats_"+pcap+".gnudata"
if os.path.isfile(f):
os.remove(f)
for nactiveclient in nActiveClientsVals:
mergedData = {}
for line in json:
if line['pcap'] == pcap and line['nactiveclients'] == nactiveclient:
if not line['nclients'] in mergedData:
mergedData[line['nclients']] = []
#mergedData[line['nclients']].append(filter(is_number, line['kbps_up']))
if line['kbps_up'] != "" and line['kbps_up'] != ".":
mergedData[line['nclients']].append(float(line['kbps_up']))
od = collections.OrderedDict(sorted(mergedData.items()))
with open(f, "a") as file:
for nclients, data in od.items():
file.write(nclients+", \t"+str(max(data))+",\n")
| 2,706 | 27.797872 | 149 |
py
|
prifi-experiments
|
prifi-experiments-master/fig5a-hangouts-pcap-replay/pcap-individuals.py
|
import sys
import re
import json
import collections
regex1str = "{\"pcap_time_diff\": \"([-\d]+)\""
regex1 = re.compile(regex1str)
json = []
pcapsNames = []
nClientsVals = []
nActiveClientsVals = []
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def parseGreppedToObject(inFile):
data = []
with open(inFile) as file:
rawData = file.read()
lines = rawData.split("\n")
# parse the data
for line in lines:
if line.strip() == "":
continue
lineout = {}
line = line.replace('.txt:[0m[0;33m1 : ( utils.(*PCAPLog).Print: 117) - PCAPLog-individuals (', '').replace(' ): ', ' ');
parts = line.split(' ');
filename = parts[0];
if "logs/" in filename:
filename = filename.replace('logs/', '')
filename.strip()
data = parts[2].strip()
filenamedata = filename.split('_')
lineout['pcap'] = filenamedata[1].replace('.pcap', '')
lineout['nclients'] = filenamedata[2]
lineout['nactiveclients'] = filenamedata[3]
lineout['repeat'] = filenamedata[4]
lineout['data'] = data.split(';')
json.append(lineout)
if lineout['pcap'] not in pcapsNames:
pcapsNames.append(lineout['pcap'])
if lineout['nclients'] not in nClientsVals:
nClientsVals.append(lineout['nclients'])
if lineout['nactiveclients'] not in nActiveClientsVals:
nActiveClientsVals.append(lineout['nactiveclients'])
#print lineout
if len(sys.argv) < 2:
print "Argument 1 must be the input file"
sys.exit(1)
a = str(sys.argv[1])
parseGreppedToObject(a)
for pcap in pcapsNames:
for nactiveclient in nActiveClientsVals:
mergedData = {}
for line in json:
if line['pcap'] == pcap and line['nactiveclients'] == nactiveclient:
if not line['nclients'] in mergedData:
mergedData[line['nclients']] = []
mergedData[line['nclients']].extend(filter(is_number, line['data']))
od = collections.OrderedDict(sorted(mergedData.items()))
for nclients, data in od.items():
with open("individualpcaps_"+pcap+"_"+nactiveclient+"_"+nclients+".gnudata", "w") as file:
for v in data:
file.write(v+",\n")
| 2,539 | 28.195402 | 171 |
py
|
prifi-experiments
|
prifi-experiments-master/fig5a-hangouts-pcap-replay/data_explore.py
|
#!/usr/bin/python3
# usage: cat individualpcaps_.gnudata | ./stats.py
import fileinput
from pprint import pprint
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import sys
def try_parse_int(s, base=10):
try:
return int(s, base)
except ValueError:
return None
if len(sys.argv) < 2:
print("Argument 1 must be the features to extract")
sys.exit(1)
filename = sys.argv[1]
nPackets = 0;
data = [];
with open(filename) as file:
for line in file:
s = int(line.replace(',','').strip())
data.append(s)
plt.plot(data)
plt.ylabel('Latency');
plt.xlabel('Packets');
plt.title(filename);
plt.show();
| 658 | 17.828571 | 55 |
py
|
prifi-experiments
|
prifi-experiments-master/fig5a-hangouts-pcap-replay/data_explore2.py
|
#!/usr/bin/python3
# usage: cat individualpcaps_.gnudata | ./stats.py
from pathlib import Path
import sys
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
def try_parse_int(s, base=10):
try:
return int(s, base)
except ValueError:
return None
def processPoint(fileScheme):
plotData = {}
pathlist = Path("logs").glob(fileScheme)
for path in pathlist:
filename = str(path)
with open(filename) as file:
fileData = []
for line in file:
needle = "PCAPLog-individuals "
if needle in line:
line = line[line.find(needle) + len(needle):].replace('( ', '').replace(' )', '').strip()
parts = line.split(':')
key = parts[0].strip()
data = parts[1].strip().split(';')
print(data)
data = [try_parse_int(x) for x in data if x != ""]
#fileData[key] = data
fileData.extend(data)
plotData[filename] = fileData
plt.plot(fileData, label=filename)
plt.ylabel('Latency');
plt.xlabel('Packets');
plt.title(fileScheme);
plt.legend(loc='best')
plt.show();
processPoint('experiment_skype.pcap_10_1_*.txt')
processPoint('experiment_skype.pcap_20_1_*.txt')
processPoint('experiment_skype.pcap_30_1_*.txt')
#processPoint('experiment_skype.pcap_50_1_*.txt')
#processPoint('experiment_skype.pcap_70_1_*.txt')
#processPoint('experiment_skype.pcap_90_1_*.txt')
| 1,550 | 29.411765 | 109 |
py
|
prifi-experiments
|
prifi-experiments-master/fig8-others-pcap-replay/bitratestats.py
|
import sys
import re
import json
import collections
import os
regex1str = "{\"pcap_time_diff\": \"([-\d]+)\""
regex1 = re.compile(regex1str)
json = []
pcapsNames = []
nClientsVals = []
nActiveClientsVals = []
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def parseGreppedToObject(inFile):
data = []
with open(inFile) as file:
rawData = file.read()
lines = rawData.split("\n")
# parse the data
for line in lines:
if line.strip() == "":
continue
lineout = {}
line = line.replace('.txt:[0m[0;33m1 : ( log.(*BitrateStatistics).ReportWithInfo: 107) -', '').replace(' ): ', ' ');
parts = line.split(' ');
filename = parts[0];
if "logs/" in filename:
filename = filename.replace('logs/', '')
filename.strip()
kbps_up = parts[4].strip()
filenamedata = filename.split('_')
lineout['pcap'] = filenamedata[1].replace('.pcap', '')
lineout['nclients'] = filenamedata[2]
lineout['nactiveclients'] = filenamedata[3]
lineout['repeat'] = filenamedata[4]
lineout['kbps_up'] = kbps_up
json.append(lineout)
if lineout['pcap'] not in pcapsNames:
pcapsNames.append(lineout['pcap'])
if lineout['nclients'] not in nClientsVals:
nClientsVals.append(lineout['nclients'])
if lineout['nactiveclients'] not in nActiveClientsVals:
nActiveClientsVals.append(lineout['nactiveclients'])
#print lineout
if len(sys.argv) < 2:
print "Argument 1 must be the input file"
sys.exit(1)
a = str(sys.argv[1])
parseGreppedToObject(a)
for pcap in pcapsNames:
f = "bitratestats_"+pcap+".gnudata"
if os.path.isfile(f):
os.remove(f)
for nactiveclient in nActiveClientsVals:
mergedData = {}
for line in json:
if line['pcap'] == pcap and line['nactiveclients'] == nactiveclient:
if not line['nclients'] in mergedData:
mergedData[line['nclients']] = []
#mergedData[line['nclients']].append(filter(is_number, line['kbps_up']))
if line['kbps_up'] != "" and line['kbps_up'] != ".":
mergedData[line['nclients']].append(float(line['kbps_up']))
od = collections.OrderedDict(sorted(mergedData.items()))
with open(f, "a") as file:
for nclients, data in od.items():
file.write(nclients+", \t"+str(max(data))+",\n")
| 2,706 | 27.797872 | 149 |
py
|
prifi-experiments
|
prifi-experiments-master/fig8-others-pcap-replay/pcap-individuals.py
|
import sys
import re
import json
import collections
regex1str = "{\"pcap_time_diff\": \"([-\d]+)\""
regex1 = re.compile(regex1str)
json = []
pcapsNames = []
nClientsVals = []
nActiveClientsVals = []
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def parseGreppedToObject(inFile):
data = []
with open(inFile) as file:
rawData = file.read()
lines = rawData.split("\n")
# parse the data
for line in lines:
if line.strip() == "":
continue
lineout = {}
line = line.replace('.txt:[0m[0;33m1 : ( utils.(*PCAPLog).Print: 117) - PCAPLog-individuals (', '').replace(' ): ', ' ');
parts = line.split(' ');
filename = parts[0];
if "logs/" in filename:
filename = filename.replace('logs/', '')
filename.strip()
data = parts[2].strip()
filenamedata = filename.split('_')
lineout['pcap'] = filenamedata[1].replace('.pcap', '')
lineout['nclients'] = filenamedata[2]
lineout['nactiveclients'] = filenamedata[3]
lineout['repeat'] = filenamedata[4]
lineout['data'] = data.split(';')
json.append(lineout)
if lineout['pcap'] not in pcapsNames:
pcapsNames.append(lineout['pcap'])
if lineout['nclients'] not in nClientsVals:
nClientsVals.append(lineout['nclients'])
if lineout['nactiveclients'] not in nActiveClientsVals:
nActiveClientsVals.append(lineout['nactiveclients'])
#print lineout
if len(sys.argv) < 2:
print "Argument 1 must be the input file"
sys.exit(1)
a = str(sys.argv[1])
parseGreppedToObject(a)
for pcap in pcapsNames:
for nactiveclient in nActiveClientsVals:
mergedData = {}
for line in json:
if line['pcap'] == pcap and line['nactiveclients'] == nactiveclient:
if not line['nclients'] in mergedData:
mergedData[line['nclients']] = []
mergedData[line['nclients']].extend(filter(is_number, line['data']))
od = collections.OrderedDict(sorted(mergedData.items()))
for nclients, data in od.items():
with open("individualpcaps_"+pcap+"_"+nactiveclient+"_"+nclients+".gnudata", "w") as file:
for v in data:
file.write(v+",\n")
| 2,539 | 28.195402 | 171 |
py
|
prifi-experiments
|
prifi-experiments-master/fig8-others-pcap-replay/data_explore.py
|
#!/usr/bin/python3
# usage: cat individualpcaps_.gnudata | ./stats.py
import fileinput
from pprint import pprint
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import sys
def try_parse_int(s, base=10):
try:
return int(s, base)
except ValueError:
return None
if len(sys.argv) < 2:
print("Argument 1 must be the features to extract")
sys.exit(1)
filename = sys.argv[1]
nPackets = 0;
data = [];
with open(filename) as file:
for line in file:
s = int(line.replace(',','').strip())
data.append(s)
plt.plot(data)
plt.ylabel('Latency');
plt.xlabel('Packets');
plt.title(filename);
plt.show();
| 658 | 17.828571 | 55 |
py
|
prifi-experiments
|
prifi-experiments-master/fig8-others-pcap-replay/data_explore2.py
|
#!/usr/bin/python3
# usage: cat individualpcaps_.gnudata | ./stats.py
from pathlib import Path
import sys
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
def try_parse_int(s, base=10):
try:
return int(s, base)
except ValueError:
return None
def processPoint(fileScheme):
plotData = {}
pathlist = Path("logs").glob(fileScheme)
for path in pathlist:
filename = str(path)
with open(filename) as file:
fileData = []
for line in file:
needle = "PCAPLog-individuals "
if needle in line:
line = line[line.find(needle) + len(needle):].replace('( ', '').replace(' )', '').strip()
parts = line.split(':')
key = parts[0].strip()
data = parts[1].strip().split(';')
print(data)
data = [try_parse_int(x) for x in data if x != ""]
#fileData[key] = data
fileData.extend(data)
plotData[filename] = fileData
plt.plot(fileData, label=filename)
plt.ylabel('Latency');
plt.xlabel('Packets');
plt.title(fileScheme);
plt.legend(loc='best')
plt.show();
processPoint('experiment_skype.pcap_10_1_*.txt')
processPoint('experiment_skype.pcap_20_1_*.txt')
processPoint('experiment_skype.pcap_30_1_*.txt')
#processPoint('experiment_skype.pcap_50_1_*.txt')
#processPoint('experiment_skype.pcap_70_1_*.txt')
#processPoint('experiment_skype.pcap_90_1_*.txt')
| 1,550 | 29.411765 | 109 |
py
|
prifi-experiments
|
prifi-experiments-master/fig4-skype/bitratestats.py
|
import sys
import re
import json
import collections
import os
import numpy as np
import math
regex1str = "{\"pcap_time_diff\": \"([-\d]+)\""
regex1 = re.compile(regex1str)
json = []
pcapsNames = []
nClientsVals = []
nActiveClientsVals = []
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def parseGreppedToObject(inFile):
data = []
with open(inFile) as file:
rawData = file.read()
lines = rawData.split("\n")
# parse the data
for line in lines:
if line.strip() == "":
continue
lineout = {}
line = line.replace('.txt:[0m[0;33m1 : ( log.(*BitrateStatistics).ReportWithInfo: 107) -', '').replace(' ): ', ' ');
parts = line.split(' ');
filename = parts[0];
if "logs/" in filename:
filename = filename.replace('logs/', '')
            filename = filename.strip()
kbps_up = parts[4].strip()
filenamedata = filename.split('_')
lineout['pcap'] = filenamedata[1].replace('.pcap', '')
lineout['nclients'] = filenamedata[2]
lineout['nactiveclients'] = filenamedata[3]
lineout['repeat'] = filenamedata[4]
lineout['kbps_up'] = kbps_up
json.append(lineout)
if lineout['pcap'] not in pcapsNames:
pcapsNames.append(lineout['pcap'])
if lineout['nclients'] not in nClientsVals:
nClientsVals.append(lineout['nclients'])
if lineout['nactiveclients'] not in nActiveClientsVals:
nActiveClientsVals.append(lineout['nactiveclients'])
#print lineout
if len(sys.argv) < 2:
print "Argument 1 must be the input file"
sys.exit(1)
a = str(sys.argv[1])
parseGreppedToObject(a)
dataset_bitrate_max = 0.47
dataset_stddev = 0
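# Columns written to bitratestats_<pcap>.gnudata, one row per nclients value
# (built by the loop below): nclients, max per-client kbps_up,
# nclients * max kbps_up, stddev of kbps_up, nclients * stddev,
# ceil(0.02 * nclients) * dataset_bitrate_max, and dataset_stddev.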
for pcap in pcapsNames:
f = "bitratestats_"+pcap+".gnudata"
if os.path.isfile(f):
os.remove(f)
for nactiveclient in nActiveClientsVals:
mergedData = {}
for line in json:
if line['pcap'] == pcap and line['nactiveclients'] == nactiveclient:
if not line['nclients'] in mergedData:
mergedData[line['nclients']] = []
#mergedData[line['nclients']].append(filter(is_number, line['kbps_up']))
if line['kbps_up'] != "" and line['kbps_up'] != ".":
mergedData[line['nclients']].append(float(line['kbps_up']))
od = collections.OrderedDict(sorted(mergedData.items()))
with open(f, "a") as file:
for nclients, data in od.items():
val = max(data)
                variance = np.std(data)  # np.std returns the standard deviation, not the variance
bitrate_used = math.ceil(0.02 * float(nclients)) * dataset_bitrate_max
file.write(nclients+", \t"+str(val)+", \t"+str(int(nclients)*val)+", \t"+str(variance)+", \t"+str(int(nclients)*variance)+", \t"+str(bitrate_used)+", \t"+str(dataset_stddev)+",\n")
| 3,074 | 29.147059 | 196 |
py
|
prifi-experiments
|
prifi-experiments-master/fig4-skype/pcap-individuals.py
|
import sys
import re
import json
import collections
regex1str = "{\"pcap_time_diff\": \"([-\d]+)\""
regex1 = re.compile(regex1str)
json = []
pcapsNames = []
nClientsVals = []
nActiveClientsVals = []
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def parseGreppedToObject(inFile):
data = []
with open(inFile) as file:
rawData = file.read()
lines = rawData.split("\n")
# parse the data
for line in lines:
if line.strip() == "":
continue
lineout = {}
line = line.replace('.txt:[0m[0;33m1 : ( utils.(*PCAPLog).Print: 117) - PCAPLog-individuals (', '').replace(' ): ', ' ');
parts = line.split(' ');
filename = parts[0];
if "logs/" in filename:
filename = filename.replace('logs/', '')
            filename = filename.strip()
data = parts[2].strip()
filenamedata = filename.split('_')
lineout['pcap'] = filenamedata[1].replace('.pcap', '')
lineout['nclients'] = filenamedata[2]
lineout['nactiveclients'] = filenamedata[3]
lineout['repeat'] = filenamedata[4]
lineout['data'] = data.split(';')
json.append(lineout)
if lineout['pcap'] not in pcapsNames:
pcapsNames.append(lineout['pcap'])
if lineout['nclients'] not in nClientsVals:
nClientsVals.append(lineout['nclients'])
if lineout['nactiveclients'] not in nActiveClientsVals:
nActiveClientsVals.append(lineout['nactiveclients'])
#print lineout
if len(sys.argv) < 2:
print "Argument 1 must be the input file"
sys.exit(1)
a = str(sys.argv[1])
parseGreppedToObject(a)
for pcap in pcapsNames:
for nactiveclient in nActiveClientsVals:
mergedData = {}
for line in json:
if line['pcap'] == pcap and line['nactiveclients'] == nactiveclient:
if not line['nclients'] in mergedData:
mergedData[line['nclients']] = []
mergedData[line['nclients']].extend(filter(is_number, line['data']))
od = collections.OrderedDict(sorted(mergedData.items()))
for nclients, data in od.items():
with open("individualpcaps_"+pcap+"_"+nactiveclient+"_"+nclients+".gnudata", "w") as file:
for v in data:
file.write(v+",\n")
| 2,539 | 28.195402 | 171 |
py
|
prifi-experiments
|
prifi-experiments-master/fig4-skype/data_explore.py
|
#!/usr/bin/python3
# usage: ./data_explore.py individualpcaps_<pcap>_<nactiveclients>_<nclients>.gnudata
import fileinput
from pprint import pprint
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import sys
def try_parse_int(s, base=10):
try:
return int(s, base)
except ValueError:
return None
if len(sys.argv) < 2:
print("Argument 1 must be the features to extract")
sys.exit(1)
filename = sys.argv[1]
nPackets = 0;
data = [];
with open(filename) as file:
for line in file:
s = int(line.replace(',','').strip())
data.append(s)
plt.plot(data)
plt.ylabel('Latency');
plt.xlabel('Packets');
plt.title(filename);
plt.show();
| 658 | 17.828571 | 55 |
py
|
prifi-experiments
|
prifi-experiments-master/fig4-skype/data_explore2.py
|
#!/usr/bin/python3
# usage: ./data_explore2.py  (reads the log files matching the hard-coded globs under logs/)
from pathlib import Path
import sys
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
def try_parse_int(s, base=10):
try:
return int(s, base)
except ValueError:
return None
def processPoint(fileScheme):
plotData = {}
pathlist = Path("logs").glob(fileScheme)
for path in pathlist:
filename = str(path)
with open(filename) as file:
fileData = []
for line in file:
needle = "PCAPLog-individuals "
if needle in line:
line = line[line.find(needle) + len(needle):].replace('( ', '').replace(' )', '').strip()
parts = line.split(':')
key = parts[0].strip()
data = parts[1].strip().split(';')
print(data)
data = [try_parse_int(x) for x in data if x != ""]
#fileData[key] = data
fileData.extend(data)
plotData[filename] = fileData
plt.plot(fileData, label=filename)
plt.ylabel('Latency');
plt.xlabel('Packets');
plt.title(fileScheme);
plt.legend(loc='best')
plt.show();
processPoint('experiment_skype.pcap_10_1_*.txt')
processPoint('experiment_skype.pcap_20_1_*.txt')
processPoint('experiment_skype.pcap_30_1_*.txt')
#processPoint('experiment_skype.pcap_50_1_*.txt')
#processPoint('experiment_skype.pcap_70_1_*.txt')
#processPoint('experiment_skype.pcap_90_1_*.txt')
| 1,550 | 29.411765 | 109 |
py
|
prifi-experiments
|
prifi-experiments-master/fig5b-icrc/pcap-individuals.py
|
import sys
import re
import json
import collections
regex1str = "{\"pcap_time_diff\": \"([-\d]+)\""
regex1 = re.compile(regex1str)
json = []
pcapsNames = []
payloads = []
def is_number(s):
try:
float(s)
if float(s) > 10:
return True
return False
except ValueError:
return False
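# Unlike the other pcap-individuals scripts, is_number here doubles as a
# filter: only samples strictly greater than 10 are kept. File names are
# interpreted as <pcap>_<payload>_<repeat>, and the merged samples go to
# individualpcaps_<pcap>_<payload>.gnudata.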
def parseGreppedToObject(inFile):
data = []
with open(inFile) as file:
rawData = file.read()
lines = rawData.split("\n")
# parse the data
for line in lines:
if line.strip() == "":
continue
lineout = {}
line = line.replace('.txt:[0m[0;33m1 : ( utils.(*PCAPLog).Print: 125) - PCAPLog-individuals (', '').replace(' ): ', ' ');
parts = line.split(' ');
filename = parts[0];
if "logs/" in filename:
filename = filename.replace('logs/', '')
            filename = filename.strip()
client = parts[-3].strip()
data = parts[-1].strip()
filenamedata = filename.split('_')
lineout['pcap'] = filenamedata[0].replace('.pcap', '')
lineout['payload'] = filenamedata[1]
lineout['repeat'] = filenamedata[2]
lineout['data'] = data.split(';')
lineout['client'] = client
json.append(lineout)
if lineout['pcap'] not in pcapsNames:
pcapsNames.append(lineout['pcap'])
if lineout['payload'] not in payloads:
payloads.append(lineout['payload'])
#print lineout
if len(sys.argv) < 2:
print "Argument 1 must be the input file"
sys.exit(1)
a = str(sys.argv[1])
parseGreppedToObject(a)
for pcap in pcapsNames:
mergedData = {}
for line in json:
if line['pcap'] == pcap:
if not line['payload'] in mergedData:
mergedData[line['payload']] = []
mergedData[line['payload']].extend(filter(is_number, line['data']))
od = collections.OrderedDict(sorted(mergedData.items()))
for payload, data in od.items():
with open("individualpcaps_"+pcap+"_"+payload+".gnudata", "w") as file:
for v in data:
file.write(v+",\n")
| 2,280 | 25.835294 | 171 |
py
|
prifi-experiments
|
prifi-experiments-master/fig5b-icrc/script.py
|
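# One-off helper: re-reads the values saved in 'bkp', keeps only those greater
# than 10 (the same cutoff used in pcap-individuals.py), and rewrites them to
# individualpcaps_experiment_10.gnudata, one value per line.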
out = []
with open('bkp') as f:
for line in f:
number = int(line.strip().replace(',',''))
if number > 10:
out.append(number)
with open('individualpcaps_experiment_10.gnudata', 'w') as f:
for val in out:
f.write(str(val)+",\n")
| 273 | 21.833333 | 61 |
py
|
prifi-experiments
|
prifi-experiments-master/fig3b-relatedwork/riffle-data/stats.py
|
import os
import sys
from os import listdir
import numpy as np
from os.path import isfile, join
files = [f for f in listdir('.') if isfile(f) and '.txt' in f and 'out_' in f]
files.sort()
stats = dict()
for file in files:
with open(file) as f:
content = f.read()
lines = content.split('\n')
fname = file.replace('out_', '').replace('.txt', '').split('_')
if len(fname) != 2:
print("Weird name:", fname)
sys.exit(1)
nclients = int(fname[0])
repeat = fname[1]
sharingkeys = lines[0].replace('sharing keys took ', '').replace('s', '')
totaltime = lines[2].replace('total time: took ', '').replace('s', '')
if not nclients in stats:
stats[nclients] = []
stats[nclients].append([sharingkeys, totaltime])
def mean(arr):
acc = 0
for v in arr:
acc += float(v)
return float(acc)/len(arr)
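# riffle.txt gets one row per client count: nclients, mean total time (ms),
# stddev of the total time (ms); the key-sharing (setup) times are only
# printed to stdout.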
nclients = list(stats.keys())
nclients.sort()
with open("riffle.txt", "w") as f:
for nclient in nclients:
totaltimes = [1000*float(x[1]) for x in stats[nclient]] # go to ms
setuptimes = [1000*float(x[0]) for x in stats[nclient]] # go to ms
m = round(mean(totaltimes))
dev = round(np.std(totaltimes))
print(nclient, m, dev, round(mean(setuptimes)), round(np.std(setuptimes)))
f.write(str(nclient) + ",\t" + str(m) + ",\t" + str(dev) +"\n")
| 1,447 | 29.166667 | 83 |
py
|
nu_coincidence
|
nu_coincidence-main/setup.py
|
from setuptools import setup
import versioneer
import os
def find_config_files(directory):
paths = []
for (path, directories, filenames) in os.walk(directory):
for filename in filenames:
paths.append(os.path.join("..", path, filename))
return paths
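# The leading ".." makes each path resolve relative to the package directory,
# which is presumably how setuptools expects these package_data entries.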
config_files = find_config_files("nu_coincidence/config")
setup(
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
include_package_data=True,
package_data={"": config_files},
)
| 504 | 17.703704 | 61 |
py
|
nu_coincidence
|
nu_coincidence-main/versioneer.py
|
# Version: 0.19
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/python-versioneer/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible with: Python 3.6, 3.7, 3.8, 3.9 and pypy3
* [![Latest Version][pypi-image]][pypi-url]
* [![Build Status][travis-image]][travis-url]
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere in your $PATH
* add a `[versioneer]` section to your setup.cfg (see [Install](INSTALL.md))
* run `versioneer install` in your source tree, commit the results
* Verify version information with `python setup.py version`
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
commit date in ISO 8601 format. This will be None if the date is not
available.
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
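A minimal sketch of reading these flavors from a build script, assuming the
top-level `versioneer.py` is importable (e.g. it sits next to your `setup.py`):
    import versioneer
    info = versioneer.get_versions()
    print(info["version"], info["full-revisionid"], info["dirty"])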
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See [details.md](details.md) in the Versioneer
source tree for descriptions.
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Known Limitations
Some situations are known to cause problems for Versioneer. This details the
most significant ones. More can be found on Github
[issues page](https://github.com/python-versioneer/python-versioneer/issues).
### Subprojects
Versioneer has limited support for source trees in which `setup.py` is not in
the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are
two common reasons why `setup.py` might not be in the root:
* Source trees which contain multiple subprojects, such as
[Buildbot](https://github.com/buildbot/buildbot), which contains both
"master" and "slave" subprojects, each with their own `setup.py`,
`setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
distributions (and upload multiple independently-installable tarballs).
* Source trees whose main purpose is to contain a C library, but which also
provide bindings to Python (and perhaps other languages) in subdirectories.
Versioneer will look for `.git` in parent directories, and most operations
should get the right version string. However `pip` and `setuptools` have bugs
and implementation details which frequently cause `pip install .` from a
subproject directory to fail to find a correct version string (so it usually
defaults to `0+unknown`).
`pip install --editable .` should work correctly. `setup.py install` might
work too.
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
some later version.
[Bug #38](https://github.com/python-versioneer/python-versioneer/issues/38) is tracking
this issue. The discussion in
[PR #61](https://github.com/python-versioneer/python-versioneer/pull/61) describes the
issue from the Versioneer side in more detail.
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
pip to let Versioneer work correctly.
Versioneer-0.16 and earlier only looked for a `.git` directory next to the
`setup.cfg`, so subprojects were completely unsupported with those releases.
### Editable installs with setuptools <= 18.5
`setup.py develop` and `pip install --editable .` allow you to install a
project into a virtualenv once, then continue editing the source code (and
test) without re-installing after every change.
"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
convenient way to specify executable scripts that should be installed along
with the python package.
These both work as expected when using modern setuptools. When using
setuptools-18.5 or earlier, however, certain operations will cause
`pkg_resources.DistributionNotFound` errors when running the entrypoint
script, which must be resolved by re-installing the package. This happens
when the install happens with one version, then the egg_info data is
regenerated while a different version is checked out. Many setup.py commands
cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
a different virtualenv), so this can be surprising.
[Bug #83](https://github.com/python-versioneer/python-versioneer/issues/83) describes
this one, but upgrading to a newer version of setuptools should probably
resolve it.
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to be easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## Similar projects
* [setuptools_scm](https://github.com/pypa/setuptools_scm/) - a non-vendored build-time
dependency
* [minver](https://github.com/jbweston/miniver) - a lightweight reimplementation of
versioneer
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
[pypi-image]: https://img.shields.io/pypi/v/versioneer.svg
[pypi-url]: https://pypi.python.org/pypi/versioneer/
[travis-image]:
https://img.shields.io/travis/com/python-versioneer/python-versioneer.svg
[travis-url]: https://travis-ci.com/github/python-versioneer/python-versioneer
"""
import configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = ("Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND').")
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print("Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py))
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.ConfigParser()
with open(setup_cfg, "r") as f:
parser.read_file(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip().decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
LONG_VERSION_PY['git'] = r'''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.19 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip().decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post0.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post0.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post0.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
"""Git-specific installation logic for Versioneer.
For Git, this means creating/changing .gitattributes to mark _version.py
for export-subst keyword substitution.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.19) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
"""Try to determine the version from _version.py if present."""
try:
with open(filename) as f:
contents = f.read()
except EnvironmentError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
"""Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True,
indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post0.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post0.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post0.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
class VersioneerBadRootError(Exception):
"""The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
Returns dict with two keys: 'version' and 'full'.
"""
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, \
"please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None, "error": "unable to compute version",
"date": None}
def get_version():
"""Get the short version string for this project."""
return get_versions()["version"]
def get_cmdclass(cmdclass=None):
"""Get the custom setuptools/distutils subclasses used by Versioneer.
If the package uses a different cmdclass (e.g. one from numpy), it
should be provided as an argument.
"""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/python-versioneer/python-versioneer/issues/52
cmds = {} if cmdclass is None else cmdclass.copy()
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# pip install:
# copies source tree to a tempdir before running egg_info/etc
# if .git isn't copied too, 'git describe' will fail
# then does setup.py bdist_wheel, or sometimes setup.py install
# setup.py egg_info -> ?
# we override different "build_py" commands for both environments
if 'build_py' in cmds:
_build_py = cmds['build_py']
elif "setuptools" in sys.modules:
from setuptools.command.build_py import build_py as _build_py
else:
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "setuptools" in sys.modules:
from setuptools.command.build_ext import build_ext as _build_ext
else:
from distutils.command.build_ext import build_ext as _build_ext
class cmd_build_ext(_build_ext):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_ext.run(self)
if self.inplace:
# build_ext --inplace will only build extensions in
# build/lib<..> dir with no _version.py to write to.
# As in place builds will already have a _version.py
# in the module dir, we do not need to write one.
return
# now locate _version.py in the new build/ directory and replace
# it with an updated value
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_ext"] = cmd_build_ext
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
# nczeczulin reports that py2exe won't like the pep440-style string
# as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
# setup(console=[{
# "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
# "product_version": versioneer.get_version(),
# ...
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
if 'py2exe' in sys.modules: # py2exe enabled?
from py2exe.distutils_buildexe import py2exe as _py2exe
class cmd_py2exe(_py2exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["py2exe"] = cmd_py2exe
# we override different "sdist" commands for both environments
if 'sdist' in cmds:
_sdist = cmds['sdist']
elif "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile,
self._versioneer_generated_versions)
cmds["sdist"] = cmd_sdist
return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
"""Do main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (EnvironmentError, configparser.NoSectionError,
configparser.NoOptionError) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg",
file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
"__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
cfg.versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1)
| 70,144 | 36.793642 | 87 |
py
|
nu_coincidence
|
nu_coincidence-main/nu_coincidence/coincidence.py
|
from abc import ABCMeta
import numpy as np
class CoincidenceCheck(object, metaclass=ABCMeta):
"""
Abstract base class for coincidence checks.
"""
def __init__(self):
pass
def check_spatial(self):
pass
def check_temporal(self):
pass
def check_spatial_coincidence(
event_ras,
event_decs,
event_ang_errs,
event_src_labels,
population_ras,
population_decs,
):
"""
Check the spatial coincidence of events
assuming circular error regions with the
sources in population, which are assumed to be points.
All angles should be in radians.
"""
n_match_spatial = 0
n_match_spatial_astro = 0
spatial_match_inds = []
# For each event
for e_ra, e_dec, e_ang_err, e_label in zip(
event_ras,
event_decs,
event_ang_errs,
event_src_labels,
):
# Check if source locations inside event circle
sigmas = get_central_angle(e_ra, e_dec, population_ras, population_decs)
match_selection = sigmas <= e_ang_err
n_match_spatial += len(match_selection[match_selection == True])
# Check if event is from astro component
if e_label == 1:
n_match_spatial_astro += len(match_selection[match_selection == True])
# Indices of sources which match this event
spatial_match_inds.append(np.where(match_selection == True)[0])
return (
n_match_spatial,
n_match_spatial_astro,
spatial_match_inds,
)
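# A small, self-contained usage sketch of the check above (coordinates, error
# radius and labels are arbitrary values chosen for illustration): one
# astrophysical event at the origin with a 0.1 rad error circle matches the
# first of two sources and misses the second.
def _spatial_coincidence_example():
    n_spatial, n_spatial_astro, match_inds = check_spatial_coincidence(
        event_ras=np.array([0.0]),
        event_decs=np.array([0.0]),
        event_ang_errs=np.array([0.1]),
        event_src_labels=np.array([1]),
        population_ras=np.array([0.05, 1.0]),
        population_decs=np.array([0.0, 0.0]),
    )
    # n_spatial == 1, n_spatial_astro == 1, match_inds == [array([0])]
    return n_spatial, n_spatial_astro, match_inds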
def count_spatial_coincidence(
event_ras,
event_decs,
event_ang_errs,
population_ras,
population_decs,
):
"""
Count the spatial coincidence of events
assuming circular error regions with the
sources in population, which are assumed to be points.
All angles should be in radians.
"""
n_match_spatial = 0
spatial_match_inds = []
# For each event
for e_ra, e_dec, e_ang_err in zip(event_ras, event_decs, event_ang_errs):
# Check if source locations inside event circle
sigmas = get_central_angle(e_ra, e_dec, population_ras, population_decs)
match_selection = sigmas <= e_ang_err
n_match_spatial += len(match_selection[match_selection == True])
# Indices of sources which match this event
spatial_match_inds.append(np.where(match_selection == True)[0])
    # Store the index of each spatially matched object, once per matching neutrino
match_ids = []
for inds in spatial_match_inds:
if inds.size > 0:
match_ids.extend(inds)
return n_match_spatial, match_ids
def get_central_angle(ref_ra, ref_dec, ras, decs):
"""
Get the central angles between ref_ra and
ref_dec and a bunch of ras and decs. Angles
should be in radians.
Useful for computing the separation of points
on the unit sphere.
"""
sin_term = np.sin(ref_dec) * np.sin(decs)
cos_term = np.cos(ref_dec) * np.cos(decs)
diff_term = np.cos(ref_ra - ras)
return np.arccos(sin_term + cos_term * diff_term)
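# Quick sanity check of the spherical separation above with hand-picked angles:
# two points on the equator separated by 90 degrees in right ascension are
# pi/2 radians apart on the unit sphere.
def _central_angle_example():
    sep = get_central_angle(0.0, 0.0, np.array([np.pi / 2]), np.array([0.0]))
    # sep[0] is approximately pi / 2
    return sep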
def check_temporal_coincidence(
event_times,
event_src_labels,
event_ras,
event_decs,
event_ang_errs,
spatial_match_inds,
population_variability,
population_flare_times,
population_flare_durations,
population_flare_amplitudes,
):
"""
Check the temporal coincidence of events,
both in the sense of variable objects and
    actual flares. Also requires a spatial match.
"""
n_match_variable = 0
n_match_variable_astro = 0
n_match_flaring = 0
n_match_flaring_astro = 0
matched_flare_amplitudes = []
matched_nu_ras = []
matched_nu_decs = []
matched_nu_ang_errs = []
matched_nu_times = []
# For each event
for (e_time, e_label, e_ra, e_dec, e_ang_err, match_inds) in zip(
event_times,
event_src_labels,
event_ras,
event_decs,
event_ang_errs,
spatial_match_inds,
):
# If there are spatial matches
if match_inds.size:
# For each matched source
for ind in match_inds:
# If source is variable
if population_variability[ind]:
n_match_variable += 1
if e_label == 1:
n_match_variable_astro += 1
flare_times = population_flare_times[ind]
flare_durations = population_flare_durations[ind]
flare_amplitudes = population_flare_amplitudes[ind]
selection = (e_time >= flare_times) & (
e_time <= flare_times + flare_durations
)
matches = len(np.where(selection == True)[0])
# matches can *very rarely* be >1 for overlapping flares
# overlapping flares can occur if diff(flare_times) < 1 week
# in this case merge flares and take amplitude of first flare
if matches > 0:
n_match_flaring += 1
if e_label == 1:
n_match_flaring_astro += 1
# Store info on this match
matched_flare_amplitudes.append(flare_amplitudes[selection][0])
matched_nu_ras.append(e_ra)
matched_nu_decs.append(e_dec)
matched_nu_ang_errs.append(e_ang_err)
matched_nu_times.append(e_time)
return (
n_match_variable,
n_match_variable_astro,
n_match_flaring,
n_match_flaring_astro,
matched_flare_amplitudes,
matched_nu_ras,
matched_nu_decs,
matched_nu_ang_errs,
matched_nu_times,
)
| 5,821 | 24.761062 | 87 |
py
|
nu_coincidence
|
nu_coincidence-main/nu_coincidence/_version.py
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.19 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "nu_coincidence-"
cfg.versionfile_source = "nu_coincidence/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip().decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r"\d", r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
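# A self-contained illustration of the keyword path above, using a made-up
# expanded refnames string and hash as they would appear in a git-archive
# tarball of a tagged commit:
def _keywords_example():
    keywords = {
        "refnames": " (HEAD -> main, tag: v1.2.3)",
        "full": "abc1234def5678abc1234def5678abc1234def56",
        "date": "2021-01-01 00:00:00 +0000",
    }
    # returns {"version": "1.2.3", ...} for tag_prefix "v"
    return git_versions_from_keywords(keywords, "v", verbose=False)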
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
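# A self-contained illustration of the describe-output parsing above, applied to
# a made-up string rather than the output of a real 'git describe' call:
def _describe_parse_example():
    mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", "v1.2.3-4-gabc1234")
    # yields closest tag "v1.2.3", distance 4 and short hash "abc1234"
    return mo.group(1), int(mo.group(2)), mo.group(3)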
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post0.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post0.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post0.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split("/"):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None,
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
| 18,700 | 32.275801 | 88 |
py
|
nu_coincidence
|
nu_coincidence-main/nu_coincidence/simulation.py
|
import h5py
import numpy as np
from abc import abstractmethod, ABCMeta
from typing import List
class Simulation(object, metaclass=ABCMeta):
"""
Generic simulation base class.
"""
def __init__(self, file_name="output/test_sim", group_base_name="survey", N=1):
self._N = N
self._file_name = file_name
self._group_base_name = group_base_name
self._param_servers = []
self._setup_param_servers()
self._initialise_output_file()
@abstractmethod
def _setup_param_servers(self):
raise NotImplementedError()
def _initialise_output_file(self):
with h5py.File(self._file_name, "w") as f:
f.attrs["file_name"] = np.string_(self._file_name)
f.attrs["group_base_name"] = np.string_(self._group_base_name)
f.attrs["N"] = self._N
@abstractmethod
def run(self):
raise NotImplementedError()
class Results(object, metaclass=ABCMeta):
"""
Generic results base class.
"""
def __init__(self, file_name_list: List[str]):
self._file_name_list = file_name_list
self.N = 0
self._setup()
self._load_all_files()
@abstractmethod
def _setup(self):
"""
Initialise output.
"""
raise NotImplementedError()
@abstractmethod
def _load_from_h5(self, file_name: str):
"""
Load single file.
"""
raise NotImplementedError()
def _load_all_files(self):
for file_name in self._file_name_list:
self._load_from_h5(file_name)
@classmethod
def load(cls, file_name_list: List[str]):
return cls(file_name_list)
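# A minimal sketch of a concrete Results subclass (illustration only): it simply
# accumulates the "N" attribute that Simulation writes into each output file.
class _ExampleResults(Results):
    def _setup(self):
        self.n_surveys = 0
    def _load_from_h5(self, file_name: str):
        with h5py.File(file_name, "r") as f:
            self.n_surveys += f.attrs["N"]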
| 1,706 | 18.848837 | 83 |
py
|
nu_coincidence
|
nu_coincidence-main/nu_coincidence/__init__.py
|
from nu_coincidence.populations.aux_samplers import (
SpectralIndexAuxSampler,
VariabilityAuxSampler,
FlareRateAuxSampler,
FlareTimeAuxSampler,
FlareDurationAuxSampler,
FlareAmplitudeAuxSampler,
CombinedFluxIndexSampler,
)
from nu_coincidence.populations.selection import CombinedFluxIndexSelection
from ._version import get_versions
__version__ = get_versions()["version"]
del get_versions
| 422 | 23.882353 | 75 |
py
|
nu_coincidence
|
nu_coincidence-main/nu_coincidence/distributions/cosmological_distribution.py
|
import numpy as np
from popsynth.distribution import DistributionParameter
from popsynth.distributions.cosmological_distribution import CosmologicalDistribution
class ZPowExpCosmoDistribution(CosmologicalDistribution):
r0 = DistributionParameter(default=1, vmin=0)
k = DistributionParameter()
xi = DistributionParameter()
def __init__(self, seed=1234, name="zpowexp_cosmo", is_rate=True):
spatial_form = r"r_0 (1 + z)^k \exp{z/\xi}"
super(ZPowExpCosmoDistribution, self).__init__(
seed=seed,
name=name,
form=spatial_form,
is_rate=is_rate,
)
def dNdV(self, distance):
return self.r0 * np.power(1 + distance, self.k) * np.exp(distance / self.xi)
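# A short standalone sketch of the density shape used above, evaluated on an
# illustrative redshift grid with arbitrary parameter values:
def _dndv_shape_example(r0=1.0, k=1.0, xi=0.5):
    z = np.linspace(0.0, 3.0, 4)
    # same functional form as dNdV: r0 * (1 + z)**k * exp(z / xi)
    return r0 * np.power(1 + z, k) * np.exp(z / xi)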
| 753 | 26.925926 | 85 |
py
|
nu_coincidence
|
nu_coincidence-main/nu_coincidence/distributions/__init__.py
| 0 | 0 | 0 |
py
|
|
nu_coincidence
|
nu_coincidence-main/nu_coincidence/config/__init__.py
| 0 | 0 | 0 |
py
|
|
nu_coincidence
|
nu_coincidence-main/nu_coincidence/populations/spatial_populations.py
|
from popsynth.population_synth import PopulationSynth
from nu_coincidence.distributions.cosmological_distribution import (
ZPowExpCosmoDistribution,
)
class ZPowExpCosmoPopulation(PopulationSynth):
def __init__(
self,
r0,
k,
xi,
r_max=5.0,
seed=1234,
luminosity_distribution=None,
is_rate=True,
):
"""
        :param r0: normalisation of the local source density/rate.
        :param k: power-law index of the (1 + z)^k factor.
        :param xi: scale of the exponential exp(z / xi) factor.
"""
spatial_distribution = ZPowExpCosmoDistribution(
seed=seed,
is_rate=is_rate,
)
spatial_distribution.r0 = r0
spatial_distribution.k = k
spatial_distribution.xi = xi
spatial_distribution.r_max = r_max
super(ZPowExpCosmoPopulation, self).__init__(
spatial_distribution=spatial_distribution,
luminosity_distribution=luminosity_distribution,
seed=seed,
)
| 937 | 23.051282 | 68 |
py
|
nu_coincidence
|
nu_coincidence-main/nu_coincidence/populations/popsynth_wrapper.py
|
import yaml
from typing import Dict, Any
from popsynth.population_synth import PopulationSynth
from nu_coincidence.utils.parameter_server import ParameterServer
class PopsynthParams(ParameterServer):
"""
Class to pass necessary params to create
a popsynth population.
"""
def __init__(self, config_file, flux_sigma=0.1):
"""
        :param config_file: YAML file containing the popsynth population spec.
        :param flux_sigma: observational flux scatter passed to draw_survey().
"""
super().__init__()
self._config_file = config_file
self._flux_sigma = flux_sigma
with open(self._config_file) as f:
self._pop_spec: Dict[str, Any] = yaml.load(f, Loader=yaml.SafeLoader)
@property
def pop_spec(self):
return self._pop_spec
@property
def flux_sigma(self):
return self._flux_sigma
class PopsynthWrapper(object):
"""
Wrapper to create popsynths from PopsynthParams.
"""
def __init__(self, parameter_server):
self._parameter_server = parameter_server
ps = parameter_server.pop_spec
fs = parameter_server.flux_sigma
self._pop_gen = PopulationSynth.from_dict(ps)
self._pop_gen._seed = parameter_server.seed
self._survey = self._pop_gen.draw_survey(flux_sigma=fs)
@property
def survey(self):
return self._survey
def write(self):
pass
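# Hypothetical end-to-end usage sketch: the YAML path, seed and output names
# below are placeholders, not files or values shipped with this package.
def _example_popsynth_setup(config_file="configs/example_pop.yml", seed=42):
    params = PopsynthParams(config_file, flux_sigma=0.1)
    params.seed = seed
    params.file_name = "output/example.h5"
    params.group_name = "survey_0"
    wrapper = PopsynthWrapper(params)
    return wrapper.survey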
| 1,361 | 19.328358 | 81 |
py
|
nu_coincidence
|
nu_coincidence-main/nu_coincidence/populations/aux_samplers.py
|
import numpy as np
from scipy import stats
import h5py
from popsynth.auxiliary_sampler import AuxiliarySampler, AuxiliaryParameter
from popsynth.aux_samplers.normal_aux_sampler import NormalAuxSampler
from popsynth.aux_samplers.plaw_aux_sampler import PowerLawAuxSampler, _sample_power_law
from popsynth.utils.cosmology import cosmology
class SpectralIndexAuxSampler(NormalAuxSampler):
"""
Sample the spectral index of a source
with a simple power law spectrum.
"""
_auxiliary_sampler_name = "SpectralIndexAuxSampler"
def __init__(self, name="spectral_index", observed=True):
super().__init__(name=name, observed=observed)
class VariabilityAuxSampler(AuxiliarySampler):
"""
Sample whether a source is variable or not.
Boolean outcome.
"""
_auxiliary_sampler_name = "VariabilityAuxSampler"
weight = AuxiliaryParameter(vmin=0, vmax=1)
def __init__(self, name="variability", observed=False):
super(VariabilityAuxSampler, self).__init__(name=name, observed=observed)
def true_sampler(self, size):
self._true_values = np.random.choice(
[True, False],
p=[self.weight, 1 - self.weight],
size=size,
)
class FlareRateAuxSampler(PowerLawAuxSampler):
"""
Sample source flare rate given its variability.
"""
_auxiliary_sampler_name = "FlareRateAuxSampler"
def __init__(self, name="flare_rate", observed=False):
super(FlareRateAuxSampler, self).__init__(name=name, observed=observed)
def true_sampler(self, size):
rate = np.zeros(size)
variability = self._secondary_samplers["variability"].true_values
rate[variability == False] = 0
rate[variability == True] = 1
super(FlareRateAuxSampler, self).true_sampler(size)
self._true_values = rate * self._true_values
class FlareTimeAuxSampler(AuxiliarySampler):
"""
    Sample flare times for each source given
rate and total number of flares.
"""
_auxiliary_sampler_name = "FlareTimeAuxSampler"
obs_time = AuxiliaryParameter(vmin=0, default=1)
def __init__(self, name="flare_times", observed=False):
super(FlareTimeAuxSampler, self).__init__(name=name, observed=observed)
def true_sampler(self, size):
dt = h5py.vlen_dtype(np.dtype("float64"))
times = np.empty((size,), dtype=dt)
rate = self._secondary_samplers["flare_rate"].true_values
for i, _ in enumerate(times):
if rate[i] == 0:
times[i] = np.array([], dtype=np.dtype("float64"))
else:
n_flares = np.random.poisson(rate[i] * self.obs_time)
time_samples = np.random.uniform(0, self.obs_time, size=n_flares)
time_samples = np.sort(time_samples)
times[i] = np.array(time_samples, dtype=np.dtype("float64"))
self._true_values = times
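# Standalone sketch of the flare-time scheme above with arbitrary illustrative
# numbers: the flare count is Poisson(rate * obs_time) and each flare start is
# drawn uniformly over the observation window, then sorted.
def _flare_time_sampling_example(rate=2.0, obs_time=10.0, seed=42):
    rng = np.random.default_rng(seed)
    n_flares = rng.poisson(rate * obs_time)
    return np.sort(rng.uniform(0.0, obs_time, size=n_flares))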
class FlareDurationAuxSampler(AuxiliarySampler):
"""
Sample flare durations given flare times.
"""
_auxiliary_sampler_name = "FlareDurationAuxSampler"
alpha = AuxiliaryParameter(default=-1.5)
def __init__(self, name="flare_durations", observed=False):
super(FlareDurationAuxSampler, self).__init__(name=name, observed=observed)
def true_sampler(self, size):
dt = h5py.vlen_dtype(np.dtype("float64"))
durations = np.empty((size,), dtype=dt)
times = self._secondary_samplers["flare_times"].true_values
obs_time = self._secondary_samplers["flare_times"].obs_time
eps = 1e-3
for i, _ in enumerate(durations):
if times[i].size == 0:
durations[i] = np.array([], dtype=np.dtype("float64"))
else:
# Difference between flare times
max_durations = np.diff(times[i])
# Add final flare duration, can go up until obs_time
max_durations = np.append(max_durations, obs_time - times[i][-1])
# Minimum duration of 1 week
max_durations[max_durations < 1 / 52] = 1 / 52 + eps
durations[i] = np.array(
[
_sample_power_law(1 / 52, md, self.alpha, 1)[0]
for md in max_durations
],
dtype=np.dtype("float64"),
)
self._true_values = durations
class FlareAmplitudeAuxSampler(AuxiliarySampler):
"""
Sample increase in luminosity of the flares
as a multiplicative factor.
"""
_auxiliary_sampler_name = "FlareAmplitudeAuxSampler"
xmin = AuxiliaryParameter(vmin=0, default=1)
alpha = AuxiliaryParameter(default=1)
def __init__(self, name="flare_amplitudes", observed=False):
super(FlareAmplitudeAuxSampler, self).__init__(name=name, observed=observed)
def true_sampler(self, size):
dt = h5py.vlen_dtype(np.dtype("float64"))
amplitudes = np.empty((size,), dtype=dt)
times = self._secondary_samplers["flare_times"].true_values
for i, _ in enumerate(amplitudes):
if times[i].size == 0:
amplitudes[i] = np.array([], dtype=np.dtype("float64"))
else:
n_flares = times[i].size
samples = stats.pareto(self.alpha).rvs(n_flares) * self.xmin
amplitudes[i] = np.array(samples, dtype=np.dtype("float64"))
self._true_values = amplitudes
class FluxSampler(AuxiliarySampler):
"""
Sample observed fluxes based on the latent
fluxes.
This is equivalent to defining ``flux_sigma``
in PopulationSynth.draw_survey(), but also
    allows more complicated selections to be defined
on the *observed* flux, such as the
``CombinedFluxIndexSelection``.
"""
_auxiliary_sampler_name = "FluxSampler"
sigma = AuxiliaryParameter(default=0.1, vmin=0)
def __init__(self):
super(FluxSampler, self).__init__(
"flux",
observed=True,
uses_distance=True,
uses_luminosity=True,
)
def true_sampler(self, size):
# Calculate latent fluxes
dl = cosmology.luminosity_distance(self._distance) # cm
fluxes = self._luminosity / (4 * np.pi * dl ** 2) # erg cm^-2 s^-1
self._true_values = fluxes
def observation_sampler(self, size):
log_fluxes = np.log(self._true_values)
log_obs_fluxes = log_fluxes + np.random.normal(
loc=0, scale=self.sigma, size=size
)
self._obs_values = np.exp(log_obs_fluxes)
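# Standalone sketch of the latent-to-observed flux step above; the luminosity
# (erg s^-1) and luminosity distance (cm) are passed in directly as illustrative
# values, so no cosmology lookup is needed here.
def _flux_observation_example(luminosity=1e46, dl=1e27, sigma=0.1, seed=42):
    rng = np.random.default_rng(seed)
    latent_flux = luminosity / (4 * np.pi * dl ** 2)  # erg cm^-2 s^-1
    obs_flux = np.exp(np.log(latent_flux) + rng.normal(0.0, sigma))
    return latent_flux, obs_flux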
class CombinedFluxIndexSampler(AuxiliarySampler):
"""
Make a transformed parameter to perform a
combined linear selection on energy flux and
spectral index.
Selection has the form:
index = ``slope`` log10(flux) + ``intercept``
So, here we transform to:
-(index - ``slope`` log10(flux))
such that a constant selection can be made
on -``intercept``. This works with both
    :class:`LowerBound` and :class:`SoftSelection`.
See e.g. Fig. 4 in Ajello et al. 2020 (4LAC),
default values are set to approximate this.
"""
_auxiliary_sampler_name = "CombinedFluxIndexSampler"
slope = AuxiliaryParameter(default=3)
def __init__(
self,
):
super(CombinedFluxIndexSampler, self).__init__(
"combined_flux_index",
observed=False,
)
def true_sampler(self, size):
# Get obs fluxes
secondary = self._secondary_samplers["flux"]
obs_fluxes = secondary.obs_values
# Use obs spectral index
spectral_index = self._secondary_samplers["spectral_index"].obs_values
# Transformed based on desired selection
true_values = spectral_index - self.slope * np.log10(obs_fluxes)
# Negative to use with LowerBound/SoftSelection
self._true_values = -true_values
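# A short sketch making the selection logic above explicit, using the default
# slope (3) and the default LowerBound boundary (-37.5, i.e. intercept 37.5):
# keeping -(index - slope * log10(flux)) >= boundary is equivalent to keeping
# index <= slope * log10(flux) + intercept.
def _combined_selection_example(obs_flux=1e-11, spectral_index=2.2,
                                slope=3.0, boundary=-37.5):
    transformed = -(spectral_index - slope * np.log10(obs_flux))
    return transformed >= boundary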
| 7,979 | 25.778523 | 88 |
py
|
nu_coincidence
|
nu_coincidence-main/nu_coincidence/populations/selection.py
|
from popsynth.selection_probability.generic_selectors import LowerBound, SoftSelection
from popsynth.selection_probability.selection_probability import SelectionParameter
class CombinedFluxIndexSelection(LowerBound):
"""
Selection on :class:`CombinedFluxIndexSampler`,
with the form:
index = ``slope`` log10(flux) + ``intercept``
:class:`CombinedFluxIndexSampler` transforms to:
-(index - ``slope`` log10(flux))
such that a constant selection can be made
on -``intercept``.
See e.g. Fig. 4 in Ajello et al. 2020 (4LAC),
default values are set to approximate this.
"""
_selection_name = "CombinedFluxIndexSelection"
boundary = SelectionParameter(default=-37.5)
def __init__(
self,
name: str = "CombinedFluxIndexSelection",
use_obs_value: bool = True,
):
super(CombinedFluxIndexSelection, self).__init__(
name=name, use_obs_value=use_obs_value
)
class CombinedFluxIndexSoftSelection(SoftSelection):
"""
Selection on :class:`CombinedFluxIndexSampler`,
with the form:
index = ``slope`` log10(flux) + ``intercept``
:class:`CombinedFluxIndexSampler` transforms to:
-(index - ``slope`` log10(flux))
such that a constant selection can be made
on -``intercept``.
See e.g. Fig. 4 in Ajello et al. 2020 (4LAC),
default values are set to approximate this.
"""
_selection_name = "CombinedFluxIndexSoftSelection"
boundary = SelectionParameter(default=-37.5)
strength = SelectionParameter(default=5, vmin=0)
def __init__(
self,
name: str = "CombinedFluxIndexSoftSelection",
use_obs_value: bool = True,
):
super(CombinedFluxIndexSoftSelection, self).__init__(
name=name, use_obs_value=use_obs_value
)
def draw(self, size: int, use_log: bool = False):
"""
Override draw to not use log values.
"""
super().draw(size, use_log)
| 1,992 | 26.30137 | 86 |
py
|
nu_coincidence
|
nu_coincidence-main/nu_coincidence/populations/__init__.py
| 0 | 0 | 0 |
py
|
|
nu_coincidence
|
nu_coincidence-main/nu_coincidence/blazar_nu/base.py
|
from abc import ABCMeta, abstractmethod
from joblib import (
parallel_backend,
register_parallel_backend,
Parallel,
delayed,
)
from nu_coincidence.populations.popsynth_wrapper import (
PopsynthParams,
PopsynthWrapper,
)
from nu_coincidence.simulation import Simulation
from nu_coincidence.utils.package_data import get_path_to_config
from nu_coincidence.neutrinos.icecube import (
IceCubeObsParams,
IceCubeObsWrapper,
IceCubeTracksWrapper,
IceCubeAlertsParams,
IceCubeAlertsWrapper,
)
from nu_coincidence.utils.parallel import FileWritingBackend
register_parallel_backend("file_write", FileWritingBackend)
class BlazarNuSim(Simulation, metaclass=ABCMeta):
"""
Abstract base class for blazar
neutrino simulations.
"""
def __init__(
self,
file_name="output/test_sim.h5",
group_base_name="survey",
N=1,
bllac_config: str = None,
fsrq_config: str = None,
nu_config: str = None,
nu_hese_config: str = None,
nu_ehe_config: str = None,
seed=1000,
):
self._bllac_config = bllac_config
self._fsrq_config = fsrq_config
self._nu_config = nu_config
self._nu_hese_config = nu_hese_config
self._nu_ehe_config = nu_ehe_config
self._seed = seed
self._bllac_param_servers = []
self._fsrq_param_servers = []
self._nu_param_servers = []
super().__init__(
file_name=file_name,
group_base_name=group_base_name,
N=N,
)
def _setup_param_servers(self):
self._bllac_param_servers = []
self._fsrq_param_servers = []
self._nu_param_servers = []
for i in range(self._N):
seed = self._seed + i
# BL Lacs
bllac_spec = get_path_to_config(self._bllac_config)
bllac_param_server = PopsynthParams(bllac_spec)
bllac_param_server.seed = seed
bllac_param_server.file_name = self._file_name
bllac_param_server.group_name = self._group_base_name + "_%i" % i
self._bllac_param_servers.append(bllac_param_server)
# FSRQs
fsrq_spec = get_path_to_config(self._fsrq_config)
fsrq_param_server = PopsynthParams(fsrq_spec)
fsrq_param_server.seed = seed
fsrq_param_server.file_name = self._file_name
fsrq_param_server.group_name = self._group_base_name + "_%i" % i
self._fsrq_param_servers.append(fsrq_param_server)
# Neutrinos
if self._nu_config is not None:
nu_spec = get_path_to_config(self._nu_config)
nu_param_server = IceCubeObsParams.from_file(nu_spec)
else:
nu_hese_spec = get_path_to_config(self._nu_hese_config)
nu_ehe_spec = get_path_to_config(self._nu_ehe_config)
nu_param_server = IceCubeAlertsParams(nu_hese_spec, nu_ehe_spec)
nu_param_server.seed = seed
nu_param_server.file_name = self._file_name
nu_param_server.group_name = self._group_base_name + "_%i" % i
self._nu_param_servers.append(nu_param_server)
def _bllac_pop_wrapper(self, param_server):
return PopsynthWrapper(param_server)
def _fsrq_pop_wrapper(self, param_server):
return PopsynthWrapper(param_server)
def _nu_obs_wrapper(self, param_server):
if self._nu_config is not None:
return IceCubeTracksWrapper(param_server)
else:
return IceCubeAlertsWrapper(param_server)
@abstractmethod
def _blazar_nu_wrapper(self, bllac_pop, fsrq_pop, nu_obs):
raise NotImplementedError()
def _sim_wrapper(self, bllac_param_server, fsrq_param_server, nu_param_server):
bllac_pop = self._bllac_pop_wrapper(bllac_param_server)
fsrq_pop = self._fsrq_pop_wrapper(fsrq_param_server)
nu_obs = self._nu_obs_wrapper(nu_param_server)
result = self._blazar_nu_wrapper(bllac_pop, fsrq_pop, nu_obs)
del bllac_pop, fsrq_pop, nu_obs
return result
def run(self, parallel=True, n_jobs=4):
# Parallel
if parallel:
# Writes to file upon completion
with parallel_backend("file_write"):
Parallel(n_jobs=n_jobs)(
delayed(self._sim_wrapper)(bllac_ps, fsrq_ps, nu_ps)
for bllac_ps, fsrq_ps, nu_ps in zip(
self._bllac_param_servers,
self._fsrq_param_servers,
self._nu_param_servers,
)
)
# Serial
else:
for bllac_ps, fsrq_ps, nu_ps in zip(
self._bllac_param_servers,
self._fsrq_param_servers,
self._nu_param_servers,
):
result = self._sim_wrapper(
bllac_ps,
fsrq_ps,
nu_ps,
)
result.write()
del result
class BlazarNuAction(object, metaclass=ABCMeta):
"""
Abstract base class for different actions
that can be applied to blazar and neutrino
observations e.g. coincidence checks or
connected simulations.
"""
def __init__(
self,
bllac_pop: PopsynthWrapper,
fsrq_pop: PopsynthWrapper,
nu_obs: IceCubeObsWrapper,
name="blazar_nu_action",
):
self._name = name
self._bllac_pop = bllac_pop
self._fsrq_pop = fsrq_pop
self._nu_obs = nu_obs
self._file_name = nu_obs._parameter_server.file_name
self._group_name = nu_obs._parameter_server.group_name
self._run()
@abstractmethod
def _run(self):
raise NotImplementedError()
@abstractmethod
def write(self):
raise NotImplementedError()
@property
def name(self):
return self._name
| 6,053 | 25.669604 | 83 |
py
|
nu_coincidence
|
nu_coincidence-main/nu_coincidence/blazar_nu/coincidence.py
|
import numpy as np
import h5py
from typing import List
from collections import OrderedDict
from nu_coincidence.simulation import Results
from nu_coincidence.populations.popsynth_wrapper import PopsynthWrapper
from nu_coincidence.neutrinos.icecube import IceCubeObsWrapper
from nu_coincidence.coincidence import (
check_spatial_coincidence,
check_temporal_coincidence,
)
from nu_coincidence.blazar_nu.base import BlazarNuSim, BlazarNuAction
class BlazarNuCoincidenceSim(BlazarNuSim):
"""
Set up and run simulations for blazar-neutrino
coincidences. Assumes blazars and neutrinos have
no underlying connection.
"""
def __init__(
self,
file_name="output/test_coincidence_sim.h5",
group_base_name="survey",
N=1,
bllac_config: str = None,
fsrq_config: str = None,
nu_config: str = None,
nu_hese_config: str = None,
nu_ehe_config: str = None,
seed=1000,
):
super().__init__(
file_name=file_name,
group_base_name=group_base_name,
N=N,
bllac_config=bllac_config,
fsrq_config=fsrq_config,
nu_config=nu_config,
nu_hese_config=nu_hese_config,
nu_ehe_config=nu_ehe_config,
seed=seed,
)
def _blazar_nu_wrapper(self, bllac_pop, fsrq_pop, nu_obs):
return BlazarNuCoincidence(bllac_pop, fsrq_pop, nu_obs)
class BlazarNuCoincidence(BlazarNuAction):
"""
Check for coincidences of interest.
"""
def __init__(
self,
bllac_pop: PopsynthWrapper,
fsrq_pop: PopsynthWrapper,
nu_obs: IceCubeObsWrapper,
name="blazar_nu_coincidence",
):
self._bllac_coincidence = OrderedDict()
self._fsrq_coincidence = OrderedDict()
super().__init__(
bllac_pop=bllac_pop,
fsrq_pop=fsrq_pop,
nu_obs=nu_obs,
name=name,
)
def _run(self):
self._check_spatial()
self._check_temporal()
self._store_survey_info()
@property
def bllac_coincidence(self):
return self._bllac_coincidence
@property
def fsrq_coincidence(self):
return self._fsrq_coincidence
def write(self):
with h5py.File(self._file_name, "r+") as f:
if self._group_name not in f.keys():
group = f.create_group(self._group_name)
else:
group = f[self._group_name]
subgroup = group.create_group(self.name)
bllac_group = subgroup.create_group("bllac")
for key, value in self.bllac_coincidence.items():
if key != "spatial_match_inds":
bllac_group.create_dataset(key, data=value)
fsrq_group = subgroup.create_group("fsrq")
for key, value in self.fsrq_coincidence.items():
if key != "spatial_match_inds":
fsrq_group.create_dataset(key, data=value)
def _check_spatial(self):
"""
Check for spatial coincidences between
the *detected* blazar populations and
neutrinos
"""
observation = self._nu_obs.observation
# BL Lacs
survey = self._bllac_pop.survey
(
n_match_spatial,
n_match_spatial_astro,
spatial_match_inds,
) = check_spatial_coincidence(
np.deg2rad(observation.ra),
np.deg2rad(observation.dec),
np.deg2rad(observation.ang_err),
observation.source_label,
np.deg2rad(survey.ra[survey.selection]),
np.deg2rad(survey.dec[survey.selection]),
)
self.bllac_coincidence["n_spatial"] = n_match_spatial
self.bllac_coincidence["n_spatial_astro"] = n_match_spatial_astro
self.bllac_coincidence["spatial_match_inds"] = spatial_match_inds
# FSRQs
survey = self._fsrq_pop.survey
(
n_match_spatial,
n_match_spatial_astro,
spatial_match_inds,
) = check_spatial_coincidence(
np.deg2rad(observation.ra),
np.deg2rad(observation.dec),
np.deg2rad(observation.ang_err),
observation.source_label,
np.deg2rad(survey.ra[survey.selection]),
np.deg2rad(survey.dec[survey.selection]),
)
self.fsrq_coincidence["n_spatial"] = n_match_spatial
self.fsrq_coincidence["n_spatial_astro"] = n_match_spatial_astro
self.fsrq_coincidence["spatial_match_inds"] = spatial_match_inds
def _check_temporal(self):
"""
Check for temporal coincidences between
the *detected* blazar populations and
neutrinos, which are also spatial
coincidences.
"""
observation = self._nu_obs.observation
# BL Lacs
survey = self._bllac_pop.survey
(
n_match_variable,
n_match_variable_astro,
n_match_flaring,
n_match_flaring_astro,
matched_flare_amplitudes,
matched_nu_ras,
matched_nu_decs,
matched_nu_ang_errs,
matched_nu_times,
) = check_temporal_coincidence(
observation.times,
observation.source_label,
observation.ra,
observation.dec,
observation.ang_err,
self.bllac_coincidence["spatial_match_inds"],
survey.variability[survey.selection],
survey.flare_times[survey.selection],
survey.flare_durations[survey.selection],
survey.flare_amplitudes[survey.selection],
)
self.bllac_coincidence["n_variable"] = n_match_variable
self.bllac_coincidence["n_variable_astro"] = n_match_variable_astro
self.bllac_coincidence["n_flaring"] = n_match_flaring
self.bllac_coincidence["n_flaring_astro"] = n_match_flaring_astro
self.bllac_coincidence["matched_flare_amplitudes"] = matched_flare_amplitudes
self.bllac_coincidence["matched_nu_ras"] = matched_nu_ras
self.bllac_coincidence["matched_nu_decs"] = matched_nu_decs
self.bllac_coincidence["matched_nu_ang_errs"] = matched_nu_ang_errs
self.bllac_coincidence["matched_nu_times"] = matched_nu_times
# FSRQs
survey = self._fsrq_pop.survey
(
n_match_variable,
n_match_variable_astro,
n_match_flaring,
n_match_flaring_astro,
matched_flare_amplitudes,
matched_nu_ras,
matched_nu_decs,
matched_nu_ang_errs,
matched_nu_times,
) = check_temporal_coincidence(
observation.times,
observation.source_label,
observation.ra,
observation.dec,
observation.ang_err,
self.fsrq_coincidence["spatial_match_inds"],
survey.variability[survey.selection],
survey.flare_times[survey.selection],
survey.flare_durations[survey.selection],
survey.flare_amplitudes[survey.selection],
)
self.fsrq_coincidence["n_variable"] = n_match_variable
self.fsrq_coincidence["n_variable_astro"] = n_match_variable_astro
self.fsrq_coincidence["n_flaring"] = n_match_flaring
self.fsrq_coincidence["n_flaring_astro"] = n_match_flaring_astro
self.fsrq_coincidence["matched_flare_amplitudes"] = matched_flare_amplitudes
self.fsrq_coincidence["matched_nu_ras"] = matched_nu_ras
self.fsrq_coincidence["matched_nu_decs"] = matched_nu_decs
self.fsrq_coincidence["matched_nu_ang_errs"] = matched_nu_ang_errs
self.fsrq_coincidence["matched_nu_times"] = matched_nu_times
def _store_survey_info(self):
self.bllac_coincidence["pop_ras"] = []
self.bllac_coincidence["pop_decs"] = []
self.bllac_coincidence["pop_fluxes"] = []
self.fsrq_coincidence["pop_ras"] = []
self.fsrq_coincidence["pop_decs"] = []
self.fsrq_coincidence["pop_fluxes"] = []
if (
self.bllac_coincidence["n_flaring"] > 0
or self.fsrq_coincidence["n_flaring"] > 0
):
# Store positions of all detected sources
survey = self._bllac_pop.survey
self.bllac_coincidence["pop_ras"] = survey.ra[survey.selection]
self.bllac_coincidence["pop_decs"] = survey.dec[survey.selection]
# Store fluxes at the time of nu arrival
pop_fluxes = []
if self.bllac_coincidence["n_flaring"] > 0:
# Is source flaring at this time?
for e_time in self.bllac_coincidence["matched_nu_times"]:
fluxes = survey.flux_obs[survey.selection]
for i in range(len(fluxes)):
flare_times = survey.flare_times[survey.selection][i]
flare_durations = survey.flare_durations[survey.selection][i]
flare_amplitudes = survey.flare_amplitudes[survey.selection][i]
selection = (e_time >= flare_times) & (
e_time <= flare_times + flare_durations
)
                        matches = np.count_nonzero(selection)
if matches > 0:
fluxes[i] *= flare_amplitudes[selection][0]
pop_fluxes.append(fluxes)
self.bllac_coincidence["pop_fluxes"] = pop_fluxes
# Similarly for FSRQs
survey = self._fsrq_pop.survey
self.fsrq_coincidence["pop_ras"] = survey.ra[survey.selection]
self.fsrq_coincidence["pop_decs"] = survey.dec[survey.selection]
pop_fluxes = []
if self.fsrq_coincidence["n_flaring"] > 0:
# Is source flaring at this time?
for e_time in self.fsrq_coincidence["matched_nu_times"]:
fluxes = survey.flux_obs[survey.selection]
for i in range(len(fluxes)):
flare_times = survey.flare_times[survey.selection][i]
flare_durations = survey.flare_durations[survey.selection][i]
flare_amplitudes = survey.flare_amplitudes[survey.selection][i]
selection = (e_time >= flare_times) & (
e_time <= flare_times + flare_durations
)
                        matches = np.count_nonzero(selection)
if matches > 0:
fluxes[i] *= flare_amplitudes[selection][0]
pop_fluxes.append(fluxes)
self.fsrq_coincidence["pop_fluxes"] = pop_fluxes
class BlazarNuCoincidenceResults(Results):
"""
Load results from BlazarNuCoincidenceSim.
"""
def __init__(self, file_name_list: List[str]):
self._file_keys = [
"n_spatial",
"n_spatial_astro",
"n_variable",
"n_variable_astro",
"n_flaring",
"n_flaring_astro",
"matched_flare_amplitudes",
"matched_nu_ras",
"matched_nu_decs",
"matched_nu_ang_errs",
"matched_nu_times",
"pop_ras",
"pop_decs",
"pop_fluxes",
]
super().__init__(file_name_list=file_name_list)
def _setup(self):
self.bllac = OrderedDict()
self.fsrq = OrderedDict()
for key in self._file_keys:
if "pop" in key or "matched_nu" in key:
self.bllac[key] = []
self.fsrq[key] = []
else:
self.bllac[key] = np.array([])
self.fsrq[key] = np.array([])
def _load_from_h5(self, file_name):
with h5py.File(file_name, "r") as f:
N_f = f.attrs["N"]
bllac_f = {}
fsrq_f = {}
for key in self._file_keys:
if "matched" not in key and "pop" not in key:
bllac_f[key] = np.zeros(N_f)
fsrq_f[key] = np.zeros(N_f)
for i in range(N_f):
bllac_group = f["survey_%i/blazar_nu_coincidence/bllac" % i]
fsrq_group = f["survey_%i/blazar_nu_coincidence/fsrq" % i]
for key in self._file_keys:
if "matched" not in key and "pop" not in key:
bllac_f[key][i] = bllac_group[key][()]
fsrq_f[key][i] = fsrq_group[key][()]
elif "pop" in key or "matched_nu" in key:
if bllac_f["n_flaring"][i] >= 1:
bllac_match_i = bllac_group[key][()]
self.bllac[key].append(bllac_match_i)
if fsrq_f["n_flaring"][i] >= 1:
fsrq_match_i = fsrq_group[key][()]
self.fsrq[key].append(fsrq_match_i)
else:
if bllac_f["n_flaring"][i] >= 1:
bllac_match_i = bllac_group[key][()]
self.bllac[key] = np.append(self.bllac[key], bllac_match_i)
if fsrq_f["n_flaring"][i] >= 1:
fsrq_match_i = fsrq_group[key][()]
self.fsrq[key] = np.append(self.fsrq[key], fsrq_match_i)
for key in self._file_keys:
if "matched" not in key and "pop" not in key:
self.bllac[key] = np.append(self.bllac[key], bllac_f[key])
self.fsrq[key] = np.append(self.fsrq[key], fsrq_f[key])
self.N += N_f
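# A minimal usage sketch for the loader above. The file name is hypothetical
# and would be the output of a BlazarNuCoincidenceSim run; load() is the
# classmethod used in the package tests and concatenates results over files,
# so e.g. results.bllac["n_spatial"] should have one entry per survey.
if __name__ == "__main__":
    results = BlazarNuCoincidenceResults.load(["output/coincidence_sim.h5"])
    print(results.bllac["n_spatial"].mean(), results.fsrq["n_flaring"].sum())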
| 13,877 | 30.39819 | 87 |
py
|
nu_coincidence
|
nu_coincidence-main/nu_coincidence/blazar_nu/connected.py
|
import os
import h5py
import numpy as np
from collections import OrderedDict
from typing import List
from astropy import units as u
from icecube_tools.neutrino_calculator import NeutrinoCalculator
from nu_coincidence.populations.popsynth_wrapper import PopsynthWrapper
from nu_coincidence.neutrinos.icecube import (
IceCubeObsWrapper,
IceCubeAlertsWrapper,
_get_point_source,
_run_sim_for,
)
from nu_coincidence.simulation import Results
from nu_coincidence.blazar_nu.base import BlazarNuSim, BlazarNuAction
erg_to_GeV = (1 * u.erg).to(u.GeV).value
class BlazarNuConnectedSim(BlazarNuSim):
"""
Set up and run simulations of neutrinos produced
by blazars.
"""
def __init__(
self,
file_name="output/test_connected_sim.h5",
group_base_name="survey",
N=1,
bllac_config: str = None,
fsrq_config: str = None,
nu_config: str = None,
nu_hese_config: str = None,
nu_ehe_config: str = None,
seed=1000,
flux_factor: float = None,
use_pop_flux_factors: bool = False,
flare_only: bool = False,
det_only: bool = False,
):
super().__init__(
file_name=file_name,
group_base_name=group_base_name,
N=N,
bllac_config=bllac_config,
fsrq_config=fsrq_config,
nu_config=nu_config,
nu_hese_config=nu_hese_config,
nu_ehe_config=nu_ehe_config,
seed=seed,
)
# overwrite flux_factor if provided
if flux_factor:
for i in range(self._N):
self._nu_param_servers[i].hese.connection["flux_factor"] = flux_factor
self._nu_param_servers[i].ehe.connection["flux_factor"] = flux_factor
self._use_pop_flux_factors = use_pop_flux_factors
# store choice for flare_only
self._flare_only = flare_only
# store choice for det_only
self._det_only = det_only
def _blazar_nu_wrapper(self, nu_obs, bllac_pop, fsrq_pop):
return BlazarNuConnection(
nu_obs,
bllac_pop,
fsrq_pop,
use_pop_flux_factors=self._use_pop_flux_factors,
flare_only=self._flare_only,
det_only=self._det_only,
)
class BlazarNuConnection(BlazarNuAction):
"""
Handle connected blazar and neutrino
observations.
"""
def __init__(
self,
bllac_pop: PopsynthWrapper,
fsrq_pop: PopsynthWrapper,
nu_obs: IceCubeObsWrapper,
name="blazar_nu_connection",
use_pop_flux_factors: bool = False,
flare_only: bool = False,
det_only: bool = False,
):
self._bllac_connection = OrderedDict()
self._fsrq_connection = OrderedDict()
self._use_pop_flux_factors = use_pop_flux_factors
self._flare_only = flare_only
self._det_only = det_only
super().__init__(
bllac_pop=bllac_pop,
fsrq_pop=fsrq_pop,
nu_obs=nu_obs,
name=name,
)
@property
def bllac_connection(self):
return self._bllac_connection
@property
def fsrq_connection(self):
return self._fsrq_connection
def _run(self):
self._initialise(self._bllac_connection)
self._initialise(self._fsrq_connection)
if isinstance(self._nu_obs, IceCubeAlertsWrapper):
hese_nu_params = self._nu_obs._parameter_server.hese
ehe_nu_params = self._nu_obs._parameter_server.ehe
hese_nu_detector = self._nu_obs.hese_detector
ehe_nu_detector = self._nu_obs.ehe_detector
# check flux_factor is equal
if (
hese_nu_params.connection["flux_factor"]
!= ehe_nu_params.connection["flux_factor"]
):
raise ValueError(
"Flux factor not equal between HESE and EHE connections."
)
# BL Lacs
self._connected_sim(
self._bllac_pop,
hese_nu_params,
hese_nu_detector,
self._bllac_connection,
)
self._connected_sim(
self._bllac_pop,
ehe_nu_params,
ehe_nu_detector,
self._bllac_connection,
)
# FSRQs
self._connected_sim(
self._fsrq_pop,
hese_nu_params,
hese_nu_detector,
self._fsrq_connection,
)
self._connected_sim(
self._fsrq_pop,
ehe_nu_params,
ehe_nu_detector,
self._fsrq_connection,
)
else:
nu_params = self._nu_obs._parameter_server
nu_detector = self._nu_obs.detector
# BL Lacs
self._connected_sim(
self._bllac_pop, nu_params, nu_detector, self._bllac_connection
)
# FSRQs
self._connected_sim(
self._fsrq_pop, nu_params, nu_detector, self._fsrq_connection
)
def _initialise(self, connection):
connection["nu_Erecos"] = np.array([])
connection["nu_ras"] = np.array([])
connection["nu_decs"] = np.array([])
connection["nu_ang_errs"] = np.array([])
connection["nu_times"] = np.array([])
connection["src_detected"] = np.array([])
connection["src_flare"] = np.array([])
connection["src_id"] = np.array([], dtype=np.int64)
def _connected_sim(
self,
pop,
nu_params,
nu_detector,
connection,
):
"""
Run a connected sim for pop and store
the results in connection.
"""
# Neutrino info
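        # Emin/Emax bound the source power-law spectrum, Emin_sim is the
        # minimum energy actually simulated, and Emin_det is the cut on
        # reconstructed energy applied at the end of this method.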
Emin = nu_params.connection["lower_energy"]
Emax = nu_params.connection["upper_energy"]
Emin_sim = nu_params.connection["lower_energy_sim"]
Emin_det = nu_params.detector["Emin_det"]
Enorm = nu_params.connection["normalisation_energy"]
flux_factor = nu_params.connection["flux_factor"]
effective_area = nu_detector.effective_area
seed = nu_params.seed
np.random.seed(seed)
survey = pop.survey
N = len(survey.distances)
connection["Nnu_steady"] = np.zeros(N)
connection["Nnu_ex_steady"] = np.zeros(N)
connection["Nnu_flare"] = np.zeros(N)
connection["Nnu_ex_flare"] = np.zeros(N)
for i in range(N):
ra = np.deg2rad(survey.ra[i])
dec = np.deg2rad(survey.dec[i])
z = survey.distances[i]
spectral_index = survey.spectral_index[i]
# Overwrite flux_factor if necessary
if self._use_pop_flux_factors:
flux_factor = survey.flux_factor[i]
# Calculate steady emission
L_steady = survey.luminosities_latent[i] # erg s^-1 [0.1 - 100 GeV]
# For alternate energy range
# L_steady = _convert_energy_range(
# L_steady, spectral_index, 0.1, 100, 1, 100
# ) # erg s^-1 [1 - 100 GeV]
L_steady = L_steady * erg_to_GeV # GeV s^-1
L_steady = L_steady * flux_factor # Neutrinos
source = _get_point_source(
L_steady,
spectral_index,
z,
ra,
dec,
Emin,
Emax,
Enorm,
)
# Time spent not flaring
total_duration = nu_params.detector["obs_time"]
steady_duration = total_duration - sum(survey.flare_durations[i])
nu_calc = NeutrinoCalculator([source], effective_area)
Nnu_ex_steady = nu_calc(
time=steady_duration,
min_energy=Emin_sim,
max_energy=Emax,
)[0]
connection["Nnu_ex_steady"][i] += Nnu_ex_steady
Nnu_steady = np.random.poisson(Nnu_ex_steady)
connection["Nnu_steady"][i] += Nnu_steady
if Nnu_steady > 0 and not self._flare_only:
# TODO: remove flare periods
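                # NOTE: both branches below are currently identical; arrival
                # times are drawn uniformly over the full observation period.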
if steady_duration < total_duration:
connection["nu_times"] = np.append(
connection["nu_times"],
np.random.uniform(0, total_duration, Nnu_steady),
)
else:
connection["nu_times"] = np.append(
connection["nu_times"],
np.random.uniform(0, total_duration, Nnu_steady),
)
sim = _run_sim_for(
connection["Nnu_steady"][i],
spectral_index,
z,
ra,
dec,
Emin_sim,
Emax,
Enorm,
nu_detector,
seed,
)
connection["nu_Erecos"] = np.append(
connection["nu_Erecos"], sim.reco_energy
)
connection["nu_ras"] = np.append(connection["nu_ras"], sim.ra)
connection["nu_decs"] = np.append(connection["nu_decs"], sim.dec)
connection["nu_ang_errs"] = np.append(
connection["nu_ang_errs"], sim.ang_err
)
connection["src_detected"] = np.append(
connection["src_detected"],
np.repeat(survey.selection[i], connection["Nnu_steady"][i]),
)
connection["src_flare"] = np.append(
connection["src_flare"],
np.repeat(False, connection["Nnu_steady"][i]),
)
connection["src_id"] = np.append(
connection["src_id"], np.repeat(i, connection["Nnu_steady"][i])
)
# Calculate flared emission
if survey.variability[i] and survey.flare_times[i].size > 0:
# Loop over flares
for time, duration, amp in zip(
survey.flare_times[i],
survey.flare_durations[i],
survey.flare_amplitudes[i],
):
L_flare = (
survey.luminosities_latent[i] * amp
) # erg s^-1 [0.1 - 100 GeV]
# alternate energy range
# L_flare = _convert_energy_range(
# L_flare, spectral_index, 0.1, 100, 1, 100
# ) # erg s^-1 [1 - 100 GeV]
L_flare = L_flare * erg_to_GeV # GeV s^-1
L_flare_nu = L_flare * flux_factor # Neutrinos
source = _get_point_source(
L_flare_nu,
spectral_index,
z,
ra,
dec,
Emin,
Emax,
Enorm,
)
                    # Calculate expected neutrino number per source
nu_calc = NeutrinoCalculator([source], effective_area)
Nnu_ex_flare = nu_calc(
time=duration, min_energy=Emin_sim, max_energy=Emax
)[0]
# Sample actual number of neutrinos per flare
Nnu_flare = np.random.poisson(Nnu_ex_flare)
connection["Nnu_ex_flare"][i] += Nnu_ex_flare
connection["Nnu_flare"][i] += Nnu_flare
# Sample times of nu
if Nnu_flare > 0:
connection["nu_times"] = np.append(
connection["nu_times"],
np.random.uniform(time, time + duration, Nnu_flare),
)
# Simulate neutrino observations
if connection["Nnu_flare"][i] > 0:
sim = _run_sim_for(
connection["Nnu_flare"][i],
spectral_index,
z,
ra,
dec,
Emin_sim,
Emax,
Enorm,
nu_detector,
seed,
)
connection["nu_Erecos"] = np.append(
connection["nu_Erecos"], sim.reco_energy
)
connection["nu_ras"] = np.append(connection["nu_ras"], sim.ra)
connection["nu_decs"] = np.append(connection["nu_decs"], sim.dec)
connection["nu_ang_errs"] = np.append(
connection["nu_ang_errs"], sim.ang_err
)
connection["src_detected"] = np.append(
connection["src_detected"],
np.repeat(survey.selection[i], connection["Nnu_flare"][i]),
)
connection["src_flare"] = np.append(
connection["src_flare"],
np.repeat(True, connection["Nnu_flare"][i]),
)
connection["src_id"] = np.append(
connection["src_id"], np.repeat(i, connection["Nnu_flare"][i])
)
# Select above Emin_det
selection = connection["nu_Erecos"] > Emin_det
connection["nu_Erecos"] = connection["nu_Erecos"][selection]
connection["nu_ras"] = connection["nu_ras"][selection]
connection["nu_decs"] = connection["nu_decs"][selection]
connection["nu_ang_errs"] = connection["nu_ang_errs"][selection]
connection["nu_times"] = connection["nu_times"][selection]
connection["src_detected"] = connection["src_detected"][selection]
connection["src_flare"] = connection["src_flare"][selection]
connection["src_id"] = connection["src_id"][selection]
def write(self):
with h5py.File(self._file_name, "r+") as f:
if self._group_name not in f.keys():
group = f.create_group(self._group_name)
else:
group = f[self._group_name]
subgroup = group.create_group(self.name)
if self._use_pop_flux_factors:
mean_flux_factor = np.mean(
np.concatenate(
[
self._bllac_pop.survey.flux_factor,
self._fsrq_pop.survey.flux_factor,
]
)
)
subgroup.create_dataset("flux_factor", data=mean_flux_factor)
else:
subgroup.create_dataset(
"flux_factor",
data=self._nu_obs._parameter_server.hese.connection["flux_factor"],
)
bllac_group = subgroup.create_group("bllac")
fsrq_group = subgroup.create_group("fsrq")
if self._det_only:
bllac_det_sel = self.bllac_connection["src_detected"].astype(bool)
fsrq_det_sel = self.fsrq_connection["src_detected"].astype(bool)
else:
bllac_det_sel = np.tile(True, len(self.bllac_connection["nu_ras"]))
fsrq_det_sel = np.tile(True, len(self.fsrq_connection["nu_ras"]))
bllac_flare_sel = self.bllac_connection["src_flare"].astype(bool)
fsrq_flare_sel = self.fsrq_connection["src_flare"].astype(bool)
bllac_both_sel = bllac_det_sel & bllac_flare_sel
fsrq_both_sel = fsrq_det_sel & fsrq_flare_sel
# BL Lac
bllac_group.create_dataset(
"n_alerts", data=len(self.bllac_connection["nu_ras"][bllac_det_sel])
)
bllac_group.create_dataset(
"n_alerts_flare",
data=len(self.bllac_connection["nu_ras"][bllac_both_sel]),
)
unique, counts = np.unique(
self.bllac_connection["src_id"][bllac_det_sel], return_counts=True
)
bllac_group.create_dataset("n_multi", data=len(counts[counts > 1]))
unique, counts = np.unique(
self.bllac_connection["src_id"][bllac_both_sel], return_counts=True
)
bllac_group.create_dataset("n_multi_flare", data=len(counts[counts > 1]))
# FSRQ
fsrq_group.create_dataset(
"n_alerts", data=len(self.fsrq_connection["nu_ras"][fsrq_det_sel])
)
fsrq_group.create_dataset(
"n_alerts_flare",
data=len(self.fsrq_connection["nu_ras"][fsrq_both_sel]),
)
unique, counts = np.unique(
self.fsrq_connection["src_id"][fsrq_det_sel], return_counts=True
)
fsrq_group.create_dataset("n_multi", data=len(counts[counts > 1]))
unique, counts = np.unique(
self.fsrq_connection["src_id"][fsrq_both_sel], return_counts=True
)
fsrq_group.create_dataset("n_multi_flare", data=len(counts[counts > 1]))
class BlazarNuConnectedResults(Results):
"""
Handle results from BlazarNuConnectedSim.
"""
def __init__(
self,
file_name_list: List[str],
append_flux_factors: bool = False,
):
self._append_flux_factors = append_flux_factors
super().__init__(file_name_list=file_name_list)
def _setup(self):
self._file_keys = ["n_alerts", "n_alerts_flare", "n_multi", "n_multi_flare"]
self._bllac = OrderedDict()
self._fsrq = OrderedDict()
if self._append_flux_factors:
flux_factors = []
for file_name in self._file_name_list:
with h5py.File(file_name, "r") as f:
flux_factors.extend(f["flux_factors"][()])
self.flux_factors = flux_factors
for key in self._file_keys:
self._bllac[key] = []
self._fsrq[key] = []
else:
# check flux_factors are equal across files
flux_factors = []
for file_name in self._file_name_list:
with h5py.File(file_name, "r") as f:
flux_factors.append(f["flux_factors"][()])
            if not all(np.array_equal(ff, flux_factors[0]) for ff in flux_factors):
raise ValueError("Flux factors are not equal across files")
self.flux_factors = flux_factors[0]
for key in self._file_keys:
self._bllac[key] = [[] for _ in self.flux_factors]
self._fsrq[key] = [[] for _ in self.flux_factors]
def _load_from_h5(self, file_name):
with h5py.File(file_name, "r") as f:
bllac_group = f["bllac"]
fsrq_group = f["fsrq"]
for blazar, group in zip(
[self._bllac, self._fsrq],
[bllac_group, fsrq_group],
):
for key in self._file_keys:
if self._append_flux_factors:
blazar[key].extend(group[key][()])
else:
for i in range(len(self.flux_factors)):
blazar[key][i].extend(group[key][()][i])
if self._append_flux_factors:
self.N += len(group[key][()])
else:
self.N += len(group[key][()][0])
@property
def bllac(self):
for key in self._file_keys:
self._bllac[key] = np.array(self._bllac[key])
return self._bllac
@property
def fsrq(self):
for key in self._file_keys:
self._fsrq[key] = np.array(self._fsrq[key])
return self._fsrq
@staticmethod
def merge_over_flux_factor(
sub_file_names: List[str],
flux_factors,
write_to: str = None,
delete=False,
):
_file_keys = ["n_alerts", "n_alerts_flare", "n_multi", "n_multi_flare"]
bllac_results = {}
fsrq_results = {}
for key in _file_keys:
bllac_results[key] = []
bllac_results[key + "_tmp"] = []
fsrq_results[key] = []
fsrq_results[key + "_tmp"] = []
for flux_factor, sub_file_name in zip(flux_factors, sub_file_names):
with h5py.File(sub_file_name, "r") as sf:
N_f = sf.attrs["N"]
for key in _file_keys:
bllac_results[key + "_tmp"] = []
fsrq_results[key + "_tmp"] = []
for i in range(N_f):
try:
# look for survey
survey = sf["survey_%i/blazar_nu_connection" % i]
bllac_group = survey["bllac"]
fsrq_group = survey["fsrq"]
for key in _file_keys:
bllac_results[key + "_tmp"].append(bllac_group[key][()])
fsrq_results[key + "_tmp"].append(fsrq_group[key][()])
except KeyError:
# write nan if no survey found
for key in _file_keys:
bllac_results[key + "_tmp"].append(np.nan)
fsrq_results[key + "_tmp"].append(np.nan)
for key in _file_keys:
bllac_results[key].append(bllac_results[key + "_tmp"])
fsrq_results[key].append(fsrq_results[key + "_tmp"])
# write to single file
if write_to:
with h5py.File(write_to, "w") as f:
f.create_dataset("flux_factors", data=flux_factors)
bllac_group = f.create_group("bllac")
fsrq_group = f.create_group("fsrq")
for key in _file_keys:
bllac_group.create_dataset(key, data=bllac_results[key])
fsrq_group.create_dataset(key, data=fsrq_results[key])
# delete consolidated files
if delete:
for file_name in sub_file_names:
os.remove(file_name)
@staticmethod
def reorganise_file_structure(
file_name: str,
write_to: str = None,
delete=False,
):
_file_keys = ["n_alerts", "n_alerts_flare", "n_multi", "n_multi_flare"]
bllac_results = {}
fsrq_results = {}
flux_factors = []
for key in _file_keys:
bllac_results[key] = []
fsrq_results[key] = []
with h5py.File(file_name, "r") as f:
N_f = f.attrs["N"]
for i in range(N_f):
try:
# look for survey
survey = f["survey_%i/blazar_nu_connection" % i]
bllac_group = survey["bllac"]
fsrq_group = survey["fsrq"]
flux_factor = survey["flux_factor"][()]
flux_factors.append(flux_factor)
for key in _file_keys:
bllac_results[key].append(bllac_group[key][()])
fsrq_results[key].append(fsrq_group[key][()])
except KeyError:
# write nan if no survey found
flux_factors.append(np.nan)
for key in _file_keys:
bllac_results[key].append(np.nan)
fsrq_results[key].append(np.nan)
# write to new file
if write_to:
with h5py.File(write_to, "w") as f:
f.create_dataset("flux_factors", data=flux_factors)
bllac_group = f.create_group("bllac")
fsrq_group = f.create_group("fsrq")
for key in _file_keys:
bllac_group.create_dataset(key, data=bllac_results[key])
fsrq_group.create_dataset(key, data=fsrq_results[key])
# delete consolidated files
if delete:
os.remove(file_name)
def _convert_energy_range(
luminosity,
spectral_index,
Emin,
Emax,
new_Emin,
new_Emax,
):
"""
Convert value of luminosity to be defined
over a different energy range. The units of all
    energy quantities must be consistent. Assumes a
    power-law spectrum.
    :param luminosity: L in erg s^-1
    :param spectral_index: Spectral index of power law
:param Emin: Current Emin
:param Emax: Current Emax
:param new_Emin: New Emin
:param new_Emax: New Emax
"""
if spectral_index == 2:
numerator = np.log(new_Emax / new_Emin)
denominator = np.log(Emax / Emin)
else:
numerator = np.power(new_Emin, 2 - spectral_index) - np.power(
new_Emax, 2 - spectral_index
)
denominator = np.power(Emin, 2 - spectral_index) - np.power(
Emax, 2 - spectral_index
)
return luminosity * (numerator / denominator)
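# A quick worked example of the helper above: for a spectral index of 2,
# rescaling a luminosity defined over 0.1-100 GeV to 1-100 GeV multiplies it
# by ln(100 / 1) / ln(100 / 0.1) = 2 / 3. The luminosity value is illustrative.
if __name__ == "__main__":
    rescaled = _convert_energy_range(3e45, 2.0, 0.1, 100.0, 1.0, 100.0)
    assert np.isclose(rescaled, 2e45)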
| 25,219 | 30.251549 | 87 |
py
|
nu_coincidence
|
nu_coincidence-main/nu_coincidence/blazar_nu/__init__.py
| 0 | 0 | 0 |
py
|
|
nu_coincidence
|
nu_coincidence-main/nu_coincidence/neutrinos/icecube.py
|
import numpy as np
import h5py
import yaml
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass
from typing import Any, Dict, Optional
from icecube_tools.detector.effective_area import EffectiveArea
from icecube_tools.detector.energy_resolution import EnergyResolution
from icecube_tools.detector.angular_resolution import AngularResolution
from icecube_tools.detector.detector import IceCube
from icecube_tools.source.flux_model import PowerLawFlux
from icecube_tools.source.source_model import DiffuseSource, PointSource
from icecube_tools.simulator import Simulator
from popsynth.utils.cosmology import cosmology
from nu_coincidence.utils.parameter_server import ParameterServer
@dataclass
class IceCubeObservation(object):
"""
Store the output of IceCube simulations.
"""
energies: float
ra: float
dec: float
ang_err: float
times: float
selection: bool
source_label: int
name: str = "icecube_obs"
class IceCubeObsWrapper(object, metaclass=ABCMeta):
"""
Abstract base class for IceCube-like
observations.
"""
def __init__(self, param_server):
self._parameter_server = param_server
self._simulation_setup()
self._run()
@abstractmethod
def _simulation_setup(self):
pass
@abstractmethod
def _run(self):
pass
@property
def observation(self):
return self._observation
def write(self):
with h5py.File(self._parameter_server.file_name, "r+") as f:
if self._parameter_server.group_name not in f.keys():
group = f.create_group(self._parameter_server.group_name)
else:
group = f[self._parameter_server.group_name]
subgroup = group.create_group(self._observation.name)
for key, value in vars(self._observation).items():
if key != "name" and key != "selection":
subgroup.create_dataset(key, data=value, compression="lzf")
for key, value in self._parameter_server.parameters.items():
subgroup.create_dataset(key, data=value)
class IceCubeAlertsWrapper(IceCubeObsWrapper):
"""
Wrapper for simulation of HESE and EHE
alerts.
"""
def __init__(self, param_server):
super().__init__(param_server)
def _simulation_setup(self):
self._energy_res = EnergyResolution.from_dataset("20150820", fetch=False)
self._hese_simulation_setup()
self._ehe_simulation_setup()
@property
def hese_detector(self):
return self._hese_detector
@property
def ehe_detector(self):
return self._ehe_detector
def _hese_simulation_setup(self):
# Sources - all flavor flux
hese_sources = []
if self._parameter_server.hese.atmospheric_flux is not None:
atmo_power_law = PowerLawFlux(
**self._parameter_server.hese.atmospheric_flux
)
atmo_source = DiffuseSource(flux_model=atmo_power_law)
hese_sources.append(atmo_source)
if self._parameter_server.hese.diffuse_flux is not None:
diffuse_power_law = PowerLawFlux(**self._parameter_server.hese.diffuse_flux)
diffuse_source = DiffuseSource(flux_model=diffuse_power_law)
hese_sources.append(diffuse_source)
# Detector
hese_aeff = EffectiveArea.from_dataset(
"20131121",
scale_factor=0.12,
fetch=False,
)
hese_ang_res = AngularResolution.from_dataset(
"20181018",
ret_ang_err_p=0.9,
offset=-0.2,
scale=3,
scatter=0.5,
minimum=0.2,
fetch=False,
)
self._hese_detector = IceCube(hese_aeff, self._energy_res, hese_ang_res)
self._hese_simulator = Simulator(hese_sources, self._hese_detector)
self._hese_simulator.time = self._parameter_server.hese.detector["obs_time"]
self._hese_simulator.max_cosz = self._parameter_server.hese.detector["max_cosz"]
def _ehe_simulation_setup(self):
# Sources - only numu flux
ehe_sources = []
if self._parameter_server.ehe.atmospheric_flux is not None:
atmo_power_law = PowerLawFlux(**self._parameter_server.ehe.atmospheric_flux)
atmo_source = DiffuseSource(flux_model=atmo_power_law)
ehe_sources.append(atmo_source)
if self._parameter_server.ehe.diffuse_flux is not None:
diffuse_power_law = PowerLawFlux(**self._parameter_server.ehe.diffuse_flux)
diffuse_source = DiffuseSource(flux_model=diffuse_power_law)
ehe_sources.append(diffuse_source)
ehe_aeff = EffectiveArea.from_dataset(
"20181018",
scale_factor=0.3,
fetch=False,
)
ehe_ang_res = AngularResolution.from_dataset(
"20181018",
ret_ang_err_p=0.9,
offset=0.0,
scale=1,
minimum=0.2,
scatter=0.2,
fetch=False,
)
self._ehe_detector = IceCube(ehe_aeff, self._energy_res, ehe_ang_res)
self._ehe_simulator = Simulator(ehe_sources, self._ehe_detector)
self._ehe_simulator.time = self._parameter_server.ehe.detector["obs_time"]
self._ehe_simulator.max_cosz = self._parameter_server.ehe.detector["max_cosz"]
def _run(self):
# Only run independent sim if neutrinos are not
# connected to another population.
if (
self._parameter_server.hese.connection is None
and self._parameter_server.ehe.connection is None
):
# HESE
self._hese_simulator.run(
show_progress=False, seed=self._parameter_server.seed
)
hese_Emin_det = self._parameter_server.hese.detector["Emin_det"]
hese_selection = np.array(self._hese_simulator.reco_energy) > hese_Emin_det
hese_times = np.random.uniform(
0,
self._parameter_server.hese.detector["obs_time"],
self._hese_simulator.N,
)
# EHE
self._ehe_simulator.run(
show_progress=False, seed=self._parameter_server.seed
)
ehe_Emin_det = self._parameter_server.ehe.detector["Emin_det"]
ehe_selection = np.array(self._ehe_simulator.reco_energy) > ehe_Emin_det
ehe_times = np.random.uniform(
0,
self._parameter_server.ehe.detector["obs_time"],
self._ehe_simulator.N,
)
# Combine
selection = np.concatenate((hese_selection, ehe_selection))
ra = np.concatenate((self._hese_simulator.ra, self._ehe_simulator.ra))
ra = np.rad2deg(ra[selection])
dec = np.concatenate((self._hese_simulator.dec, self._ehe_simulator.dec))
dec = np.rad2deg(dec[selection])
ang_err = np.concatenate(
(self._hese_simulator.ang_err, self._ehe_simulator.ang_err)
)
ang_err = ang_err[selection]
energies = np.concatenate(
(self._hese_simulator.reco_energy, self._ehe_simulator.reco_energy)
)
energies = energies[selection]
source_labels = np.concatenate(
(self._hese_simulator.source_label, self._ehe_simulator.source_label)
)
source_labels = source_labels[selection]
times = np.concatenate((hese_times, ehe_times))[selection]
self._observation = IceCubeObservation(
energies,
ra,
dec,
ang_err,
times,
selection,
source_labels,
)
else:
self._observation = None
class IceCubeTracksWrapper(IceCubeObsWrapper):
"""
Wrapper for the simulation of track events.
"""
def __init__(self, param_server):
super().__init__(param_server)
@property
def detector(self):
return self._detector
def _simulation_setup(self):
# Sources
sources = []
if self._parameter_server.atmospheric_flux is not None:
atmo_power_law = PowerLawFlux(**self._parameter_server.atmospheric_flux)
atmo_source = DiffuseSource(flux_model=atmo_power_law)
sources.append(atmo_source)
if self._parameter_server.diffuse_flux is not None:
diffuse_power_law = PowerLawFlux(**self._parameter_server.diffuse_flux)
diffuse_source = DiffuseSource(flux_model=diffuse_power_law)
sources.append(diffuse_source)
# Detector
effective_area = EffectiveArea.from_dataset("20181018", fetch=False)
angular_resolution = AngularResolution.from_dataset(
"20181018",
fetch=False,
ret_ang_err_p=0.9, # Return 90% CIs
offset=0.4, # Shift angular error up in attempt to match HESE
)
energy_resolution = EnergyResolution.from_dataset(
"20150820",
fetch=False,
)
self._detector = IceCube(
effective_area,
energy_resolution,
angular_resolution,
)
self._simulator = Simulator(sources, self._detector)
self._simulator.time = self._parameter_server.detector["obs_time"]
self._simulator.max_cosz = self._parameter_server.detector["max_cosz"]
def _run(self):
# Only run independent simulation if neutrinos are not
# connected to another population
if self._parameter_server.connection is None:
self._simulator.run(
show_progress=False,
seed=self._parameter_server.seed,
)
# Select neutrinos above reco energy threshold
Emin_det = self._parameter_server.detector["Emin_det"]
selection = np.array(self._simulator.reco_energy) > Emin_det
ra = np.rad2deg(self._simulator.ra)[selection]
dec = np.rad2deg(self._simulator.dec)[selection]
ang_err = np.array(self._simulator.ang_err)[selection]
energies = np.array(self._simulator.reco_energy)[selection]
source_labels = np.array(self._simulator.source_label)[selection]
            # Draw times for all simulated events, then apply the selection so
            # that times line up with the other selected quantities.
            times = np.random.uniform(
                0,
                self._parameter_server.detector["obs_time"],
                self._simulator.N,
            )[selection]
self._observation = IceCubeObservation(
energies,
ra,
dec,
ang_err,
times,
selection,
source_labels,
)
else:
self._observation = None
class IceCubeObsParams(ParameterServer):
"""
All the info you need to recreate a
simulation of neutrinos in IceCube.
"""
def __init__(
self,
detector: Dict[str, Any],
atmospheric_flux: Optional[Dict[str, Any]] = None,
diffuse_flux: Optional[Dict[str, Any]] = None,
connection: Optional[Dict[str, Any]] = None,
):
super().__init__()
self._detector = detector
self._atmospheric_flux = atmospheric_flux
self._diffuse_flux = diffuse_flux
self._connection = connection
def to_dict(self) -> Dict[str, Any]:
output: Dict[str, Any] = {}
output["detector"] = self._detector
if self._atmospheric_flux is not None:
output["atmospheric flux"] = self._atmospheric_flux
if self._diffuse_flux is not None:
output["diffuse flux"] = self._diffuse_flux
if self._connection is not None:
output["connection"] = self._connection
return output
@classmethod
def from_dict(cls, input: Dict[str, Any]) -> "IceCubeObsParams":
detector = input["detector"]
if "atmospheric flux" in input:
atmospheric_flux = input["atmospheric flux"]
else:
atmospheric_flux = None
if "diffuse flux" in input:
diffuse_flux = input["diffuse flux"]
else:
diffuse_flux = None
if "connection" in input:
connection = input["connection"]
else:
connection = None
return cls(detector, atmospheric_flux, diffuse_flux, connection)
def write_to(self, file_name: str):
with open(file_name, "w") as f:
yaml.dump(
stream=f,
data=self.to_dict(),
# default_flow_style=False,
Dumper=yaml.SafeDumper,
)
@classmethod
def from_file(cls, file_name: str) -> "IceCubeObsParams":
with open(file_name) as f:
input: Dict[str, Any] = yaml.load(f, Loader=yaml.SafeLoader)
return cls.from_dict(input)
@property
def detector(self):
return self._detector
@property
def atmospheric_flux(self):
return self._atmospheric_flux
@property
def diffuse_flux(self):
return self._diffuse_flux
@property
def connection(self):
return self._connection
class IceCubeAlertsParams(ParameterServer):
"""
Parameter server for IceCube alerts where
HESE and EHE simulations both require inputs.
    For use with IceCubeAlertsWrapper.
"""
def __init__(
self,
hese_config_file: str,
ehe_config_file: str,
):
super().__init__()
self._hese = IceCubeObsParams.from_file(hese_config_file)
self._ehe = IceCubeObsParams.from_file(ehe_config_file)
@property
def hese(self):
return self._hese
@property
def ehe(self):
return self._ehe
def _get_point_source(
luminosity,
spectral_index,
z,
ra,
dec,
Emin,
Emax,
Enorm,
):
"""
Define a neutrino point source from
a luminosity and spectral index.
:param luminosity: L in GeV s^-1
:param spectral_index: Spectral index of power law
:param z: Redshift
:param ra: Right ascension
:param dec: Declination
:param Emin: Minimum energy in GeV
:param Emax: Maximum energy in GeV
:param Enorm: Normalisation energy in GeV
"""
energy_flux = luminosity / (4 * np.pi * cosmology.luminosity_distance(z) ** 2)
tmp = PowerLawFlux(
1,
Enorm,
spectral_index,
lower_energy=Emin,
upper_energy=Emax,
)
power = tmp.total_flux_density()
norm = energy_flux / power
power_law = PowerLawFlux(
norm, Enorm, spectral_index, lower_energy=Emin, upper_energy=Emax
)
source = PointSource(flux_model=power_law, z=z, coord=(ra, dec))
return source
def _run_sim_for(
N,
spectral_index,
z,
ra,
dec,
Emin,
Emax,
Enorm,
detector,
seed,
):
"""
Run a simulation of N events with the provided
spectral model and detector info.
:param N: Integer number of neutrinos
:param spectral_index: Spectral index of power law
:param z: Redshift
:param ra: Right ascension
:param dec: Declination
:param Emin: Minimum energy in GeV
:param Emax: Maximum energy in GeV
:param Enorm: Normalisation energy in GeV
    :param detector: IceCube detector
:param seed: Random seed
"""
tmp = PowerLawFlux(1, Enorm, spectral_index, lower_energy=Emin, upper_energy=Emax)
source = PointSource(flux_model=tmp, z=z, coord=(ra, dec))
sim = Simulator(source, detector)
sim.run(
N=N,
show_progress=False,
seed=seed,
)
return sim
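# A minimal sketch of how the helpers above are used elsewhere in this package
# (see blazar_nu/connected.py): build a detector as in IceCubeTracksWrapper and
# simulate a fixed number of events from a point source. The numbers are
# illustrative, and fetch=False assumes the icecube_tools data files have
# already been downloaded.
if __name__ == "__main__":
    aeff = EffectiveArea.from_dataset("20181018", fetch=False)
    eres = EnergyResolution.from_dataset("20150820", fetch=False)
    ares = AngularResolution.from_dataset("20181018", fetch=False)
    detector = IceCube(aeff, eres, ares)
    sim = _run_sim_for(
        N=10,
        spectral_index=2.0,
        z=0.3,
        ra=np.deg2rad(120.0),
        dec=np.deg2rad(-5.0),
        Emin=1e5,
        Emax=1e8,
        Enorm=1e5,
        detector=detector,
        seed=42,
    )
    print(len(sim.reco_energy))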
| 15,879 | 24.99018 | 88 |
py
|
nu_coincidence
|
nu_coincidence-main/nu_coincidence/neutrinos/__init__.py
| 0 | 0 | 0 |
py
|
|
nu_coincidence
|
nu_coincidence-main/nu_coincidence/utils/plotting.py
|
import numpy as np
from astropy.visualization.wcsaxes.patches import _rotate_polygon
from matplotlib.patches import PathPatch
from matplotlib.path import Path
import astropy.units as u
class SphericalCircle(PathPatch):
# created from the astropy.visualization.wcsaxes.patches.SphericalCircle class
    # changed from Polygon to Path so the circle can be drawn in disjoint parts
# code from https://github.com/grburgess/pyipn
"""
Create a patch representing a spherical circle - that is, a circle that is
formed of all the points that are within a certain angle of the central
    coordinates on a sphere. Here we assume that latitude goes from -90 to +90.
This class is needed in cases where the user wants to add a circular patch
to a celestial image, since otherwise the circle will be distorted, because
a fixed interval in longitude corresponds to a different angle on the sky
depending on the latitude.
Parameters
----------
center : tuple or `~astropy.units.Quantity`
This can be either a tuple of two `~astropy.units.Quantity` objects, or
a single `~astropy.units.Quantity` array with two elements.
radius : `~astropy.units.Quantity`
The radius of the circle
resolution : int, optional
The number of points that make up the circle - increase this to get a
smoother circle.
vertex_unit : `~astropy.units.Unit`
The units in which the resulting polygon should be defined - this
should match the unit that the transformation (e.g. the WCS
transformation) expects as input.
Notes
-----
    Additional keyword arguments are passed to `~matplotlib.patches.PathPatch`
"""
def __init__(self, center, radius, resolution=100, vertex_unit=u.degree, **kwargs):
# Extract longitude/latitude, either from a tuple of two quantities, or
# a single 2-element Quantity.
longitude, latitude = center
# #longitude values restricted on domain of (-180,180]
# if longitude.to_value(u.deg) > 180. :
# longitude = -360. * u.deg + longitude.to(u.deg)
# Start off by generating the circle around the North pole
lon = np.linspace(0.0, 2 * np.pi, resolution + 1)[:-1] * u.radian
lat = np.repeat(0.5 * np.pi - radius.to_value(u.radian), resolution) * u.radian
lon, lat = _rotate_polygon(lon, lat, longitude, latitude)
# Extract new longitude/latitude in the requested units
lon = lon.to_value(vertex_unit)
lat = lat.to_value(vertex_unit)
# Create polygon vertices
vertices = np.array([lon, lat]).transpose()
# split path into two sections if circle crosses -180, 180 bounds
codes = []
last = (4000.4 * u.degree).to_value(
vertex_unit
        )  # arbitrary value, large enough that the first element is "MOVETO"
for v in vertices:
if np.absolute(v[0] - last) > (300 * u.degree).to_value(vertex_unit):
codes.append(Path.MOVETO)
else:
codes.append(Path.LINETO)
last = v[0]
circle_path = Path(vertices, codes)
super().__init__(circle_path, **kwargs)
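# Helpers below convert points and circles on the sphere into 3D Cartesian
# coordinates; the radius argument sets the scale of the returned coordinates.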
def compute_xyz(ra, dec, radius=100):
out = np.zeros((len(ra), 3))
for i, (r, d) in enumerate(zip(ra, dec)):
out[i, 0] = np.cos(d) * np.cos(r)
out[i, 1] = np.cos(d) * np.sin(r)
out[i, 2] = np.sin(d)
return radius * out
def get_lon_lat(center, theta, radius=1, resolution=100):
longitude, latitude = center
# #longitude values restricted on domain of (-180,180]
# if longitude.to_value(u.deg) > 180. :
# longitude = -360. * u.deg + longitude.to(u.deg)
# Start off by generating the circle around the North pole
lon = np.linspace(0.0, 2 * np.pi, resolution + 1)[:-1] * u.radian
# lat = np.repeat(0.5 * np.pi - radius.to_value(u.radian), resolution) * u.radian
lat = np.repeat(0.5 * np.pi - theta.to_value(u.radian), resolution) * u.radian
lon, lat = _rotate_polygon(lon, lat, longitude, latitude)
return lon, lat
def get_3d_circle(center, theta, radius, resolution=100):
lon, lat = get_lon_lat(center, theta, radius, resolution)
return compute_xyz(lon, lat, radius)
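# A minimal usage sketch for SphericalCircle, assuming a plain matplotlib Axes
# in degree coordinates (in practice the patch is typically added to a sky
# projection). The centre, radius and styling are illustrative.
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    circle = SphericalCircle(
        (30.0 * u.deg, 60.0 * u.deg),
        5.0 * u.deg,
        edgecolor="k",
        facecolor="none",
    )
    ax.add_patch(circle)
    ax.set_xlim(-180, 180)
    ax.set_ylim(-90, 90)
    fig.savefig("spherical_circle_example.png")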
| 4,272 | 36.156522 | 87 |
py
|
nu_coincidence
|
nu_coincidence-main/nu_coincidence/utils/fluxes.py
|
import numpy as np
def flux_conv(index, E_min, E_max):
"""
Convert from energy flux [erg cm^-2 s^-1]
to number flux [cm^-2 s^-1] assuming
a bounded power law spectrum.
:param index: Spectral index
:param E_min: Minimum energy
:param E_max: Maximum energy
"""
if index == 1.0:
f1 = np.log(E_max) - np.log(E_min)
else:
f1 = (1 / (1 - index)) * (
np.power(E_max, 1 - index) - np.power(E_min, 1 - index)
)
if index == 2.0:
f2 = np.log(E_max) - np.log(E_min)
else:
f2 = (1 / (2 - index)) * (
np.power(E_max, 2 - index) - np.power(E_min, 2 - index)
)
return f1 / f2
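# A quick numerical check of flux_conv, assuming GeV units: for a spectral
# index of 2 the expression reduces to (1/E_min - 1/E_max) / ln(E_max / E_min).
if __name__ == "__main__":
    E_min, E_max = 1e4, 1e7
    expected = (1 / E_min - 1 / E_max) / np.log(E_max / E_min)
    assert np.isclose(flux_conv(2.0, E_min, E_max), expected)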
| 696 | 18.361111 | 67 |
py
|
nu_coincidence
|
nu_coincidence-main/nu_coincidence/utils/parameter_server.py
|
from abc import ABCMeta
class ParameterServer(object, metaclass=ABCMeta):
"""
Abstract base class for parameter servers.
"""
def __init__(self):
self._file_name = None
self._group_name = None
self._seed = None
self._parameters = None
@property
def seed(self):
return self._seed
@seed.setter
def seed(self, value: int):
self._seed = value
@property
def file_name(self):
return self._file_name
@file_name.setter
def file_name(self, value: str):
self._file_name = value
@property
def group_name(self):
return self._group_name
@group_name.setter
def group_name(self, value: str):
self._group_name = value
@property
def parameters(self):
return self._parameters
| 838 | 14.830189 | 49 |
py
|
nu_coincidence
|
nu_coincidence-main/nu_coincidence/utils/package_data.py
|
from pathlib import Path
from pkg_resources import resource_filename
def get_path_to_config(file_name: str) -> Path:
file_path = resource_filename("nu_coincidence", "config/%s" % file_name)
return Path(file_path)
def get_available_config():
config_path = resource_filename("nu_coincidence", "config")
paths = list(Path(config_path).rglob("*.yml"))
files = [p.name for p in paths]
return files
| 427 | 19.380952 | 76 |
py
|
nu_coincidence
|
nu_coincidence-main/nu_coincidence/utils/__init__.py
| 0 | 0 | 0 |
py
|
|
nu_coincidence
|
nu_coincidence-main/nu_coincidence/utils/parallel.py
|
from abc import abstractmethod, ABCMeta
from joblib._parallel_backends import LokyBackend
class MultiCallback:
"""
Allow for multiple async callbacks
in your custom parallel backend.
"""
def __init__(self, *callbacks):
self.callbacks = [cb for cb in callbacks if cb]
def __call__(self, out):
for cb in self.callbacks:
cb(out)
class ImmediateResultBackend(LokyBackend, metaclass=ABCMeta):
"""
Custom backend for acting on results
as they are processed in a joblib
Parallel() call.
"""
def callback(self, future):
"""
The extra callback passes a future to
future_handler, which must be implemented.
"""
self.future_handler(future)
del future
def apply_async(self, func, callback=None):
"""
        Override this method to handle the new
        callback in addition to any existing ones.
"""
callbacks = MultiCallback(callback, self.callback)
return super().apply_async(func, callbacks)
@abstractmethod
def future_handler(self, future):
"""
Do something useful with the
completed future e.g. write to file.
"""
raise NotImplementedError()
class FileWritingBackend(ImmediateResultBackend):
"""
Assumes result from future has a write()
method which is called.
"""
def future_handler(self, future):
result = future.result()[0]
result.write()
del result
| 1,524 | 20.180556 | 61 |
py
|
nu_coincidence
|
nu_coincidence-main/nu_coincidence/pop_nu/base.py
|
from abc import ABCMeta, abstractmethod
from joblib import (
parallel_backend,
register_parallel_backend,
Parallel,
delayed,
)
from nu_coincidence.populations.popsynth_wrapper import (
PopsynthParams,
PopsynthWrapper,
)
from nu_coincidence.simulation import Simulation
from nu_coincidence.utils.package_data import get_path_to_config
from nu_coincidence.neutrinos.icecube import (
IceCubeObsParams,
IceCubeObsWrapper,
IceCubeTracksWrapper,
IceCubeAlertsParams,
IceCubeAlertsWrapper,
)
from nu_coincidence.utils.parallel import FileWritingBackend
register_parallel_backend("file_write", FileWritingBackend)
class PopNuSim(Simulation, metaclass=ABCMeta):
"""
Abstract base class for popsynth
neutrino simulations.
"""
def __init__(
self,
file_name="output/test_sim.h5",
group_base_name="survey",
N=1,
pop_config: str = None,
nu_config: str = None,
nu_hese_config: str = None,
nu_ehe_config: str = None,
seed=1000,
):
self._pop_config = pop_config
self._nu_config = nu_config
self._nu_hese_config = nu_hese_config
self._nu_ehe_config = nu_ehe_config
self._seed = seed
self._pop_param_servers = []
self._nu_param_servers = []
super().__init__(
file_name=file_name,
group_base_name=group_base_name,
N=N,
)
def _setup_param_servers(self):
self._pop_param_servers = []
self._nu_param_servers = []
for i in range(self._N):
seed = self._seed + i
pop_spec = get_path_to_config(self._pop_config)
pop_param_server = PopsynthParams(pop_spec)
pop_param_server.seed = seed
pop_param_server.file_name = self._file_name
pop_param_server.group_name = self._group_base_name + "_%i" % i
self._pop_param_servers.append(pop_param_server)
# Neutrinos
if self._nu_config is not None:
nu_spec = get_path_to_config(self._nu_config)
nu_param_server = IceCubeObsParams.from_file(nu_spec)
else:
nu_hese_spec = get_path_to_config(self._nu_hese_config)
nu_ehe_spec = get_path_to_config(self._nu_ehe_config)
nu_param_server = IceCubeAlertsParams(nu_hese_spec, nu_ehe_spec)
nu_param_server.seed = seed
nu_param_server.file_name = self._file_name
nu_param_server.group_name = self._group_base_name + "_%i" % i
self._nu_param_servers.append(nu_param_server)
def _pop_wrapper(self, param_server):
return PopsynthWrapper(param_server)
def _nu_obs_wrapper(self, param_server):
if self._nu_config is not None:
return IceCubeTracksWrapper(param_server)
else:
return IceCubeAlertsWrapper(param_server)
@abstractmethod
def _pop_nu_wrapper(self, pop, nu_obs):
raise NotImplementedError()
def _sim_wrapper(self, pop_param_server, nu_param_server):
pop = self._pop_wrapper(pop_param_server)
nu_obs = self._nu_obs_wrapper(nu_param_server)
result = self._pop_nu_wrapper(pop, nu_obs)
del pop, nu_obs
return result
def run(self, parallel=True, n_jobs=4):
# Parallel
if parallel:
# Writes to file upon completion
with parallel_backend("file_write"):
Parallel(n_jobs=n_jobs)(
delayed(self._sim_wrapper)(pop_ps, nu_ps)
for pop_ps, nu_ps in zip(
self._pop_param_servers,
self._nu_param_servers,
)
)
# Serial
else:
for pop_ps, nu_ps in zip(
self._pop_param_servers,
self._nu_param_servers,
):
result = self._sim_wrapper(
pop_ps,
nu_ps,
)
result.write()
del result
class PopNuAction(object, metaclass=ABCMeta):
"""
Abstract base class for different actions
that can be applied to popsynth and neutrino
    observations, e.g. coincidence checks or
connected simulations.
"""
def __init__(
self,
pop: PopsynthWrapper,
nu_obs: IceCubeObsWrapper,
name="pop_action",
):
self._name = name
self._pop = pop
self._nu_obs = nu_obs
self._file_name = nu_obs._parameter_server.file_name
self._group_name = nu_obs._parameter_server.group_name
self._run()
@abstractmethod
def _run(self):
raise NotImplementedError()
@abstractmethod
def write(self):
raise NotImplementedError()
@property
def name(self):
return self._name
| 4,965 | 23.706468 | 80 |
py
|
nu_coincidence
|
nu_coincidence-main/nu_coincidence/pop_nu/coincidence.py
|
import numpy as np
import h5py
from typing import List
from collections import OrderedDict
from nu_coincidence.simulation import Results
from nu_coincidence.populations.popsynth_wrapper import PopsynthWrapper
from nu_coincidence.neutrinos.icecube import IceCubeObsWrapper
from nu_coincidence.coincidence import (
count_spatial_coincidence,
check_temporal_coincidence,
)
from nu_coincidence.pop_nu.base import PopNuSim, PopNuAction
class PopNuCoincidenceSim(PopNuSim):
"""
Set up and run simulations for popsynth--neutrino
coincidences. Assumes populations and neutrinos have
no underlying connection.
"""
def __init__(
self,
file_name="output/test_coincidence_sim.h5",
group_base_name="survey",
N=1,
pop_config: str = None,
nu_config: str = None,
nu_hese_config: str = None,
nu_ehe_config: str = None,
seed=1000,
):
super().__init__(
file_name=file_name,
group_base_name=group_base_name,
N=N,
pop_config=pop_config,
nu_config=nu_config,
nu_hese_config=nu_hese_config,
nu_ehe_config=nu_ehe_config,
seed=seed,
)
def _pop_nu_wrapper(self, pop, nu_obs):
return PopNuCoincidence(pop, nu_obs)
class PopNuCoincidence(PopNuAction):
"""
Check for coincidences of interest.
"""
def __init__(
self,
pop: PopsynthWrapper,
nu_obs: IceCubeObsWrapper,
name="pop_nu_coincidence",
):
self._coincidence = OrderedDict()
super().__init__(
pop=pop,
nu_obs=nu_obs,
name=name,
)
def _run(self):
self._check_spatial()
# self._check_temporal()
@property
def coincidence(self):
return self._coincidence
def write(self):
with h5py.File(self._file_name, "r+") as f:
if self._group_name not in f.keys():
group = f.create_group(self._group_name)
else:
group = f[self._group_name]
subgroup = group.create_group(self.name)
pop_group = subgroup.create_group("pop")
for key, value in self._coincidence.items():
if key != "spatial_match_inds":
pop_group.create_dataset(key, data=value)
def _check_spatial(self):
"""
        Check for spatial coincidences between
        the *detected* population and
        neutrinos.
"""
observation = self._nu_obs.observation
survey = self._pop.survey
n_match_spatial, match_ids = count_spatial_coincidence(
np.deg2rad(observation.ra),
np.deg2rad(observation.dec),
np.deg2rad(observation.ang_err),
np.deg2rad(survey.ra[survey.selection]),
np.deg2rad(survey.dec[survey.selection]),
)
self._coincidence["n_spatial"] = n_match_spatial
self._coincidence["match_ids"] = match_ids
def _check_temporal(self):
"""
Check for temporal coincidences between
the *detected* populations and
neutrinos, which are also spatial
coincidences.
"""
observation = self._nu_obs.observation
survey = self._pop.survey
(
n_match_variable,
n_match_flaring,
matched_flare_amplitudes,
) = check_temporal_coincidence(
observation.times,
self._coincidence["spatial_match_inds"],
survey.variability[survey.selection],
survey.flare_times[survey.selection],
survey.flare_durations[survey.selection],
survey.flare_amplitudes[survey.selection],
)
self._coincidence["n_variable"] = n_match_variable
self._coincidence["n_flaring"] = n_match_flaring
self._coincidence["matched_flare_amplitudes"] = matched_flare_amplitudes
class PopNuCoincidenceResults(Results):
"""
Load results from PopNuCoincidenceSim.
"""
def __init__(self, file_name_list: List[str]):
super().__init__(file_name_list=file_name_list)
def _setup(self):
self.pop = OrderedDict()
self.pop["n_spatial"] = np.array([])
self.pop["match_ids"] = []
# self.pop["n_variable"] = np.array([])
# self.pop["n_flaring"] = np.array([])
# self.pop["matched_flare_amplitudes"] = np.array([])
def _load_from_h5(self, file_name):
with h5py.File(file_name, "r") as f:
N_f = f.attrs["N"]
n_spatial_f = np.zeros(N_f)
# n_variable_f = np.zeros(N_f)
# n_flaring_f = np.zeros(N_f)
for i in range(N_f):
group = f["survey_%i/pop_nu_coincidence/pop" % i]
n_spatial_f[i] = group["n_spatial"][()]
self.pop["match_ids"].append(group["match_ids"][()])
# n_variable_f[i] = group["n_variable"][()]
# n_flaring_f[i] = group["n_flaring"][()]
# if n_flaring_f[i] >= 1:
# flare_amps_i = group["matched_flare_amplitudes"][()]
# self.pop["matched_flare_amplitudes"] = np.append(
# self.pop["matched_flare_amplitudes"], flare_amps_i
# )
self.pop["n_spatial"] = np.append(self.pop["n_spatial"], n_spatial_f)
# self.pop["n_variable"] = np.append(self.pop["n_variable"], n_variable_f)
# self.pop["n_flaring"] = np.append(self.pop["n_flaring"], n_flaring_f)
self.N += N_f
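# A minimal usage sketch, mirroring how the blazar simulations are driven in
# the package tests. The population config name is hypothetical and must exist
# in nu_coincidence/config; the neutrino alert configs are the diffuse ones
# shipped with the package, and an output/ directory is assumed to exist.
# load() is the classmethod used in the tests for the Results classes.
if __name__ == "__main__":
    sim = PopNuCoincidenceSim(
        file_name="output/pop_nu_coincidence.h5",
        N=1,
        pop_config="my_pop.yml",  # hypothetical config
        nu_hese_config="nu_diffuse_hese.yml",
        nu_ehe_config="nu_diffuse_ehe.yml",
        seed=42,
    )
    sim.run(parallel=False)
    results = PopNuCoincidenceResults.load(["output/pop_nu_coincidence.h5"])
    print(results.pop["n_spatial"])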
| 5,653 | 26.852217 | 82 |
py
|
nu_coincidence
|
nu_coincidence-main/nu_coincidence/pop_nu/__init__.py
| 0 | 0 | 0 |
py
|
|
nu_coincidence
|
nu_coincidence-main/tests/conftest.py
|
import pytest
@pytest.fixture(scope="session")
def output_directory(tmpdir_factory):
directory = tmpdir_factory.mktemp("output")
return directory
| 158 | 14.9 | 47 |
py
|
nu_coincidence
|
nu_coincidence-main/tests/__init__.py
| 0 | 0 | 0 |
py
|
|
nu_coincidence
|
nu_coincidence-main/tests/test_blazar_sim.py
|
from popsynth.population_synth import PopulationSynth
from nu_coincidence.utils.package_data import (
get_available_config,
get_path_to_config,
)
config_files = get_available_config()
def test_blazar_sim():
blazar_config_files = [f for f in config_files if ("bllac" in f or "fsrq" in f)]
for blazar_config_file in blazar_config_files:
print(blazar_config_file)
blazar_config = get_path_to_config(blazar_config_file)
pop_gen = PopulationSynth.from_file(blazar_config)
pop_gen._seed = 42
pop = pop_gen.draw_survey()
assert pop.distances.size > 0
assert pop.distances[pop.selection].size > 0
| 674 | 21.5 | 84 |
py
|
nu_coincidence
|
nu_coincidence-main/tests/test_blazar_nu.py
|
import time
from nu_coincidence.blazar_nu.coincidence import BlazarNuCoincidenceSim
from nu_coincidence.blazar_nu.coincidence import BlazarNuCoincidenceResults
from nu_coincidence.blazar_nu.connected import BlazarNuConnectedSim
from nu_coincidence.blazar_nu.connected import BlazarNuConnectedResults
coincidence_sim_params = {}
coincidence_sim_params["bllac_config"] = "bllac_ref.yml"
coincidence_sim_params["fsrq_config"] = "fsrq_ref.yml"
coincidence_sim_params["nu_hese_config"] = "nu_diffuse_hese.yml"
coincidence_sim_params["nu_ehe_config"] = "nu_diffuse_ehe.yml"
coincidence_sim_params["seed"] = 42
connected_sim_params = {}
connected_sim_params["flux_factors"] = [0.001, 0.01]
connected_sim_params["bllac_config"] = "bllac_ref.yml"
connected_sim_params["fsrq_config"] = "fsrq_ref.yml"
connected_sim_params["nu_hese_config"] = "nu_connected_hese.yml"
connected_sim_params["nu_ehe_config"] = "nu_connected_ehe.yml"
connected_sim_params["seed"] = 42
def test_concidence_sim(output_directory):
sim = BlazarNuCoincidenceSim(
file_name=output_directory.join("test_coincidence_sim.h5"),
N=1,
bllac_config=coincidence_sim_params["bllac_config"],
fsrq_config=coincidence_sim_params["fsrq_config"],
nu_hese_config=coincidence_sim_params["nu_hese_config"],
nu_ehe_config=coincidence_sim_params["nu_ehe_config"],
seed=coincidence_sim_params["seed"],
)
sim.run(parallel=False)
results = BlazarNuCoincidenceResults.load(
[output_directory.join("test_coincidence_sim.h5")]
)
assert len(results.bllac["n_spatial"]) == len(results.fsrq["n_spatial"]) == 1
def test_coincidence_sim_parallel(output_directory):
sim = BlazarNuCoincidenceSim(
file_name=output_directory.join("test_coincidence_sim_parallel.h5"),
N=2,
bllac_config=coincidence_sim_params["bllac_config"],
fsrq_config=coincidence_sim_params["fsrq_config"],
nu_hese_config=coincidence_sim_params["nu_hese_config"],
nu_ehe_config=coincidence_sim_params["nu_ehe_config"],
seed=coincidence_sim_params["seed"],
)
sim.run(parallel=True, n_jobs=2)
time.sleep(1)
results = BlazarNuCoincidenceResults.load(
[output_directory.join("test_coincidence_sim_parallel.h5")]
)
assert len(results.bllac["n_spatial"]) == len(results.fsrq["n_spatial"]) == 2
def test_connected_sim(output_directory):
sub_file_names = [
output_directory.join("test_connected_sim_%.1e.h5" % ff)
for ff in connected_sim_params["flux_factors"]
]
for ff, sfn in zip(connected_sim_params["flux_factors"], sub_file_names):
sim = BlazarNuConnectedSim(
file_name=sfn,
N=1,
bllac_config=connected_sim_params["bllac_config"],
fsrq_config=connected_sim_params["fsrq_config"],
nu_hese_config=connected_sim_params["nu_hese_config"],
nu_ehe_config=connected_sim_params["nu_ehe_config"],
seed=connected_sim_params["seed"],
flux_factor=ff,
flare_only=True,
det_only=True,
)
sim.run(parallel=False)
BlazarNuConnectedResults.merge_over_flux_factor(
sub_file_names,
connected_sim_params["flux_factors"],
write_to=output_directory.join("test_connected_sim.h5"),
)
results = BlazarNuConnectedResults.load(
[output_directory.join("test_connected_sim.h5")]
)
assert len(results.bllac["n_alerts"]) == len(results.fsrq["n_alerts"]) == 2
def test_connected_sim_parallel(output_directory):
sub_file_names = [
output_directory.join("test_connected_sim_%.1e.h5" % ff)
for ff in connected_sim_params["flux_factors"]
]
for ff, sfn in zip(connected_sim_params["flux_factors"], sub_file_names):
sim = BlazarNuConnectedSim(
file_name=sfn,
N=2,
bllac_config=connected_sim_params["bllac_config"],
fsrq_config=connected_sim_params["fsrq_config"],
nu_hese_config=connected_sim_params["nu_hese_config"],
nu_ehe_config=connected_sim_params["nu_ehe_config"],
seed=connected_sim_params["seed"],
flux_factor=ff,
flare_only=True,
det_only=True,
)
sim.run(parallel=True, n_jobs=2)
time.sleep(1)
BlazarNuConnectedResults.merge_over_flux_factor(
sub_file_names,
connected_sim_params["flux_factors"],
write_to=output_directory.join("test_connected_sim_parallel.h5"),
)
results = BlazarNuConnectedResults.load(
[output_directory.join("test_connected_sim_parallel.h5")]
)
assert len(results.bllac["n_alerts"]) == len(results.fsrq["n_alerts"]) == 2
| 4,776 | 32.405594 | 81 |
py
|
nu_coincidence
|
nu_coincidence-main/tests/test_nu_sim.py
|
from icecube_tools.detector.effective_area import EffectiveArea
from icecube_tools.detector.energy_resolution import EnergyResolution
from icecube_tools.detector.angular_resolution import AngularResolution
from nu_coincidence.utils.package_data import (
get_available_config,
get_path_to_config,
)
from nu_coincidence.neutrinos.icecube import (
IceCubeAlertsWrapper,
IceCubeAlertsParams,
IceCubeTracksWrapper,
IceCubeObsParams,
)
config_files = get_available_config()
# Make sure icecube_tools data is loaded
my_aeff = EffectiveArea.from_dataset("20181018")
my_aeff = EffectiveArea.from_dataset("20131121")
my_eres = EnergyResolution.from_dataset("20150820")
my_angres = AngularResolution.from_dataset("20181018")
def test_icecube_alerts_diffuse_sim():
hese_config_files = [f for f in config_files if ("hese" in f and "diffuse" in f)]
ehe_config_files = [f for f in config_files if ("ehe" in f and "diffuse" in f)]
for hese_config_file, ehe_config_file in zip(
hese_config_files,
ehe_config_files,
):
print(hese_config_file, ehe_config_file)
hese_config = get_path_to_config(hese_config_file)
ehe_config = get_path_to_config(ehe_config_file)
param_server = IceCubeAlertsParams(hese_config, ehe_config)
param_server.seed = 42
nu_obs = IceCubeAlertsWrapper(param_server)
assert len(nu_obs.observation.ra) == len(nu_obs.observation.dec)
assert len(nu_obs.observation.ra) > 0
def test_icecube_tracks_diffuse_sim():
track_config_files = [f for f in config_files if ("tracks" in f and "diffuse" in f)]
for track_config_file in track_config_files:
track_config = get_path_to_config(track_config_file)
param_server = IceCubeObsParams.from_file(track_config)
param_server.seed = 42
nu_obs = IceCubeTracksWrapper(param_server)
assert len(nu_obs.observation.ra) == len(nu_obs.observation.dec)
assert len(nu_obs.observation.ra) > 0
| 2,022 | 27.9 | 88 |
py
|
nu_coincidence
|
nu_coincidence-main/docs/conf.py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = "nu_coincidence"
copyright = "2022, Francesca Capel"
author = "Francesca Capel"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"nbsphinx",
"sphinx.ext.githubpages",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
| 1,979 | 33.736842 | 79 |
py
|
spring-security
|
spring-security-main/itest/context/src/integration-test/resources/someMethod.py
|
print authentication.name;
for authority in authentication.authorities:
print authority
print "Granting access"
allow = 1
| 129 | 13.444444 | 44 |
py
|
rewalt
|
rewalt-main/tests/test_diagrams.py
|
from pytest import raises
from rewalt import utils, diagrams
from rewalt.ogposets import El
from rewalt.shapes import Shape
from rewalt.diagrams import (DiagSet, Diagram)
""" Tests for DiagSet """
def test_DiagSet_init():
X = DiagSet()
assert X == X
Y = DiagSet()
assert not X == Y
assert len(X) == 0
""" Some example objects """
Mon = DiagSet()
pt = Mon.add('pt')
a = Mon.add('a', pt, pt)
m = Mon.add('m', a.paste(a), a)
u = Mon.add('u', pt.unit(), a)
assoc = Mon.add(
'assoc',
m.to_inputs(0, m),
m.to_inputs(1, m))
lunit = Mon.add(
'lunit',
m.to_inputs(0, u),
a.lunitor('-'))
runit = Mon.add(
'runit',
m.to_inputs(1, u),
a.runitor('-'))
assinv, rinv, linv = Mon.invert('assoc')
RP3 = DiagSet()
c0 = RP3.add_simplex('c0')
c1 = RP3.add_simplex('c1', c0, c0)
c2 = RP3.add_simplex(
'c2',
c1,
c0.simplex_degeneracy(0),
c1)
c3 = RP3.add_simplex(
'c3',
c2,
c1.simplex_degeneracy(0),
c1.simplex_degeneracy(1),
c2)
T2 = DiagSet()
v = T2.add_cube('v')
e0 = T2.add_cube('e0', v, v)
e1 = T2.add_cube('e1', v, v)
s = T2.add_cube('s', e0, e0, e1, e1)
C = DiagSet()
x = C.add('x')
y = C.add('y')
z = C.add('z')
f = C.add('f', x, y)
g = C.add('g', y, z)
h, c_fg = C.compose(f.paste(g), 'h', 'c_fg')
def test_DiagSet_str():
assert str(Mon) == 'DiagSet with 10 generators'
def test_DiagSet_getitem():
assert m == Mon['m']
assert assoc == Mon['assoc']
with raises(KeyError) as err:
Mon['n']
assert str(err.value) == "'n'"
def test_DiagSet_contains():
assert 'm' in Mon
assert 'n' not in Mon
def test_DiagSet_len():
assert len(Mon) == 10
def test_DiagSet_generators():
X = DiagSet()
X.add('x')
assert X.generators == {
'x': {
'shape': Shape.point(),
'mapping': [['x']],
'faces': set(),
'cofaces': set()}}
def test_DiagSet_by_dim():
assert Mon.by_dim == {
0: {'pt'},
1: {'a'},
2: {'m', 'u'},
3: {'assoc', 'lunit', 'runit', 'assoc⁻¹'},
4: {'inv(assoc, assoc⁻¹)', 'inv(assoc⁻¹, assoc)'}}
def test_DiagSet_compositors():
assert C.compositors == {
'c_fg': {
'shape': f.paste(g).shape,
'mapping': f.paste(g).mapping}}
def test_DiagSet_dim():
assert Mon.dim == 4
assert C.dim == 2
def test_DiagSet_issimplicial():
assert RP3.issimplicial
assert not C.issimplicial
def test_DiagSet_iscubical():
assert T2.iscubical
assert not Mon.iscubical
def test_DiagSet_add():
X = DiagSet()
x = X.add('x')
with raises(ValueError) as err:
X.add('x')
assert str(err.value) == utils.value_err(
'x', 'name already in use')
y = X.add('y', linvertor='test')
with raises(KeyError) as err:
X.generators['y']['linvertor']
assert str(err.value) == "'linvertor'"
with raises(ValueError) as err:
X.add('a', pt, pt)
assert str(err.value) == utils.value_err(
pt, 'not a diagram in {}'.format(repr(X)))
a = X.add('a', x, y)
b = X.add('b', y, x)
with raises(ValueError) as err:
X.add('p', a, b)
assert str(err.value) == utils.value_err(
b,
'boundary does not match boundary of {}'.format(repr(a)))
def test_DiagSet_add_simplex():
S = DiagSet()
x0 = S.add_simplex('x0')
with raises(ValueError) as err:
S.add_simplex('x0')
assert str(err.value) == utils.value_err(
'x0', 'name already in use')
x1 = S.add_simplex('x1', x0, x0)
paste = x1.paste(x1)
with raises(TypeError) as err:
S.add_simplex('x2', paste, x1)
assert str(err.value) == utils.type_err(
diagrams.SimplexDiagram, paste)
with raises(ValueError) as err:
S.add_simplex('x2', x1, x0, x1)
assert str(err.value) == utils.value_err(
x0, 'expecting a 1-simplex in {}'.format(
repr(S)))
y0 = S.add_simplex('y0')
y1 = S.add_simplex('y1', y0, x0)
with raises(ValueError) as err:
S.add_simplex('x2', x1, y1, y1)
assert str(err.value) == utils.value_err(
y1, 'boundary of face does not match other faces')
def test_DiagSet_add_cube():
K = DiagSet()
x0 = K.add_cube('x0')
with raises(ValueError) as err:
K.add_cube('x0')
assert str(err.value) == utils.value_err(
'x0', 'name already in use')
x1 = K.add_cube('x1', x0, x0)
paste = x1.paste(x1)
with raises(TypeError) as err:
K.add_cube('x2', paste, paste)
assert str(err.value) == utils.type_err(
diagrams.CubeDiagram, paste)
with raises(ValueError) as err:
K.add_cube('x2', x1, x1, x0, x1)
assert str(err.value) == utils.value_err(
x0, 'expecting a 1-cube in {}'.format(
repr(K)))
y0 = K.add_cube('y0')
y1 = K.add_cube('y1', x0, y0)
with raises(ValueError) as err:
K.add_cube('x2', x1, x1, y1, y1)
assert str(err.value) == utils.value_err(
y1, 'boundary of face does not match other faces')
def test_DiagSet_invert():
X = DiagSet()
x = X.add('x')
y = X.add('y')
f = X.add('f', x, y)
g, rinv, linv = X.invert('f')
assert g.input == y
assert g.output == x
assert rinv.input == f.paste(g)
assert rinv.output == x.unit()
assert linv.input == g.paste(f)
assert linv.output == y.unit()
assert g.name == 'f⁻¹'
assert rinv.name == 'inv(f, f⁻¹)'
assert linv.name == 'inv(f⁻¹, f)'
with raises(ValueError) as err:
X.invert('x')
assert str(err.value) == utils.value_err(
'x', 'cannot invert 0-cell')
with raises(ValueError) as err:
X.invert('f⁻¹')
assert str(err.value) == utils.value_err(
'f⁻¹', 'already inverted')
def test_DiagSet_make_inverses():
X = DiagSet()
x = X.add('x')
y = X.add('y')
f = X.add('f', x, y)
g = X.add('g', y, x)
rinv, linv = X.make_inverses('f', 'g')
assert rinv.input == f.paste(g)
assert rinv.output == x.unit()
assert linv.input == g.paste(f)
assert linv.output == y.unit()
assert rinv.name == 'inv(f, g)'
assert linv.name == 'inv(g, f)'
X.add('h', y, x)
with raises(ValueError) as err:
X.make_inverses('h', 'f')
assert str(err.value) == utils.value_err(
'f', 'already inverted')
def test_DiagSet_compose():
X = DiagSet()
x = X.add('x')
y = X.add('y')
z = X.add('z')
f = X.add('f', x, y)
g = X.add('g', y, z)
fpasteg = f.paste(g)
fg, c_fg = X.compose(f.paste(g))
assert fg.input == x
assert fg.output == z
assert c_fg.input == f.paste(g)
assert c_fg.output == fg
assert fg.name == '⟨{}⟩'.format(f.paste(g).name)
assert c_fg.name == 'comp({})'.format(f.paste(g).name)
p = X.add('p', f, f)
q = X.add('q', g, g)
notround = p.paste(q, 0)
with raises(ValueError) as err:
X.compose(notround)
assert str(err.value) == utils.value_err(
notround, 'composable diagrams must have round shape')
with raises(ValueError) as err:
X.compose(fpasteg)
assert str(err.value) == utils.value_err(
fpasteg, 'already has a composite')
def test_DiagSet_make_composite():
X = DiagSet()
x = X.add('x')
y = X.add('y')
z = X.add('z')
f = X.add('f', x, y)
g = X.add('g', y, z)
h = X.add('h', x, z)
c_fg = X.make_composite('h', f.paste(g))
assert c_fg.input == f.paste(g)
assert c_fg.output == h
assert c_fg.name == 'comp({})'.format(f.paste(g).name)
p = X.add('p', f, f)
q = X.add('q', g, g)
X.add('r', f.paste(g), f.paste(g))
notround = p.paste(q, 0)
with raises(ValueError) as err:
X.make_composite('r', notround)
assert str(err.value) == utils.value_err(
notround, 'composable diagrams must have round shape')
X.add('k', x, z)
fpasteg = f.paste(g)
with raises(ValueError) as err:
X.make_composite('k', fpasteg)
assert str(err.value) == utils.value_err(
fpasteg, 'already has a composite')
def test_DiagSet_remove():
X = DiagSet()
x = X.add('x')
y = X.add('y')
X.add('f', x, y)
assert 'y' in X
assert 'f' in X
X.remove('y')
assert 'y' not in X
assert 'f' not in X
assert 'x' in X
a = X.add('a', x, x)
X.add('b', x, x)
rinv, linv = X.make_inverses('a', 'b')
assert a.isinvertiblecell
X.remove('b')
assert not a.isinvertiblecell
def test_DiagSet_update():
X = DiagSet()
X.add('x', color='blue')
assert X.generators['x']['color'] == 'blue'
X.update('x', color='magenta')
assert X.generators['x']['color'] == 'magenta'
with raises(AttributeError) as err:
X.update('x', shape='circle')
assert str(err.value) == "('shape', 'private attribute')"
def test_DiagSet_yoneda():
arrow = Shape.arrow()
embedarrow = DiagSet.yoneda(arrow)
assert embedarrow.by_dim == {
0: {El(0, 0), El(0, 1)},
1: {El(1, 0)}}
""" Tests for Diagram """
def test_Diagram_init():
empty = Diagram(C)
assert empty.shape == Shape.empty()
assert empty.mapping == []
assert empty.ambient == C
def test_Diagram_str():
assert str(a) == 'a'
assert str(c1) == 'c1'
def test_Diagram_eq():
assert a == Mon['a']
assert c2.output == c1.paste(c1)
def test_Diagram_len():
assert len(a) == 3
def test_Diagram_getitem():
assert c2[El(2, 0)] == 'c2'
assert c2[El(1, 0)] == 'c0'
assert c2[El(1, 1)] == 'c1'
def test_Diagram_contains():
assert El(1, 2) in c2
assert El(1, 3) not in c2
def test_Diagram_name():
assert assoc.name == 'assoc'
def test_Diagram_shape():
binary = Shape.simplex(2).dual()
assoc_l = binary.to_inputs(0, binary)
assoc_r = binary.to_inputs(1, binary)
assert assoc.shape == assoc_l.atom(assoc_r)
def test_Diagram_ambient():
assert a.ambient == Mon
assert c1.ambient == RP3
def test_Diagram_mapping():
assert c2.mapping == [
['c0', 'c0', 'c0'],
['c0', 'c1', 'c1'],
['c2']]
def test_Diagram_layers():
diagram = u.paste(pt.unit()).paste(
a.paste(u))
assert diagram.layers == [
u.paste(pt.unit()), a.paste(u)]
def test_Diagram_rewrite_steps():
diagram = u.paste(pt.unit()).paste(
a.paste(u))
assert diagram.rewrite_steps == [
pt.unit().paste(pt.unit()),
a.paste(pt.unit()),
a.paste(a)]
def test_Diagram_dim():
assert c0.dim == 0
assert c1.dim == 1
assert c2.dim == 2
assert c3.dim == 3
def test_Diagram_isdegenerate():
assert not Diagram(C).isdegenerate
assert a.lunitor('-').isdegenerate
assert not c2.isdegenerate
def test_Diagram_isround():
assert not m.paste(m, 0).isround
assert m.paste(m, 0).paste(m).isround
def test_Diagram_iscell():
assert m.iscell
assert a.unit().iscell
assert not m.paste(a).iscell
def test_Diagram_isinvertiblecell():
assert a.lunitor('-').isinvertiblecell
assert assoc.isinvertiblecell
assert not m.isinvertiblecell
def test_Diagram_hascomposite():
assert f.paste(g).hascomposite
assert m.hascomposite
assert not a.paste(a).hascomposite
def test_Diagram_rename():
fpasteg = f.paste(g)
assert str(fpasteg) == '(f) #0 (g)'
fpasteg.rename('f #0 g')
assert str(fpasteg) == 'f #0 g'
def test_Diagram_paste():
with raises(ValueError) as err:
f.paste(a)
assert str(err.value) == utils.value_err(
a, 'not the same ambient DiagSet')
assert f.paste(g).input == f.input
assert f.paste(g).output == g.output
assert f.paste(y, 0) == f
assert x.paste(f, 0) == f
with raises(ValueError) as err:
f.paste(f)
assert str(err.value) == utils.value_err(
f, 'boundary does not match boundary of {}'.format(
repr(f)))
def test_Diagram_to_outputs():
with raises(ValueError) as err:
f.to_outputs(1, a)
assert str(err.value) == utils.value_err(
a, 'not the same ambient DiagSet')
with raises(ValueError) as err:
c2.to_outputs(1, c2)
assert str(err.value) == utils.value_err(
c2, 'boundary does not match boundary of {}'.format(
repr(c2)))
assert c2.to_outputs(1, c1.unit()).output == c2.output
def test_Diagram_to_inputs():
with raises(ValueError) as err:
f.to_inputs(0, a)
assert str(err.value) == utils.value_err(
a, 'not the same ambient DiagSet')
pt2unit = pt.unit().unit()
with raises(ValueError) as err:
m.to_inputs(0, pt2unit)
assert str(err.value) == utils.value_err(
m,
'boundary does not match boundary of {}'.format(
repr(pt2unit)))
assert m.to_inputs(1, m).input == a.paste(a).paste(a)
def test_Diagram_pullback():
arrow = Shape.arrow()
connection = arrow.cube_connection(0, '-')
fconn = f.pullback(connection)
assert fconn.shape == connection.source
with raises(ValueError) as err:
m.pullback(connection)
assert str(err.value) == utils.value_err(
connection, 'target does not match diagram shape')
def test_Diagram_boundary():
assert m.input == a.paste(a)
assert m.output == a
assert m.boundary('-', 0) == pt
assert m.boundary('+', 0) == pt
def test_Diagram_unit():
diagram = m.to_inputs(0, u)
unit = diagram.unit()
assert unit.shape == diagram.shape.inflate().source
assert unit.input == diagram
assert unit.output == diagram
def test_Diagram_lunitor():
assert a.lunitor('-').input == pt.unit().paste(a)
assert a.lunitor('+') == a.lunitor('-').inverse
assert m.lunitor('-', 0).input == m.to_inputs(0, a.unit())
def test_Diagram_runitor():
assert a.runitor('+').output == a.paste(pt.unit())
assert a.runitor('+') == a.runitor('-').inverse
assert c2.runitor('-', 2).input == c2.to_outputs(2, c1.unit())
def test_Diagram_inverse():
assert assoc.inverse == assinv
assert a.unit().inverse == a.unit()
with raises(ValueError) as err:
m.inverse
assert str(err.value) == utils.value_err(
m, 'not an invertible cell')
def test_Diagram_rinvertor():
assert assoc.rinvertor == assinv.linvertor
assert assoc.rinvertor == rinv
assert a.unit().rinvertor == a.unit().lunitor('-')
with raises(ValueError) as err:
m.rinvertor
assert str(err.value) == utils.value_err(
m, 'not an invertible cell')
def test_Diagram_linvertor():
assert assoc.linvertor == linv
assert a.unit().linvertor == a.unit().lunitor('-')
with raises(ValueError) as err:
m.linvertor
assert str(err.value) == utils.value_err(
m, 'not an invertible cell')
def test_Diagram_composite():
assert f.paste(g).composite == h
assert m.composite == m
aa = a.paste(a)
with raises(ValueError) as err:
aa.composite
assert str(err.value) == utils.value_err(
aa, 'does not have a composite')
def test_Diagram_compositor():
assert f.paste(g).compositor == c_fg
assert m.compositor == m.unit()
aa = a.paste(a)
with raises(ValueError) as err:
aa.compositor
assert str(err.value) == utils.value_err(
aa, 'does not have a compositor')
def test_Diagram_yoneda():
arrow = Shape.arrow()
connection = arrow.cube_connection(0, '-')
yoneda_conn = Diagram.yoneda(connection)
assert yoneda_conn.ambient.generators == \
DiagSet.yoneda(arrow).generators
assert yoneda_conn.shape == connection.source
assert yoneda_conn.mapping == connection.mapping
def test_Diagram_with_layers():
layer1 = m.paste(pt.unit())
layer2 = a.paste(u)
layer3 = m
diagram = Diagram.with_layers(layer1, layer2, layer3)
assert diagram.layers == [layer1, layer2, layer3]
assert diagram == layer1.paste(
layer2.paste(layer3))
""" Tests for Diagram subclasses """
def test_SimplexDiagram():
assert isinstance(c3.simplex_face(2), diagrams.SimplexDiagram)
assert isinstance(
c1.simplex_degeneracy(1), diagrams.SimplexDiagram)
assert c1.simplex_degeneracy(1).simplex_degeneracy(2) == \
c1.simplex_degeneracy(1).simplex_degeneracy(1)
assert c2.simplex_degeneracy(2).simplex_face(2) == c2
assert c2.simplex_degeneracy(2).simplex_face(0) == \
c2.simplex_face(0).simplex_degeneracy(1)
def test_CubeDiagram():
assert isinstance(s.cube_face(1, '+'), diagrams.CubeDiagram)
assert isinstance(e1.cube_degeneracy(1), diagrams.CubeDiagram)
assert isinstance(e0.cube_connection(0, '+'), diagrams.CubeDiagram)
assert e0.cube_degeneracy(1).cube_degeneracy(2) == \
e0.cube_degeneracy(1).cube_degeneracy(1)
assert e1.cube_connection(0, '-').cube_connection(1, '-') == \
e1.cube_connection(0, '-').cube_connection(0, '-')
| 17,213 | 24.240469 | 71 |
py
|
rewalt
|
rewalt-main/tests/test_shapes.py
|
from pytest import raises
from rewalt import utils, shapes
from rewalt.ogposets import (El, OgPoset, OgMap, OgMapPair)
from rewalt.shapes import (Shape, ShapeMap)
""" Tests for Shape """
def test_Shape_init():
assert Shape() == Shape()
assert Shape().dim == -1
def test_Shape_isatom():
empty = Shape.empty()
arrow = Shape.arrow()
assert not empty.isatom
assert arrow.isatom
assert not arrow.paste(arrow).isatom
def test_Shape_isround():
frob = Shape.simplex(2).paste(Shape.arrow()).paste(
Shape.arrow().paste(Shape.simplex(2).dual()))
assert frob.isround
whisker_l = Shape.arrow().paste(Shape.globe(2))
assert not whisker_l.isround
def test_Shape_layers():
arrow = Shape.arrow()
globe = Shape.globe(2)
cospan = globe.paste(arrow).paste(
arrow.paste(globe), cospan=True)
shape = cospan.target
assert shape.layers == [cospan.fst, cospan.snd]
def test_Shape_rewrite_steps():
arrow = Shape.arrow()
globe = Shape.globe(2)
cospan = globe.paste(arrow).paste(
arrow.paste(globe), cospan=True)
shape = cospan.target
assert shape.rewrite_steps == [
cospan.fst.input,
cospan.fst.output,
cospan.snd.output]
def test_Shape_atom():
empty = Shape.empty()
point = Shape.point()
arrow = Shape.arrow()
globe = Shape.globe(2)
assert point == empty.atom(empty)
assert arrow == point.atom(point)
whisker_l = arrow.paste(globe)
with raises(ValueError) as err:
whisker_l.atom(whisker_l)
assert str(err.value) == utils.value_err(
whisker_l, 'expecting a round Shape')
binary = arrow.paste(arrow).atom(arrow)
cobinary = Shape.simplex(2)
with raises(ValueError) as err:
binary.atom(cobinary)
assert str(err.value) == utils.value_err(
cobinary, 'input boundary does not match '
'input boundary of {}'.format(repr(binary)))
with raises(ValueError) as err:
cobinary.atom(globe)
assert str(err.value) == utils.value_err(
globe, 'output boundary does not match '
'output boundary of {}'.format(repr(cobinary)))
assert isinstance(arrow.atom(arrow), shapes.Globe)
assert isinstance(point.atom(point), shapes.Arrow)
assert isinstance(binary, shapes.Opetope)
cospan = arrow.paste(arrow).atom(arrow, cospan=True)
assert cospan.fst == binary.input
assert cospan.snd == binary.output
def test_Shape_paste():
globe = Shape.globe(2)
arrow = Shape.arrow()
whisker_l = arrow.paste(globe)
whisker_r = globe.paste(arrow)
interch_1 = whisker_l.paste(whisker_r)
interch_2 = whisker_r.paste(whisker_l)
interch_3 = globe.paste(globe, 0)
assert interch_1 == interch_2 == interch_3
point = Shape.point()
assert point.paste(arrow, 0) == arrow
assert globe.paste(arrow, 1) == globe
with raises(ValueError) as err:
point.paste(point)
assert str(err.value) == utils.value_err(
-1, 'expecting non-negative integer')
cobinary = Shape.simplex(2)
with raises(ValueError) as err:
cobinary.paste(cobinary)
assert str(err.value) == utils.value_err(
cobinary,
'input 1-boundary does not match '
'output 1-boundary of {}'.format(repr(cobinary)))
binary = arrow.paste(arrow).atom(arrow)
assert isinstance(arrow.paste(arrow), shapes.GlobeString)
assert isinstance(whisker_l, shapes.Theta)
assert isinstance(binary.paste(globe), shapes.OpetopeTree)
cospan = globe.paste(arrow, cospan=True)
assert cospan.fst.source == globe
assert cospan.snd.source == arrow
assert cospan.target == whisker_r
def test_Shape_paste_along():
arrow = Shape.arrow()
pasting = arrow.paste(arrow, cospan=True)
atoming = pasting.target.atom(arrow, cospan=True)
binary = atoming.target
fst = binary.output
snd = pasting.fst.then(atoming.fst)
snd2 = pasting.snd.then(atoming.fst)
assoc_l = Shape.paste_along(fst, snd)
assert isinstance(assoc_l, shapes.OpetopeTree)
with raises(ValueError) as err:
Shape.paste_along(snd, snd)
assert str(err.value) == utils.value_err(
OgMapPair(snd, snd),
'not a well-formed span for pasting')
cospan = Shape.paste_along(fst, snd2, cospan=True)
assert cospan.fst.image().intersection(
cospan.snd.image()) == fst.then(cospan.fst).image()
def test_Shape_to_outputs():
arrow = Shape.arrow()
twothree = arrow.paste(arrow).atom(arrow.paste(arrow).paste(arrow))
threetwo = twothree.dual()
with raises(ValueError) as err:
twothree.to_outputs([2, 4], twothree)
assert str(err.value) == utils.value_err(
[2, 4], 'cannot paste to these outputs')
with raises(ValueError) as err:
twothree.to_outputs([2, 3], threetwo)
assert str(err.value) == utils.value_err(
[2, 3], 'does not match input boundary of {}'.format(
repr(threetwo)))
pasted = twothree.to_outputs([2, 3], twothree)
assert pasted.size == [7, 8, 2]
def test_Shape_to_inputs():
arrow = Shape.arrow()
twothree = arrow.paste(arrow).atom(arrow.paste(arrow).paste(arrow))
threetwo = twothree.dual()
with raises(ValueError) as err:
threetwo.to_inputs([0, 2], threetwo)
assert str(err.value) == utils.value_err(
[0, 2], 'cannot paste to these inputs')
with raises(ValueError) as err:
threetwo.to_inputs([0, 1], twothree)
assert str(err.value) == utils.value_err(
[0, 1], 'does not match output boundary of {}'.format(
repr(twothree)))
pasted = threetwo.to_inputs([0, 1], threetwo)
assert pasted.size == [7, 8, 2]
def test_Shape_suspend():
globe2 = Shape.globe(2)
arrow = Shape.arrow()
whisker_l = arrow.paste(globe2)
assert whisker_l.suspend().size == [2] + whisker_l.size
assert arrow.suspend(2) == globe2.suspend()
assert isinstance(arrow.paste(arrow).suspend(), shapes.GlobeString)
assert isinstance(whisker_l.suspend(), shapes.Theta)
def test_Shape_gray():
globe2 = Shape.globe(2)
arrow = Shape.arrow()
assert (globe2 * arrow).size == [4, 6, 4, 1]
assert Shape.gray() == Shape.point()
assert (arrow * arrow) * arrow == arrow * (arrow * arrow)
assert isinstance(arrow * arrow, shapes.Cube)
def test_Shape_join():
point = Shape.point()
arrow = Shape.arrow()
assert arrow >> point == point << point << point
assert Shape.join() == Shape.empty()
assert (point >> point) >> point == point >> (point >> point)
assert isinstance(arrow >> point, shapes.Simplex)
def test_Shape_dual():
arrow = Shape.arrow()
simplex = Shape.simplex(2)
binary = arrow.paste(arrow).atom(arrow)
assert binary == simplex.dual()
assoc_l = binary.to_inputs(0, binary)
assoc_r = binary.to_inputs(1, binary)
assert assoc_r == assoc_l.dual(1)
globe = Shape.globe(2)
assert isinstance(arrow.paste(globe).dual(), shapes.Theta)
def test_Shape_merge():
arrow = Shape.arrow()
binary = arrow.paste(arrow).atom(arrow)
ternary = arrow.paste(arrow).paste(arrow).atom(arrow)
assoc_l = binary.to_inputs(0, binary)
assert assoc_l.merge() == ternary
assert isinstance(assoc_l.merge(), shapes.Opetope)
def test_Shape_empty():
empty = Shape.empty()
assert len(empty) == 0
def test_Shape_point():
point = Shape.point()
assert len(point) == 1
def test_Shape_arrow():
arrow = Shape.arrow()
assert arrow.size == [2, 1]
def test_Shape_simplex():
assert len(Shape.simplex()) == 0
arrow = Shape.simplex(1)
assert arrow == Shape.arrow()
assert arrow >> arrow == Shape.simplex(3)
def test_Shape_cube():
assert len(Shape.cube()) == 1
arrow = Shape.cube(1)
assert arrow == Shape.arrow()
assert arrow*arrow == Shape.cube(2)
def test_Shape_globe():
assert len(Shape.globe()) == 1
arrow = Shape.globe(1)
assert arrow == Shape.arrow()
assert arrow.suspend() == Shape.globe(2)
def test_Shape_theta():
assert Shape.theta() == Shape.globe(0)
assert Shape.theta(Shape.theta()) == Shape.globe(1)
assert Shape.theta(Shape.theta(Shape.theta())) == Shape.globe(2)
point = Shape.theta()
arrow = Shape.arrow()
assert Shape.theta(point, point) == arrow.paste(arrow)
def test_Shape_id():
arrow = Shape.arrow()
assert arrow.id().source == arrow
assert arrow.id().target == arrow
def test_Shape_boundary():
arrow = Shape.arrow()
binary = arrow.paste(arrow).atom(arrow)
assert binary.boundary('-').source == arrow.paste(arrow)
assert binary.boundary('+').source == arrow
assert binary.boundary('-', 0).source == Shape.point()
assert binary.boundary('-').target == binary
assert not isinstance(binary.boundary().source, Shape)
assoc_l = binary.to_inputs(0, binary)
assert isinstance(assoc_l.boundary('-').source, shapes.OpetopeTree)
assert isinstance(assoc_l.boundary('+').source, shapes.Arrow)
def test_Shape_atom_inclusion():
arrow = Shape.arrow()
globe = Shape.globe(2)
whisker_l = arrow.paste(globe)
assert whisker_l.atom_inclusion(El(2, 0)).source == globe
assert isinstance(
whisker_l.atom_inclusion(El(2, 0)).source,
shapes.Globe)
binary = arrow.paste(arrow).atom(arrow)
assoc_l = binary.to_inputs(0, binary)
assert isinstance(
assoc_l.atom_inclusion(El(2, 0)).source,
shapes.Opetope)
simplex = Shape.simplex(3)
assert isinstance(
simplex.atom_inclusion(El(2, 0)).source,
shapes.Simplex)
cube = Shape.cube(3)
assert isinstance(
cube.atom_inclusion(El(2, 0)).source,
shapes.Cube)
def test_Shape_initial():
point = Shape.point()
empty = Shape.empty()
assert point.initial() == empty.terminal()
assert empty.initial() == empty.id()
assert point.initial().istotal
def test_Shape_terminal():
point = Shape.point()
assert point.terminal() == point.id()
def test_Shape_inflate():
simplex2 = Shape.simplex(2)
assert simplex2.inflate().boundary('-').source == \
simplex2
whisker_l = Shape.arrow().paste(Shape.globe(2))
assert whisker_l.inflate().boundary('+').source == \
whisker_l
def test_Shape_all_layerings():
globe = Shape.globe(2)
chain = globe.paste(globe, 0)
for n, x in enumerate(chain.all_layerings()):
number = n+1
assert number == 2
def test_Shape_generate_layering():
arrow = Shape.arrow()
globe = Shape.globe(2)
chain = globe.paste(globe, 0)
chain.generate_layering()
assert chain.layers[0].source == arrow.paste(globe)
assert chain.layers[1].source == globe.paste(arrow)
chain.generate_layering()
assert chain.layers[0].source == globe.paste(arrow)
assert chain.layers[1].source == arrow.paste(globe)
""" Tests for Shape subclasses """
def test_Simplex():
arrow = Shape.simplex(1)
triangle = Shape.simplex(2)
tetra = Shape.simplex(3)
assert tetra.simplex_face(3).source == triangle
assert tetra.simplex_face(0) == tetra.atom_inclusion(
El(2, 3))
map1 = triangle.simplex_degeneracy(2).then(
arrow.simplex_degeneracy(1))
map2 = triangle.simplex_degeneracy(1).then(
arrow.simplex_degeneracy(1))
assert map1 == map2
map3 = tetra.simplex_face(2).then(
triangle.simplex_degeneracy(2))
assert map3 == triangle.id()
map4 = tetra.simplex_face(0).then(
triangle.simplex_degeneracy(2))
map5 = arrow.simplex_degeneracy(1).then(
triangle.simplex_face(0))
assert map4 == map5
def test_Cube():
arrow = Shape.cube(1)
square = Shape.cube(2)
cube = Shape.cube(3)
map1 = square.cube_degeneracy(2).then(
arrow.cube_degeneracy(1))
map2 = square.cube_degeneracy(1).then(
arrow.cube_degeneracy(1))
assert map1 == map2
map3 = square.cube_face(0, '+').then(
cube.cube_face(2, '-'))
map4 = square.cube_face(1, '-').then(
cube.cube_face(0, '+'))
assert map3 == map4
map5 = square.cube_connection(1, '-').then(
arrow.cube_connection(0, '-'))
map6 = square.cube_connection(0, '-').then(
arrow.cube_connection(0, '-'))
assert map5 == map6
""" Tests for ShapeMap """
def test_ShapeMap_init():
point = OgPoset.point()
arrow = point >> point
ogmap = OgMap(
arrow, point,
[[El(0, 0), El(0, 0)], [El(0, 0)]])
with raises(TypeError) as err:
ShapeMap(ogmap)
assert str(err.value) == utils.type_err(Shape, arrow)
arrow = Shape.arrow()
ogmap = OgMap(
arrow, point,
[[El(0, 0), El(0, 0)], [El(0, 0)]])
with raises(TypeError) as err:
ShapeMap(ogmap)
assert str(err.value) == utils.type_err(Shape, point)
point = Shape.point()
undefined = OgMap(point, arrow)
with raises(ValueError) as err:
ShapeMap(undefined)
assert str(err.value) == utils.value_err(
undefined,
'a ShapeMap must be total')
def test_ShapeMap_then():
point = Shape.point()
arrow = Shape.arrow()
terminal = arrow.terminal()
first_inj = arrow.atom_inclusion(El(0, 0))
assert isinstance(terminal.then(first_inj), ShapeMap)
ogmap = OgMap(
arrow, point,
[[El(0, 0), El(0, 0)], [El(0, 0)]])
assert not isinstance(ogmap.then(first_inj), ShapeMap)
def test_ShapeMap_layers():
globe = Shape.globe(2)
cospan = globe.paste(globe, cospan=True)
twoglobes = cospan.target
cospanterminal = cospan.then(twoglobes.terminal())
assert twoglobes.terminal().layers == [
cospanterminal.fst, cospanterminal.snd]
def test_ShapeMap_rewrite_steps():
arrow = Shape.arrow()
globe = Shape.globe(2)
twoglobes = globe.paste(globe)
assert twoglobes.terminal().rewrite_steps == [
arrow.terminal(), arrow.terminal(), arrow.terminal()]
def test_ShapeMap_gray():
point = Shape.point()
arrow = Shape.arrow()
terminal = arrow.terminal()
first_inj = arrow.atom_inclusion(El(0, 0))
assert ShapeMap.gray() == point.id()
assert arrow.id() * arrow.id() == (arrow * arrow).id()
assert (terminal * arrow.id()).then(
first_inj * terminal) == terminal.then(first_inj) * terminal
def test_ShapeMap_join():
empty = Shape.empty()
arrow = Shape.arrow()
terminal = arrow.terminal()
first_inj = arrow.atom_inclusion(El(0, 0))
assert ShapeMap.join() == empty.id()
assert arrow.id() >> arrow.id() == (arrow >> arrow).id()
assert (terminal >> arrow.id()).then(
first_inj >> terminal) == terminal.then(first_inj) >> terminal
def test_ShapeMap_dual():
arrow = Shape.arrow()
degen0 = arrow.simplex_degeneracy(0)
degen1 = arrow.simplex_degeneracy(1)
assert degen0.op() == degen1
cdegen0 = arrow.cube_degeneracy(0)
cdegen1 = arrow.cube_degeneracy(1)
assert cdegen0.co() == cdegen1
| 15,171 | 27.358879 | 71 |
py
|
rewalt
|
rewalt-main/tests/test_ogposets.py
|
import numpy as np
from pytest import raises
from rewalt import utils
from rewalt.ogposets import (El, OgPoset, GrSet, GrSubset, Closed,
OgMap, OgMapPair)
""" Tests for El """
def test_El_init():
assert El(2, 3) == El(2, 3)
assert El(2, 3) != El(1, 3)
assert El(2, 3) != El(2, 2)
with raises(TypeError) as err:
El('x', 2)
assert str(err.value) == utils.type_err(int, 'x')
with raises(ValueError) as err:
El(3, -1)
assert str(err.value) == utils.value_err(
-1, 'expecting non-negative integer')
def test_El_dim():
assert El(2, 3).dim == 2
assert El(2, 3).pos == 3
def test_El_shifted():
assert El(2, 3).shifted(4) == El(2, 7)
with raises(TypeError) as err:
El(2, 3).shifted('x')
assert str(err.value) == utils.type_err(int, 'x')
with raises(ValueError) as err:
El(2, 3).shifted(-4)
assert str(err.value) == utils.value_err(
-4, 'shifted position must be non-negative')
""" Tests for OgPoset """
def test_OgPoset_init():
test_face = [
[
{'-': set(), '+': set()},
{'-': set(), '+': set()},
], [
{'-': {0}, '+': {1}}
]]
test_coface = [
[
{'-': {0}, '+': set()},
{'-': set(), '+': {0}},
], [
{'-': set(), '+': set()}
]]
assert OgPoset(test_face, test_coface) == OgPoset(test_face, test_coface)
test_face[0][0]['-'] = {0}
with raises(ValueError) as err:
OgPoset(test_face, test_coface)
assert str(err.value) == utils.value_err(0, 'out of bounds')
test_face[0][0]['-'] = set()
test_face[1][0]['-'] = {2}
with raises(ValueError) as err:
OgPoset(test_face, test_coface)
assert str(err.value) == utils.value_err(2, 'out of bounds')
test_face[1][0]['-'] = {1}
with raises(ValueError) as err:
OgPoset(test_face, test_coface)
assert str(err.value) == utils.value_err(
test_face, 'input and output faces of El(1, 0) are not disjoint')
test_face[1][0]['-'] = set()
test_face[1][0]['+'] = set()
with raises(ValueError) as err:
OgPoset(test_face, test_coface)
assert str(err.value) == utils.value_err(
test_face, 'El(1, 0) must have at least one face')
test_face[1][0]['-'] = {1}
test_face[1][0]['+'] = {0}
with raises(ValueError) as err:
OgPoset(test_face, test_coface)
assert str(err.value) == utils.value_err(
test_coface, 'face and coface data do not match')
test_face[1][0]['-'] = {'x'}
with raises(TypeError) as err:
OgPoset(test_face, test_coface)
assert str(err.value) == utils.type_err(int, 'x')
test_face[1][0]['-'] = 0
with raises(TypeError) as err:
OgPoset(test_face, test_coface)
assert str(err.value) == utils.type_err(set, 0)
test_face[1][0] = {'k': {0}}
with raises(ValueError) as err:
OgPoset(test_face, test_coface)
assert str(err.value) == utils.value_err(
{'k': {0}},
"expecting dict with keys '-', '+'")
test_face[1][0] = {0}
with raises(TypeError) as err:
OgPoset(test_face, test_coface)
assert str(err.value) == utils.type_err(dict, {0})
test_face[1] = []
with raises(ValueError) as err:
OgPoset(test_face, test_coface)
assert str(err.value) == utils.value_err(
[], 'expecting non-empty list')
test_face[1] = {0}
with raises(TypeError) as err:
OgPoset(test_face, test_coface)
assert str(err.value) == utils.type_err(list, {0})
test_face = {0}
with raises(TypeError) as err:
OgPoset(test_face, test_coface)
assert str(err.value) == utils.type_err(list, {0})
""" Various example objects here """
whisker_face = [
[
{'-': set(), '+': set()},
{'-': set(), '+': set()},
{'-': set(), '+': set()}
], [
{'-': {0}, '+': {1}},
{'-': {0}, '+': {1}},
{'-': {1}, '+': {2}}
], [
{'-': {0}, '+': {1}}
]]
whisker_coface = [
[
{'-': {0, 1}, '+': set()},
{'-': {2}, '+': {0, 1}},
{'-': set(), '+': {2}}
], [
{'-': {0}, '+': set()},
{'-': set(), '+': {0}},
{'-': set(), '+': set()}
], [
{'-': set(), '+': set()}
]]
whisker = OgPoset(whisker_face, whisker_coface)
empty = OgPoset([], [])
point = OgPoset.from_face_data([
[{'-': set(), '+': set()}]
])
interval_face = [
[
{'-': set(), '+': set()},
{'-': set(), '+': set()},
], [
{'-': {0}, '+': {1}},
]]
interval = OgPoset.from_face_data(interval_face)
test_grset = GrSet(El(0, 2), El(2, 0), El(0, 5))
test_grsubset = GrSubset(GrSet(El(0, 2), El(2, 0)), whisker)
test_closed = test_grsubset.closure()
interval_grsubset = GrSubset(GrSet(El(0, 1)), interval)
whisker_all = whisker.all()
injection = OgMap(interval, whisker, [
[El(0, 1), El(0, 2)],
[El(1, 2)]])
collapse = OgMap(whisker, interval, [
[El(0, 0), El(0, 1), El(0, 1)],
[El(1, 0), El(1, 0), El(0, 1)],
[El(1, 0)]])
composite = OgMap(interval, interval, [
[El(0, 1), El(0, 1)],
[El(0, 1)]])
def test_OgPoset_str():
assert str(whisker) == 'OgPoset with [3, 3, 1] elements'
def test_OgPoset_getitem():
assert whisker[2] == GrSubset(GrSet(El(2, 0)), whisker)
def test_OgPoset_contains():
assert El(1, 2) in whisker
assert El(1, 3) not in whisker
assert El(3, 0) not in whisker
def test_OgPoset_len():
assert len(whisker) == 7
assert len(interval) == 3
def test_OgPoset_size():
assert whisker.size == [3, 3, 1]
def test_OgPoset_dim():
assert whisker.dim == 2
def test_OgPoset_as_chain():
chain = [
np.array([[-1, -1, 0], [1, 1, -1], [0, 0, 1]]),
np.array([[-1], [1], [0]])
]
test_chain = whisker.as_chain
assert (test_chain[0] == chain[0]).all() and \
(test_chain[1] == chain[1]).all()
def test_OgPoset_all():
assert whisker.all() == Closed(
GrSet(El(0, 0), El(0, 1), El(0, 2),
El(1, 0), El(1, 1), El(1, 2),
El(2, 0)),
whisker)
def test_OgPoset_faces():
assert whisker.faces(El(2, 0), '-') == GrSet(El(1, 0))
assert whisker.faces(El(2, 0), '+') == GrSet(El(1, 1))
assert whisker.faces(El(0, 0), 0) == GrSet()
assert whisker.faces(El(1, 2)) == GrSet(El(0, 1), El(0, 2))
def test_OgPoset_from_face_data():
assert whisker == OgPoset.from_face_data(whisker_face)
def test_OgPoset_id():
assert whisker.image(whisker.id()) == whisker.all()
def test_OgPoset_boundary():
assert whisker.boundary('-', 0).target == whisker
assert whisker.boundary('-', 0).source == point
def test_OgPoset_coproduct():
assert OgPoset.coproduct(point, point).iscospan
assert OgPoset.coproduct(point, whisker).snd.isinjective
def test_OgPoset_disjoint_union():
assert OgPoset.disjoint_union(point, interval).size == [3, 1]
assert OgPoset.disjoint_union(interval, whisker).size == [5, 4, 1]
assert whisker + empty == empty + whisker == whisker
def test_OgPoset_gray():
assert OgPoset.gray(interval, interval).size == [4, 4, 1]
assert OgPoset.gray(interval, point) == interval
""" Tests for GrSet """
def test_GrSet_init():
assert GrSet(El(0, 2), El(1, 4), El(0, 3)) \
== GrSet(El(0, 2), El(1, 4), El(0, 3))
with raises(TypeError) as err:
GrSet((0, 2))
assert str(err.value) == utils.type_err(El, (0, 2))
def test_GrSet_str():
assert str(test_grset) == 'GrSet(El(0, 2), El(0, 5), El(2, 0))'
def test_GrSet_contains():
assert El(0, 5) in test_grset
assert El(0, 4) not in test_grset
assert El(1, 0) not in test_grset
assert 'x' not in test_grset
def test_GrSet_len():
assert len(test_grset) == 3
def test_GrSet_iter():
assert test_grset == GrSet(*test_grset)
def test_GrSet_getitem():
assert test_grset[0] == GrSet(El(0, 2), El(0, 5))
assert test_grset[1] == GrSet()
assert test_grset[:3] == test_grset
assert test_grset[1:] == test_grset[2]
assert test_grset[:] == test_grset
with raises(KeyError) as err:
test_grset['x']
assert str(err.value) == "'x'"
def test_GrSet_grades():
assert test_grset.grades == [0, 2]
def test_GrSet_dim():
assert test_grset.dim == 2
assert GrSet().dim == -1
def test_GrSet_as_set():
assert test_grset.as_set == \
{El(0, 2), El(2, 0), El(0, 5)}
def test_GrSet_as_list():
assert test_grset.as_list == \
[El(0, 2), El(0, 5), El(2, 0)]
def test_GrSet_add():
test_grset.add(El(3, 6))
assert test_grset == GrSet(El(0, 2), El(3, 6), El(2, 0), El(0, 5))
test_grset.add(El(0, 5))
assert test_grset == GrSet(El(0, 2), El(3, 6), El(2, 0), El(0, 5))
with raises(TypeError) as err:
test_grset.add((3, 5))
assert str(err.value) == utils.type_err(El, (3, 5))
def test_GrSet_remove():
test_grset.remove(El(3, 6))
assert test_grset == GrSet(El(0, 2), El(2, 0), El(0, 5))
with raises(ValueError) as err:
test_grset.remove(El(3, 6))
assert str(err.value) == utils.value_err(
El(3, 6), 'not in graded set')
def test_GrSet_union():
assert test_grset.union(GrSet(El(1, 3), El(2, 0))) == \
GrSet(El(0, 2), El(0, 5), El(1, 3), El(2, 0))
def test_GrSet_intersection():
assert test_grset.intersection(GrSet(El(1, 3), El(2, 0))) == \
GrSet(El(2, 0))
assert test_grset.intersection(GrSet(El(1, 3))) == GrSet()
def test_GrSet_issubset():
assert test_grset.issubset(test_grset)
assert GrSet(El(0, 2), El(2, 0)).issubset(test_grset)
assert not GrSet(El(0, 3)).issubset(test_grset)
def test_GrSet_isdisjoint():
assert test_grset.isdisjoint(GrSet(El(1, 3), El(0, 4)))
assert not test_grset.isdisjoint(GrSet(El(0, 2), El(1, 3)))
""" Tests for GrSubset """
def test_GrSubset():
assert GrSubset(GrSet(El(0, 2), El(2, 0)), whisker) == \
GrSubset(GrSet(El(0, 2), El(2, 0)), whisker)
assert GrSubset(GrSet(El(0, 1)), interval) != \
GrSubset(GrSet(El(0, 1)), whisker)
assert GrSubset(GrSet(El(0, 1)), whisker) != \
GrSubset(GrSet(El(0, 2)), whisker)
def test_GrSubset_init():
with raises(ValueError) as err:
GrSubset(test_grset, whisker)
assert str(err.value) == utils.value_err(
test_grset, 'does not define a subset')
def test_GrSubset_str():
assert str(GrSubset(GrSet(El(0, 1)), interval)) == \
'GrSubset with 1 elements in OgPoset with [2, 1] elements'
def test_GrSubset_contains():
assert El(0, 2) in test_grsubset
assert El(1, 1) not in test_grsubset
def test_GrSubset_getitem():
assert test_grsubset[0] == GrSubset(GrSet(El(0, 2)), whisker)
assert test_grsubset[1:] == GrSubset(GrSet(El(2, 0)), whisker)
def test_GrSubset_support():
assert test_grsubset.support == GrSet(El(0, 2), El(2, 0))
def test_GrSubset_ambient():
assert test_grsubset.ambient == whisker
def test_GrSubset_isclosed():
assert not test_grsubset.isclosed
assert whisker_all.isclosed
def test_GrSubset_union():
assert test_grsubset.union(GrSubset(
GrSet(El(0, 2), El(1, 1)), whisker), GrSubset(
GrSet(El(2, 0), El(1, 2)), whisker)) == GrSubset(
GrSet(El(0, 2), El(2, 0), El(1, 1), El(1, 2)), whisker)
with raises(ValueError) as err:
test_grsubset.union(interval_grsubset)
assert str(err.value) == utils.value_err(
interval_grsubset,
'not a subset of the same OgPoset')
assert not isinstance(
test_grsubset.union(whisker.all()),
Closed)
assert isinstance(
test_grsubset.closure().union(whisker.all()),
Closed)
def test_GrSubset_intersection():
assert test_grsubset.intersection(GrSubset(
GrSet(El(0, 2), El(1, 1)), whisker)) == GrSubset(
GrSet(El(0, 2)), whisker)
assert not isinstance(
test_grsubset.intersection(whisker.all()),
Closed)
assert isinstance(
test_grsubset.closure().intersection(whisker.all()),
Closed)
def test_GrSubset_closure():
assert test_grsubset.closure() == Closed(
GrSet(El(0, 0), El(0, 1), El(0, 2),
El(1, 0), El(1, 1), El(2, 0)),
whisker)
assert whisker_all.maximal().closure() == \
whisker_all
def test_GrSubset_image():
assert test_grsubset.image(collapse) == GrSubset(
GrSet(El(0, 1), El(1, 0)), interval)
with raises(ValueError) as err:
test_grsubset.image(injection)
assert str(err.value) == utils.value_err(
injection, 'OgMap source does not match ambient OgPoset')
assert isinstance(
test_closed.image(collapse),
Closed)
assert not isinstance(
test_grsubset.image(collapse),
Closed)
""" Tests for Closed """
def test_Closed():
assert Closed(
GrSet(El(0, 0), El(0, 1), El(1, 1)),
whisker) == GrSubset(
GrSet(El(1, 1)), whisker).closure()
with raises(ValueError) as err:
Closed(GrSet(El(1, 1)), whisker)
assert str(err.value) == utils.value_err(
GrSet(El(1, 1)), 'not a closed subset')
def test_Closed_as_map():
assert Closed(
GrSet(El(0, 1), El(0, 2), El(1, 2)),
whisker).as_map == injection
assert test_closed.as_map.image() == test_closed
assert interval.image(injection).as_map == injection
def test_Closed_maximal():
assert whisker_all.maximal() == GrSubset(
GrSet(El(2, 0), El(1, 2)), whisker)
assert test_closed.maximal() == test_grsubset
def test_Closed_boundary():
assert whisker_all.boundary('-', 0) == Closed(
GrSet(El(0, 0)), whisker)
assert whisker_all.boundary('+', 0) == Closed(
GrSet(El(0, 2)), whisker)
assert whisker_all.boundary('s', 1) == GrSubset(
GrSet(El(1, 0), El(1, 2)), whisker).closure()
assert whisker_all.boundary('+') == GrSubset(
GrSet(El(1, 1), El(1, 2)), whisker).closure()
assert whisker_all.boundary(0, 2) == whisker_all
assert whisker_all.boundary() == Closed.subset(whisker_all[:2])
assert whisker_all.boundary(None, 0) == Closed(
GrSet(El(0, 0), El(0, 2)), whisker)
assert whisker_all.boundary('-', 3) == whisker_all
assert whisker_all.boundary('-', -1) == Closed(
GrSet(), whisker)
assert Closed(GrSet(El(0, 0)), whisker).boundary('-') == \
Closed(GrSet(), whisker)
assert Closed(GrSet(), whisker).boundary('-') == \
Closed(GrSet(), whisker)
""" Tests for OgMap """
def test_OgMap_init():
assert OgMap(whisker, interval) == OgMap(whisker, interval)
# TODO: tests for well-formedness
def test_OgMap_getitem():
assert injection[El(0, 0)] == El(0, 1)
assert collapse[El(2, 0)] == El(1, 0)
assert OgMap(interval, whisker)[El(0, 1)] is None
with raises(ValueError) as err:
injection[El(0, 2)]
assert str(err.value) == utils.value_err(
El(0, 2), 'not in source')
def test_OgMap_setitem():
test_setitem = OgMap(interval, whisker)
test_setitem[El(0, 0)] = El(0, 1)
with raises(ValueError) as err:
test_setitem[El(0, 1)] = El(0, 3)
assert str(err.value) == utils.value_err(
El(0, 3), 'not in target')
with raises(ValueError) as err:
test_setitem[El(0, 0)] = El(0, 2)
assert str(err.value) == utils.value_err(
El(0, 0), 'already defined on element')
with raises(ValueError) as err:
test_setitem[El(0, 1)] = El(1, 0)
assert str(err.value) == utils.value_err(
El(1, 0), 'exceeds dimension of El(0, 1)')
with raises(ValueError) as err:
test_setitem[El(1, 0)] = El(1, 2)
assert str(err.value) == utils.value_err(
El(1, 0), 'map undefined on El(0, 1) below El(1, 0)')
test_setitem[El(0, 1)] = El(0, 2)
with raises(ValueError) as err:
test_setitem[El(1, 0)] = El(1, 1)
assert str(err.value) == utils.value_err(
El(1, 1),
'assignment does not respect (-, 0)-boundary of El(1, 0)')
test_setitem[El(1, 0)] = El(1, 2)
assert test_setitem == injection
def test_OgMap_mapping():
assert OgMap(interval, whisker).mapping == [[None, None], [None]]
def test_OgMap_istotal():
assert injection.istotal
assert not OgMap(interval, whisker).istotal
def test_OgMap_isinjective():
assert injection.isinjective
assert not collapse.isinjective
def test_OgMap_issurjective():
assert not injection.issurjective
assert collapse.issurjective
def test_OgMap_isiso():
assert whisker.id().isiso
assert not OgMap(whisker, empty).isiso
def test_OgMap_isdefined():
assert injection.isdefined(El(0, 1))
assert not OgMap(interval, whisker).isdefined(El(0, 1))
assert not injection.isdefined(El(0, 2))
def test_OgMap_then():
assert injection.then(collapse) == composite
assert injection.then(OgMap(whisker, whisker)) == \
OgMap(interval, whisker)
assert injection.then(
collapse, OgMap(interval, interval)) == \
OgMap(interval, interval)
with raises(ValueError) as err:
injection.then(injection)
assert str(err.value) == utils.value_err(
injection, 'source does not match target of first map')
def test_OgMap_inv():
assert whisker.id().inv() == whisker.id()
with raises(ValueError) as err:
injection.inv()
assert str(err.value) == utils.value_err(
injection, 'not an isomorphism')
""" Tests for OgMapPair """
def test_OgMapPair():
assert OgMapPair(injection, collapse) == \
OgMapPair(injection, collapse)
assert OgMapPair(injection, collapse) != \
OgMapPair(collapse, injection)
def test_OgMapPair_source():
assert OgMapPair(injection, interval.id()).source == interval
def test_OgMapPair_target():
assert OgMapPair(collapse, interval.id()).target == interval
def test_OgMapPair_isspan():
assert OgMapPair(
injection, injection.then(collapse)).isspan
assert not OgMapPair(injection, collapse).isspan
def test_OgMapPair_iscospan():
assert OgMapPair(
injection, collapse.then(injection)).iscospan
assert not OgMapPair(injection, collapse).iscospan
def test_OgMapPair_isparallel():
assert OgMapPair(composite, interval.id()).isparallel
def test_OgMapPair_pushout():
assert OgMapPair(
interval.boundary('+'),
interval.boundary('-')).pushout().target.size == [3, 2]
| 18,931 | 25.968661 | 77 |
py
|
rewalt
|
rewalt-main/tests/__init__.py
| 0 | 0 | 0 |
py
|
|
rewalt
|
rewalt-main/docs/conf.py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
def get_version():
from rewalt import __version__
return __version__
# -- Project information -----------------------------------------------------
project = 'rewalt'
copyright = '2022, Amar Hadzihasanovic'
author = 'Amar Hadzihasanovic'
# The full version, including alpha/beta/rc tags
release = get_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.autosummary',
'nbsphinx',
]
autosummary_generate = True
autosummary_context = {"excluded": ["__init__"]}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 2,221 | 30.295775 | 79 |
py
|
rewalt
|
rewalt-main/rewalt/strdiags.py
|
"""
Implements string diagram visualisations.
"""
import networkx as nx
from rewalt import (utils, ogposets, shapes, diagrams, drawing)
DEFAULT = {
'tikz': False,
'scale': 3,
'show': True,
'depth': True,
'bgcolor': 'white',
'fgcolor': 'black',
'infocolor': 'magenta',
'degenalpha': 0.1,
'labels': True,
'labeloffset': (4, 4),
'positions': False,
'positionoffset': (4, -16),
'positionoffsettikz': (4, -6),
'orientation': 'bt'}
class StrDiag:
"""
Class for string diagram visualisations of diagrams and shapes.
A string diagram depicts a top-dimensional "slice" of a diagram.
The top-dimensional cells are represented as *nodes*, and the
codimension-1 cells are represented as *wires*. The inputs of a
top-dimensional cell are incoming wires of the associated node,
and the outputs are outgoing wires.
The input->node->output order determines an acyclic flow
between nodes and wires, which is represented in a string diagram
by placing them at different "heights".
There are two other "flows" that we take into account:
- from codimension-2 inputs, to top-dimensional or codimension-1
cell, to codimension-2 outputs (only in dimension > 1);
- from codimension-3 inputs, to codimension-1 cells, to
codimension-3 outputs (only in dimension > 2).
These are not in general acyclic; however, we obtain an acyclic
flow by removing all directed loops. If there is a flow of the first
kind between nodes and wires, we place them at different "widths".
If there is a flow of the second kind between wires, we place them
at different "depths"; this is only seen when wires cross each other,
in which case the one of lower depth is depicted as passing over
the one of higher depth.
Internally, these data are encoded as a triple of NetworkX directed
graphs: the "main (height) flow" graph and the "width flow" graph share
the same vertices, partitioned into "node vertices" and "wire vertices",
while the "depth flow" graph has wire vertices only.
The class then contains a method :meth:`place_vertices` that places
the vertices on a [0, 1]x[0, 1] canvas, taking into account the
height and width relations and resolving clashes.
Finally, it contains a method :meth:`draw` that outputs a
visualisation of the string diagram. The visualisation has
customisable colours, orientation, and labels, and works with any
:class:`drawing.DrawBackend`; currently available are
- a Matplotlib backend, and
- a TikZ backend.
Arguments
---------
diagram : :class:`diagrams.Diagram | shapes.Shape | shapes.ShapeMap`
A diagram, a shape, or a shape map.
Notes
-----
The "main flow" graph is essentially the *open graph* encoding of
the string diagram in the sense of Dixon & Kissinger.
"""
def __init__(self, diagram):
if isinstance(diagram, diagrams.Diagram):
shape = diagram.shape
generators = diagram.ambient.generators
self.name = diagram.name
def isdegenerate(x):
return generators[diagram[x]]['shape'].dim != x.dim
else:
if isinstance(diagram, shapes.Shape):
diagram = diagram.id()
if isinstance(diagram, shapes.ShapeMap):
shape = diagram.source
generators = {x: {} for x in diagram.target}
self.name = str(diagram)
def isdegenerate(x):
return diagram[x].dim != x.dim
else:
raise TypeError(utils.type_err(
diagrams.Diagram, diagram))
dim = shape.dim
self._nodes = {
x: {
'label': diagram[x],
'color': generators[diagram[x]].get(
'color', None),
'stroke': generators[diagram[x]].get(
'stroke',
generators[diagram[x]].get(
'color', None)),
'draw_node': generators[diagram[x]].get(
'draw_node', True),
'draw_label': generators[diagram[x]].get(
'draw_label', True),
'isdegenerate': isdegenerate(x)
}
for x in shape[dim]}
self._wires = {
x: {
'label': diagram[x],
'color': generators[diagram[x]].get(
'stroke',
generators[diagram[x]].get(
'color', None)),
'draw_label': generators[diagram[x]].get(
'draw_label', True),
'isdegenerate': isdegenerate(x)
}
for x in shape[dim-1]}
graph = nx.DiGraph()
graph.add_nodes_from(self.nodes)
graph.add_nodes_from(self.wires)
if dim >= 1:
out_1 = dict()
in_1 = dict()
for x in self.nodes:
out_1[x] = shape.faces(x, '+')
in_1[x] = shape.faces(x, '-')
for y in in_1[x]:
graph.add_edge(y, x)
for y in out_1[x]:
graph.add_edge(x, y)
widthgraph = nx.DiGraph()
widthgraph.add_nodes_from(self.nodes)
widthgraph.add_nodes_from(self.wires)
if dim >= 2:
out_2 = dict()
in_2 = dict()
for x in self.wires:
out_2[x] = shape.faces(x, '+')
in_2[x] = shape.faces(x, '-')
for x in self.nodes:
out_2[x] = ogposets.GrSet(*[
z for w in out_1[x]
for z in shape.faces(w, '+')
if shape.cofaces(z, '-').isdisjoint(out_1[x])])
in_2[x] = ogposets.GrSet(*[
z for w in in_1[x]
for z in shape.faces(w, '-')
if shape.cofaces(z, '+').isdisjoint(in_1[x])])
for x in widthgraph:
for y in widthgraph:
if not out_2[x].isdisjoint(in_2[y]):
widthgraph.add_edge(x, y)
depthgraph = nx.DiGraph()
depthgraph.add_nodes_from(self.wires)
if dim >= 3:
out_3 = dict()
in_3 = dict()
for x in depthgraph:
out_3[x] = ogposets.GrSet(*[
z for w in out_2[x]
for z in shape.faces(w, '+')
if shape.cofaces(z, '-').isdisjoint(out_2[x])])
in_3[x] = ogposets.GrSet(*[
z for w in in_2[x]
for z in shape.faces(w, '-')
if shape.cofaces(z, '+').isdisjoint(in_2[x])])
for x in depthgraph:
for y in depthgraph:
if not out_3[x].isdisjoint(in_3[y]):
depthgraph.add_edge(x, y)
def remove_cycles(graph):
cycles = list(nx.simple_cycles(graph))
to_delete = set()
for cycle in cycles:
for i in range(len(cycle) - 1):
to_delete.add((cycle[i], cycle[i+1]))
to_delete.add((cycle[-1], cycle[0]))
graph.remove_edges_from(to_delete)
remove_cycles(widthgraph)
remove_cycles(depthgraph)
self._graph = graph
self._widthgraph = widthgraph
self._depthgraph = depthgraph
def __str__(self):
return '{} with {} nodes and {} wires'.format(
type(self).__name__, str(len(self.nodes)),
str(len(self.wires)))
def __eq__(self, other):
return isinstance(other, StrDiag) and \
self.graph == other.graph and \
self.widthgraph == other.widthgraph and \
self.depthgraph == other.depthgraph
@property
def graph(self):
"""
Returns the main flow graph between node and wire vertices.
Returns
-------
graph : :class:`networkx.DiGraph`
The main flow graph.
"""
return self._graph
@property
def widthgraph(self):
"""
Returns the "width" flow graph between node and wire vertices.
Returns
-------
widthgraph : :class:`networkx.DiGraph`
The width flow graph.
"""
return self._widthgraph
@property
def depthgraph(self):
"""
Returns the "depth" flow graph between wire vertices.
Returns
-------
depthgraph : :class:`networkx.DiGraph`
The depth flow graph.
"""
return self._depthgraph
@property
def nodes(self):
"""
Returns the nodes of the string diagram, together with all
the stored associated information.
This is a dictionary whose keys are the elements
of the diagram's shape corresponding to nodes. For each node, the
object stores another dictionary, which contains
- the node's label (:code:`label`),
- the node's fill colour (:code:`color`) and stroke colour
(:code:`stroke`),
- booleans specifying whether to draw the node and/or its label
(:code:`draw_node`, :code:`draw_label`), and
- a boolean specifying whether the node represents a degenerate
cell (:code:`isdegenerate`).
Returns
-------
nodes : :class:`dict[dict]`
The nodes of the string diagram.
"""
return self._nodes
@property
def wires(self):
"""
Returns the wires of the string diagram, together with all
the stored associated information.
This is a dictionary whose keys are the elements
        of the diagram's shape corresponding to wires. For each wire, the
object stores another dictionary, which contains
- the wire's label (:code:`label`),
- the wire's colour (:code:`color`),
- a boolean specifying whether to draw the wire's label
(:code:`draw_label`), and
- a boolean specifying whether the wire represents a degenerate
cell (:code:`isdegenerate`).
Returns
-------
wires : :class:`dict[dict]`
            The wires of the string diagram.
"""
return self._wires
def place_vertices(self):
"""
Places node and wire vertices on the unit square canvas, and
returns their coordinates.
        The node and wire vertices are first placed at heights and
        widths proportional to the ratio between the longest path
        ending at the vertex and the longest path through the vertex,
        computed in the main flow graph (for heights) and in the width
        flow graph (for widths).
In dimension > 2, this may result in clashes, where some vertices
are given the same coordinates. In this case, these are
resolved by "splitting" the clashing vertices, placing them
at equally spaced angles of a circle centred on the clash
coordinates, with an appropriately small radius that does not
result in further clashes.
The coordinates are returned as a dictionary whose keys are
the elements corresponding to nodes and wires.
Returns
-------
coordinates : :class:`dict[tuple[float]]`
The coordinates assigned to wire and node vertices.
"""
def longest_paths(graph):
tsort = list(nx.topological_sort(graph))
longest_paths = dict()
for x in tsort:
longest_fw = {y: -1 for y in graph}
longest_fw[x] = 0
for y in (y for y in tsort if longest_fw[y] >= 0):
for z in graph.successors(y):
if longest_fw[z] < longest_fw[y] + 1:
longest_fw[z] = longest_fw[y] + 1
longest_bw = {y: -1 for y in graph}
longest_bw[x] = 0
for y in (y for y in reversed(tsort) if longest_bw[y] >= 0):
for z in graph.predecessors(y):
if longest_bw[z] < longest_bw[y] + 1:
longest_bw[z] = longest_bw[y] + 1
longest_paths[x] = (
max(longest_bw.values()),
max(longest_fw.values()))
return longest_paths
longest_width = longest_paths(self.widthgraph)
longest_height = longest_paths(self.graph)
xstep = 1 / (max(
[longest_width[x][0] for x in longest_width], default=0) + 2)
ystep = 1 / (max(
[longest_height[x][0] for x in longest_height], default=0) + 2)
coordinates = dict()
sources, sinks = [], []
for x in self.graph:
coordinates[x] = (
(longest_width[x][0] + 1) / (sum(longest_width[x]) + 2),
(longest_height[x][0] + 1) / (sum(longest_height[x]) + 2)
)
if self.graph.in_degree(x) == 0:
sources.append(x)
if self.graph.out_degree(x) == 0:
sinks.append(x)
def solve_clashes(coord_dict):
from math import (sin, cos, pi)
for coord in set(coord_dict.values()): # Solve clashes
keys = [x for x in coord_dict if coord_dict[x] == coord]
if len(keys) > 1:
n = len(keys)
for k, x in enumerate(keys):
coordinates[x] = (
coordinates[x][0] + (xstep/3)*cos(.4 + (2*pi*k)/n),
coordinates[x][1] + (ystep/3)*sin(.4 + (2*pi*k)/n)
)
solve_clashes(coordinates) # xy clashes in initial placement
sources_x = {x: coordinates[x][0] for x in sources}
sinks_x = {x: coordinates[x][0] for x in sinks}
solve_clashes(sources_x) # x clashes in input boundary
solve_clashes(sinks_x) # x clashes in output boundary
return coordinates
def draw(self, **params):
"""
Outputs a visualisation of the string diagram, using a backend.
Currently supported are a Matplotlib backend and a TikZ backend;
in both cases it is possible to show the output (as a pop-up
window for Matplotlib, or as code for TikZ) or save to file.
Various customisation options are available, including different
orientations and colours.
Keyword arguments
-----------------
tikz : :class:`bool`
Whether to output TikZ code (default is :code:`False`).
show : :class:`bool`
Whether to show the output (default is :code:`True`).
path : :class:`str`
Path where to save the output (default is :code:`None`).
orientation : :class:`str`
Orientation of the string diagram: one of :code:`'bt'`
(bottom-to-top), :code:`'lr'` (left-to-right),
:code:`'tb'` (top-to-bottom), :code:`'rl'` (right-to-left)
(default is :code:`'bt'`).
depth : :class:`bool`
Whether to take into account the depth flow graph when
drawing wires (default is :code:`True`).
bgcolor : multiple types
The background colour (default is :code:`'white'`).
fgcolor : multiple types
The foreground colour, given by default to nodes, wires,
and labels (default is :code:`'black'`).
infocolor : multiple types
The colour of additional information displayed in
the diagram, such as positions (default is :code:`'magenta'`).
wirecolor : multiple types
The default wire colour (default is same as `fgcolor`).
nodecolor : multiple types
The default node fill colour (default is same as `fgcolor`).
nodestroke : multiple types
The default node stroke colour (default is same as `nodecolor`).
degenalpha : :class:`float`
The alpha factor of wires corresponding to degenerate cells
(default is :code:`0.1`).
labels : :class:`bool`
Whether to display node and wire labels (default is
:code:`True`).
nodelabels : :class:`bool`
Whether to display node labels (default is same as `labels`).
wirelabels : :class:`bool`
Whether to display wire labels (default is same as `labels`).
labeloffset : :class:`tuple[float]`
Point offset of labels relative to vertices (default is
:code:`(4, 4)`).
positions : :class:`bool`
Whether to display node and wire positions (default is
:code:`False`).
nodepositions : :class:`bool`
Whether to display node positions (default is same as
`positions`).
wirepositions : :class:`bool`
Whether to display wire positions (default is same as
`positions`).
positionoffset : :class:`tuple[float]`
Point offset of positions relative to vertices (default is
:code:`(4, -16)` for Matplotlib, :code:`(4, -6)` for TikZ).
scale : :class:`float`
(TikZ only) Scale factor to apply to output (default is
:code:`3`).
        xscale : :class:`float`
            (TikZ only) Scale factor to apply to x axis in output
            (default is same as `scale`).
        yscale : :class:`float`
            (TikZ only) Scale factor to apply to y axis in output
            (default is same as `scale`).
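        Examples
        --------
        An illustrative sketch; :code:`d` is assumed to be a diagram
        constructed elsewhere, and the output path is hypothetical::

            diag = StrDiag(d)
            diag.draw()                      # Matplotlib pop-up window
            diag.draw(orientation='lr')      # left-to-right layout
            diag.draw(tikz=True, show=False,
                      path='out.tex')        # hypothetical output path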
"""
# Parameters
tikz = params.get('tikz', DEFAULT['tikz'])
scale = params.get('scale', DEFAULT['scale'])
xscale = params.get('xscale', scale)
yscale = params.get('yscale', scale)
show = params.get('show', DEFAULT['show'])
path = params.get('path', None)
depth = params.get('depth', DEFAULT['depth'])
bgcolor = params.get(
'bgcolor', DEFAULT['bgcolor'])
fgcolor = params.get(
'fgcolor', DEFAULT['fgcolor'])
infocolor = params.get(
'infocolor', DEFAULT['infocolor'])
wirecolor = params.get('wirecolor', fgcolor)
nodecolor = params.get('nodecolor', fgcolor)
nodestroke = params.get('nodestroke', nodecolor)
degenalpha = params.get(
'degenalpha', DEFAULT['degenalpha'])
labels = params.get(
'labels', DEFAULT['labels'])
wirelabels = params.get('wirelabels', labels)
nodelabels = params.get('nodelabels', labels)
labeloffset = params.get(
'labeloffset', DEFAULT['labeloffset'])
positions = params.get(
'positions', DEFAULT['positions'])
wirepositions = params.get('wirepositions', positions)
nodepositions = params.get('nodepositions', positions)
podefault = DEFAULT['positionoffsettikz'] \
if tikz else DEFAULT['positionoffset']
positionoffset = params.get(
'positionoffset', podefault)
orientation = params.get(
'orientation', DEFAULT['orientation'])
coord = self.place_vertices()
backendclass = drawing.TikZBackend if tikz else drawing.MatBackend
backend = backendclass(
bgcolor=bgcolor,
fgcolor=fgcolor,
orientation=orientation,
name=self.name)
wiresort = list(nx.topological_sort(self.depthgraph))
for wire in reversed(wiresort):
color = wirecolor if self.wires[wire]['color'] is None \
else self.wires[wire]['color']
alpha = degenalpha if self.wires[wire]['isdegenerate'] else 1
for node in [
*self.graph.predecessors(wire),
*self.graph.successors(wire)
]:
backend.draw_wire(
coord[wire], coord[node],
color=color,
alpha=alpha,
depth=depth)
if self.graph.in_degree(wire) == 0:
backend.draw_wire(
coord[wire], (coord[wire][0], 0),
color=color,
alpha=alpha,
depth=depth)
if self.graph.out_degree(wire) == 0:
backend.draw_wire(
coord[wire], (coord[wire][0], 1),
color=color,
alpha=alpha,
depth=depth)
if wirepositions:
backend.draw_label(
str(wire.pos),
coord[wire],
positionoffset,
color=infocolor)
if wirelabels and self.wires[wire]['draw_label']:
backend.draw_label(
self.wires[wire]['label'],
coord[wire],
labeloffset)
def is_drawn(node):
if self.nodes[node]['isdegenerate']:
return False
return self.nodes[node]['draw_node']
for node in self.nodes:
stroke = nodestroke if self.nodes[node]['stroke'] is None \
else self.nodes[node]['stroke']
color = nodecolor if self.nodes[node]['color'] is None \
else self.nodes[node]['color']
if is_drawn(node):
backend.draw_node(
coord[node],
color=color,
stroke=stroke)
if nodelabels and self.nodes[node]['draw_label']:
backend.draw_label(
self.nodes[node]['label'],
coord[node], labeloffset)
if nodepositions:
backend.draw_label(
str(node.pos),
coord[node],
positionoffset,
color=infocolor)
backend.output(
path=path,
show=show,
xscale=xscale,
yscale=yscale)
def draw(*diagrams, **params):
"""
Given any number of diagrams, generates their string
diagrams and draws them.
This is the same as generating the string diagram for each
diagram, and calling :meth:`StrDiag.draw` with the given
parameters on each one of them.
Arguments
---------
*diagrams : :class:`diagrams.Diagram | shapes.Shape | shapes.ShapeMap`
Any number of diagrams or shapes or shape maps.
Keyword arguments
-----------------
**params
Passed to :meth:`StrDiag.draw`.
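    Examples
    --------
    An illustrative sketch; :code:`d1`, :code:`d2` are assumed to be
    diagrams constructed elsewhere::

        from rewalt.strdiags import draw

        draw(d1)                        # draw a single diagram
        draw(d1, d2, orientation='lr')  # several diagrams, left-to-right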
"""
for diagram in diagrams:
StrDiag(diagram).draw(**params)
def draw_boundaries(diagram, dim=None, **params):
"""
Given a diagram, generates the string diagram of its input and
output boundaries of a given dimension, and draws them.
Arguments
---------
diagram : :class:`diagrams.Diagram | shapes.Shape | shapes.ShapeMap`
A diagram or a shape or a shape map.
dim : :class:`int`, optional
Dimension of the boundary (default is :code:`diagram.dim - 1`).
Keyword arguments
-----------------
    **params
Passed to :meth:`StrDiag.draw`.
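    Examples
    --------
    An illustrative sketch; :code:`d` is assumed to be a diagram of
    dimension at least 2 constructed elsewhere::

        from rewalt.strdiags import draw_boundaries

        draw_boundaries(d)          # top-dimensional input and output
        draw_boundaries(d, dim=1)   # 1-dimensional boundaries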
"""
StrDiag(diagram.boundary('-', dim)).draw(**params)
StrDiag(diagram.boundary('+', dim)).draw(**params)
def to_gif(diagram, *diagrams, **params):
"""
Given a non-zero number of diagrams, generates their string
diagrams and outputs a GIF animation of the sequence of their
visualisations.
Arguments
---------
diagram : :class:`diagrams.Diagram | shapes.Shape | shapes.ShapeMap`
A diagram or a shape or a shape map.
*diagrams : :class:`diagrams.Diagram | shapes.Shape | shapes.ShapeMap`
Any number of diagrams or shapes or shape maps.
Keyword arguments
-----------------
timestep : :class:`int`
The time step for the animation (default is :code:`1000`).
loop : :class:`bool`
Whether to loop around the animation (default is :code:`False`).
**params
Passed to :meth:`StrDiag.draw`.
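    Examples
    --------
    An illustrative sketch; the frames are the rewrite steps of some
    diagram :code:`d` constructed elsewhere, and the file name is
    hypothetical::

        from rewalt.strdiags import to_gif

        # 'animation.gif' is a hypothetical output path
        to_gif(*d.rewrite_steps, timestep=500, path='animation.gif')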
"""
import os
from tempfile import NamedTemporaryFile, TemporaryDirectory
from PIL import Image
path = params.pop('path', None)
params.pop('show', False)
timestep = params.get('timestep', 1000)
loop = params.get('loop', False)
frames = []
path = path or os.path.basename(NamedTemporaryFile(
suffix='.gif', prefix='tmp_', dir='.').name)
with TemporaryDirectory() as directory:
for k, step in enumerate((diagram, *diagrams)):
tmp_path = os.path.join(directory, '{}.png'.format(k))
draw(step, path=tmp_path, show=False, **params)
frames.append(Image.open(tmp_path))
if loop:
frames = frames + frames[::-1]
frames[0].save(
path,
format='GIF',
append_images=frames[1:],
save_all=True,
duration=timestep,
**{'loop': 0} if loop else {})
| 25,559 | 36.151163 | 79 |
py
|
rewalt
|
rewalt-main/rewalt/diagrams.py
|
"""
Implements diagrammatic sets and diagrams.
"""
from rewalt import utils, shapes
from rewalt.ogposets import (El, GrSet, GrSubset)
from rewalt.shapes import (Shape, ShapeMap)
class DiagSet:
"""
Class for diagrammatic sets, a model of higher-dimensional rewrite
systems and/or directed cell complexes.
A diagrammatic set is constructed by creating an empty object, then
adding named *generators* of different dimensions. The addition of a
generator models the gluing of an atomic :class:`shapes.Shape` object
along its boundary.
This operation produces a *diagram*, that is, a map from a shape
to the diagrammatic set, modelled as a :class:`Diagram` object.
From these "basic" diagrams, we can construct "derived" diagrams
either by pasting, or by pulling back along shape maps (this is
used to produce "unit" or "degenerate" diagrams).
To add a 0-dimensional generator (a point), we just give it a name.
In the main constructor :meth:`add`, the gluing of an
:code:`n`-dimensional generator is specified by a pair of round,
:code:`(n-1)`-dimensional :class:`Diagram` objects, describing
the gluing maps for the input and output boundaries of a shape.
Simplicial sets, cubical sets with connections, and reflexive globular
sets are all special cases of diagrammatic sets, where the generators
have simplicial, cubical, or globular shapes.
There are special constructors :meth:`add_simplex` and
:meth:`add_cube` for adding simplicial and cubical generators by
listing all their faces.
The generators of a diagrammatic set are, by default, "directed" and
not invertible. The class supports a model of weak or pseudo-
invertibility, where two generators being each other's "weak inverse"
is witnessed by a pair of higher-dimensional generators (*invertors*).
This is produced by the methods :meth:`invert` (creates an inverse) and
:meth:`make_inverses` (makes an existing generator the inverse).
Diagrammatic sets do not have an intrinsic notion of composition
of diagrams, so they are not by themselves a model of higher categories.
However, the class supports a model of higher categories in which one
generator being the composite of a diagram is witnessed by a
higher-dimensional generator (a *compositor*). This is produced
by the methods :meth:`compose` (creates a composite) and
:meth:`make_composite` (makes an existing generator the composite).
Notes
-----
There is an alternative constructor :meth:`yoneda` which turns
a :class:`shapes.Shape` object into a diagrammatic set with one
generator for every face of the shape.
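    Examples
    --------
    An illustrative sketch (all generator names are hypothetical)::

        from rewalt.diagrams import DiagSet

        X = DiagSet()
        x = X.add('x')          # a point
        y = X.add('y')
        f = X.add('f', x, y)    # a 1-cell from x to y
        g = X.add('g', x, y)
        a = X.add('a', f, g)    # a 2-cell from f to g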
"""
_PRIVATE = (
'shape',
'mapping',
'faces',
'cofaces',
'inverse',
'linvertor',
'rinvertor',
'inverts',
'composite')
def __init__(self):
self._generators = dict()
self._by_dim = dict()
self._compositors = dict()
def __str__(self):
return '{} with {} generators'.format(
type(self).__name__, str(len(self)))
def __getitem__(self, key):
if key in self.generators:
return Diagram._new(
self.generators[key]['shape'],
self,
self.generators[key]['mapping'],
key)
raise KeyError(str(key))
def __contains__(self, item):
return item in self.generators.keys()
def __len__(self):
return len(self.generators)
def __iter__(self):
return iter(self.generators)
@property
def generators(self):
"""
Returns the object's internal representation of the set of
generators and related data.
This is a dictionary whose keys are the generators' names.
For each generator, the object stores another dictionary,
which contains at least
- the generator's shape (:code:`shape`, :class:`shapes.Shape`),
- the mapping of the shape (:code:`mapping`,
:class:`list[list[hashable]]`),
- the generator's set of "faces", that is, other generators
appearing as codimension-1 faces of the generator
(:code:`faces`, :class:`set[hashable]`),
- the generator's set of "cofaces", that is, other generators
that have the generator as a face (:code:`cofaces`,
:class:`set[hashable]`).
If the generator has been inverted, it will also contain
- its inverse's name (:code:`inverse`, :class:`hashable`),
- the left invertor's name (:code:`linvertor`, :class:`hashable`),
- the right invertor's name (:code:`rinvertor`, :class:`hashable`).
If the generator happens to be a compositor, it will also
contain the name of the composite it is exhibiting
(:code:`composite`, :class:`hashable`).
This also stores any additional keyword arguments passed when
adding the generator.
Returns
-------
generators : :class:`dict[dict]`
The generators data.
"""
return self._generators
@property
def by_dim(self):
"""
        Returns the sets of generators of each dimension.
        Returns
        -------
        by_dim : :class:`dict[set[hashable]]`
            The sets of generators, indexed by dimension.
"""
return self._by_dim
@property
def compositors(self):
"""
Returns a dictionary of diagrams that have a non-trivial
composite, indexed by their compositor's name.
        More precisely, rather than :class:`Diagram` objects,
        the dictionary stores the :code:`shape` and :code:`mapping`
        data from which they can be reconstructed.
Returns
-------
compositors : :class:`dict[dict]`
The dictionary of composed diagrams.
"""
return self._compositors
@property
def dim(self):
"""
Returns the maximal dimension of a generator.
Returns
-------
dim : :class:`int`
The maximal dimension of a generator, or :code:`-1` if empty.
"""
return max(self.by_dim, default=-1)
@property
def issimplicial(self):
"""
        Returns whether the diagrammatic set is simplicial, that is,
all its generators are simplex-shaped.
Returns
-------
issimplicial : :class:`bool`
:code:`True` if and only if the shape of every generator is
a :class:`shapes.Simplex` object.
"""
for x in self:
if not isinstance(self.generators[x]['shape'], shapes.Simplex):
return False
return True
@property
def iscubical(self):
"""
        Returns whether the diagrammatic set is cubical, that is,
all its generators are cube-shaped.
Returns
-------
iscubical : :class:`bool`
:code:`True` if and only if the shape of every generator is
a :class:`shapes.Cube` object.
"""
for x in self:
if not isinstance(self.generators[x]['shape'], shapes.Cube):
return False
return True
def add(self, name, input=None, output=None, **kwargs):
"""
Adds a generator and returns the diagram that maps the new
generator into the diagrammatic set.
The gluing of the generator is specified by a pair of round
diagrams with identical boundaries, corresponding to the input
and output diagrams of the new generator. If none are given,
adds a point (0-dimensional generator).
Arguments
---------
name : :class:`hashable`
Name to assign to the new generator.
input : :class:`Diagram`, optional
The input diagram of the new generator (default is :code:`None`)
output : :class:`Diagram`, optional
The output diagram of the new generator (default is :code:`None`)
Keyword arguments
-----------------
color : multiple types
Fill color when pictured as a node in string diagrams.
If :code:`stroke` is not specified, this is
also the color when pictured as a wire.
stroke : multiple types
Stroke color when pictured as a node, and color when pictured
as a wire.
draw_node : :class:`bool`
If :code:`False`, no node is drawn when picturing the
generator in string diagrams.
draw_label : :class:`bool`
If :code:`False`, no label is drawn when picturing the
generator in string diagrams.
Returns
-------
generator : :class:`Diagram`
The diagram picking the new generator.
Raises
------
:class:`ValueError`
If the name is already in use, or the input and output diagrams
do not have round and matching boundaries.
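        Examples
        --------
        An illustrative sketch (hypothetical names), gluing a 2-cell
        from the pasting of two 1-cells to a third 1-cell::

            from rewalt.diagrams import DiagSet

            X = DiagSet()
            x, y, z = X.add('x'), X.add('y'), X.add('z')
            f = X.add('f', x, y)
            g = X.add('g', y, z)
            h = X.add('h', x, z)
            a = X.add('a', f.paste(g), h)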
"""
if name in self.generators:
raise ValueError(utils.value_err(
name, 'name already in use'))
for key in self._PRIVATE:
kwargs.pop(key, None)
if input is None:
input = Diagram(self)
if output is None:
output = Diagram(self)
for x in (input, output):
utils.typecheck(x, {
'type': Diagram,
'st': lambda x: x.ambient == self,
'why': 'not a diagram in {}'.format(repr(self))})
boundary = Shape.atom(
input.shape, output.shape, cospan=True)
shape = boundary.target
mapping = [
[None for _ in n_data]
for n_data in shape.face_data
]
for x in input.shape:
y = boundary.fst[x]
mapping[y.dim][y.pos] = input.mapping[x.dim][x.pos]
for x in output.shape:
y = boundary.snd[x]
if mapping[y.dim][y.pos] is None:
mapping[y.dim][y.pos] = output.mapping[x.dim][x.pos]
else:
if mapping[y.dim][y.pos] != output.mapping[x.dim][x.pos]:
raise ValueError(utils.value_err(
output, 'boundary does not match '
'boundary of {}'.format(repr(input))))
mapping[-1][0] = name
self._update_generators(name, shape, mapping, **kwargs)
return self[name]
def add_simplex(self, name, *faces, **kwargs):
"""
Variant of :meth:`add` for simplex-shaped generators.
The gluing of the generator is specified by a number of
:class:`SimplexDiagram` objects, corresponding to the faces
of the new generator as listed by
:class:`SimplexDiagram.simplex_face`.
Arguments
---------
name : :class:`hashable`
Name to assign to the new generator.
*faces : :class:`SimplexDiagram`
The simplicial faces of the new generator.
Keyword arguments
-----------------
**kwargs
Same as :meth:`add`.
Returns
-------
generator : :class:`SimplexDiagram`
The diagram picking the new generator.
Raises
------
:class:`ValueError`
If the name is already in use, or the faces do not have
matching boundaries.
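        Examples
        --------
        An illustrative sketch (hypothetical names), assuming the usual
        simplicial convention that face :code:`k` is the one opposite
        to vertex :code:`k`::

            X = DiagSet()
            x, y, z = X.add('x'), X.add('y'), X.add('z')
            f = X.add('f', x, y)
            g = X.add('g', y, z)
            h = X.add('h', x, z)
            a = X.add_simplex('a', g, h, f)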
"""
if len(faces) <= 1:
return self.add(name, **kwargs)
if name in self.generators:
raise ValueError(utils.value_err(
name, 'name already in use'))
for key in self._PRIVATE:
kwargs.pop(key, None)
dim = len(faces) - 1
for x in faces:
utils.typecheck(x, {
'type': SimplexDiagram,
'st': lambda x: x.ambient == self and x.dim == dim-1,
'why': 'expecting a {}-simplex in {}'.format(
str(dim - 1), repr(self))})
shape = Shape.simplex(dim)
mapping = [
[None for _ in n_data]
for n_data in shape.face_data
]
for k, face in enumerate(faces):
face_map = shape.simplex_face(k)
for x in face:
y = face_map[x]
if mapping[y.dim][y.pos] is None:
mapping[y.dim][y.pos] = face.mapping[x.dim][x.pos]
else:
if mapping[y.dim][y.pos] != face.mapping[x.dim][x.pos]:
raise ValueError(utils.value_err(
face, 'boundary of face does not '
'match other faces'))
mapping[-1][0] = name
self._update_generators(name, shape, mapping, **kwargs)
return self[name]
def add_cube(self, name, *faces, **kwargs):
"""
Variant of :meth:`add` for cube-shaped generators.
The gluing of the generator is specified by a number of
:class:`CubeDiagram` objects, corresponding to the faces
of the new generator as listed by
:class:`CubeDiagram.cube_face`, in the order
:code:`(0, '-')`, :code:`(0, '+')`, :code:`(1, '-')`,
:code:`(1, '+')`, etc.
Arguments
---------
name : :class:`hashable`
Name to assign to the new generator.
*faces : :class:`CubeDiagram`
The cubical faces of the new generator.
Keyword arguments
-----------------
**kwargs
Same as :meth:`add`.
Returns
-------
generator : :class:`CubeDiagram`
The diagram picking the new generator.
Raises
------
:class:`ValueError`
If the name is already in use, or the faces do not have
matching boundaries.
"""
if len(faces) % 2 == 1:
raise ValueError(utils.value_err(
faces, 'expecting an even number of faces'))
if name in self.generators:
raise ValueError(utils.value_err(
name, 'name already in use'))
for key in self._PRIVATE:
kwargs.pop(key, None)
dim = int(len(faces) / 2)
for x in faces:
utils.typecheck(x, {
'type': CubeDiagram,
'st': lambda x: x.ambient == self and x.dim == dim-1,
'why': 'expecting a {}-cube in {}'.format(
str(dim - 1), repr(self))})
shape = Shape.cube(dim)
mapping = [
[None for _ in n_data]
for n_data in shape.face_data
]
for n, face in enumerate(faces):
k = int(n/2)
sign = utils.mksign(n % 2)
face_map = shape.cube_face(k, sign)
for x in face:
y = face_map[x]
if mapping[y.dim][y.pos] is None:
mapping[y.dim][y.pos] = face.mapping[x.dim][x.pos]
else:
if mapping[y.dim][y.pos] != face.mapping[x.dim][x.pos]:
raise ValueError(utils.value_err(
face, 'boundary of face does not '
'match other faces'))
mapping[-1][0] = name
self._update_generators(name, shape, mapping, **kwargs)
return self[name]
def invert(self, generatorname,
inversename=None,
rinvertorname=None,
linvertorname=None,
**kwargs):
"""
Adds a weak inverse for a generator, together
with left and right invertors that witness the
inversion, and returns them as diagrams.
Both the inverse and the invertors can be given custom names.
If the generator to be inverted is named :code:`'a'`, the
default names are
- :code:`'a⁻¹'` for the inverse,
- :code:`'inv(a, a⁻¹)'` for the right invertor,
- :code:`'inv(a⁻¹, a)'` for the left invertor.
In the theory of diagrammatic sets, weak invertibility would
correspond to the situation where the invertors themselves
are weakly invertible, coinductively.
In the implementation, we take an "invert when necessary"
approach, where invertors are not invertible by default, and
should be inverted when needed.
Notes
-----
The right invertor for the generator is the left invertor
for its inverse, and the left invertor for the generator is the
right invertor for its inverse.
Arguments
---------
generatorname : :class:`hashable`
Name of the generator to invert.
inversename : :class:`hashable`, optional
Name assigned to the inverse.
rinvertorname : :class:`hashable`, optional
Name assigned to the right invertor.
linvertorname : :class:`hashable`, optional
Name assigned to the left invertor.
Keyword arguments
-----------------
**kwargs
Passed to :meth:`add` when adding the inverse.
Returns
-------
inverse : :class:`Diagram`
The diagram picking the inverse.
rinvertor : :class:`Diagram`
The diagram picking the right invertor.
linvertor : :class:`Diagram`
The diagram picking the left invertor.
Raises
------
:class:`ValueError`
If the generator is already inverted, or 0-dimensional.
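        Examples
        --------
        An illustrative sketch (hypothetical names), using the default
        names for the inverse and the invertors::

            X = DiagSet()
            x, y = X.add('x'), X.add('y')
            f = X.add('f', x, y)
            f_inv, rinv, linv = X.invert('f')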
"""
if isinstance(generatorname, Diagram):
generatorname = generatorname.name
generator = self[generatorname]
if generator.dim == 0:
raise ValueError(utils.value_err(
generatorname, 'cannot invert 0-cell'))
if generator.isinvertiblecell:
raise ValueError(utils.value_err(
generatorname, 'already inverted'))
if inversename is None:
inversename = '{}⁻¹'.format(str(generatorname))
if rinvertorname is None:
rinvertorname = 'inv({}, {})'.format(
str(generatorname), str(inversename))
if linvertorname is None:
linvertorname = 'inv({}, {})'.format(
str(inversename), str(generatorname))
        for x in (inversename, rinvertorname, linvertorname):
            if x in self.generators:
                raise ValueError(utils.value_err(
                    x, 'name already in use'))
inverse = self.add(
inversename,
generator.output,
generator.input,
**kwargs)
rinvertor = self.add(
rinvertorname,
generator.paste(inverse),
generator.input.unit())
linvertor = self.add(
linvertorname,
inverse.paste(generator),
generator.output.unit())
self._generators[generatorname].update({
'inverse': inversename,
'rinvertor': rinvertorname,
'linvertor': linvertorname})
self._generators[inversename].update({
'inverse': generatorname,
'rinvertor': linvertorname,
'linvertor': rinvertorname})
self._generators[rinvertorname].update({
'inverts': (generatorname, inversename)})
self._generators[linvertorname].update({
'inverts': (inversename, generatorname)})
return inverse, rinvertor, linvertor
def make_inverses(self, generatorname1, generatorname2,
rinvertorname=None,
linvertorname=None):
"""
Makes two generators each other's weak inverse by adding
invertors, and returns the invertors.
In what follows, "right/left" invertors are relative to
the first generator.
Both invertors can be given custom names.
If the generators are named :code:`'a'`, :code:`'b'`, the
default names for the invertors are
- :code:`'inv(a, b)'` for the right invertor,
- :code:`'inv(b, a)'` for the left invertor.
In the theory of diagrammatic sets, weak invertibility would
correspond to the situation where the invertors themselves
are weakly invertible, coinductively.
In the implementation, we take an "invert when necessary"
approach, where invertors are not invertible by default, and
should be inverted when needed.
Arguments
---------
generatorname1 : :class:`hashable`
Name of the first generator.
        generatorname2 : :class:`hashable`
Name of the second generator.
rinvertorname : :class:`hashable`, optional
Name assigned to the right invertor.
linvertorname : :class:`hashable`, optional
Name assigned to the left invertor.
Returns
-------
rinvertor : :class:`Diagram`
The diagram picking the right invertor.
linvertor : :class:`Diagram`
The diagram picking the left invertor.
Raises
------
:class:`ValueError`
If the generators are already inverted, or 0-dimensional,
or do not have compatible boundaries.
"""
        # Accept Diagram objects in place of generator names.
        if isinstance(generatorname1, Diagram):
            generatorname1 = generatorname1.name
        if isinstance(generatorname2, Diagram):
            generatorname2 = generatorname2.name
generator1 = self[generatorname1]
generator2 = self[generatorname2]
selfinverse = generatorname1 == generatorname2
for x in (generator1, generator2):
if x.isinvertiblecell:
raise ValueError(utils.value_err(
x.name, 'already inverted'))
rpaste = generator1.paste(generator2)
if rinvertorname is None:
rinvertorname = 'inv({}, {})'.format(
str(generatorname1), str(generatorname2))
if selfinverse:
lpaste = rpaste
linvertorname = rinvertorname
else:
lpaste = generator2.paste(generator1)
if linvertorname is None:
linvertorname = 'inv({}, {})'.format(
str(generatorname2), str(generatorname1))
for x in (rinvertorname, linvertorname):
if x in self.generators:
raise ValueError(utils.value_err(
x, 'name already in use'))
rinvertor = self.add(
rinvertorname,
rpaste,
generator1.input.unit())
if selfinverse:
linvertor = rinvertor
else:
linvertor = self.add(
linvertorname,
lpaste,
generator2.input.unit())
self._generators[generatorname1].update({
'inverse': generatorname2,
'rinvertor': rinvertorname,
'linvertor': linvertorname})
self._generators[rinvertorname].update({
'inverts': (generatorname1, generatorname2)})
if not selfinverse:
self._generators[generatorname2].update({
'inverse': generatorname1,
'rinvertor': linvertorname,
'linvertor': rinvertorname})
self._generators[linvertorname].update({
'inverts': (generatorname2, generatorname1)})
return rinvertor, linvertor
def compose(self, diagram,
name=None, compositorname=None,
**kwargs):
"""
Given a round diagram, adds a weak composite for it,
together with a compositor witnessing the composition, and
returns them as diagrams.
Both the composite and the compositor can be given custom names.
If the diagram to be composed is named :code:`'a'`, the
default names are
- :code:`'⟨a⟩'` for the composite,
- :code:`'comp(a)'` for the compositor.
In the theory of diagrammatic sets, a weak composite is
witnessed by a weakly invertible compositor.
In the implementation, we take an "invert when necessary"
approach, where compositors are not invertible by default, and
should be inverted when needed.
Notes
-----
A cell (a diagram whose shape is an atom) is treated as already
having itself as a composite, witnessed by a unit cell; this
method can only be used on non-atomic diagrams.
Arguments
---------
diagram : :class:`Diagram`
The diagram to compose.
name : :class:`hashable`, optional
Name of the weak composite.
compositorname : :class:`hashable`, optional
Name of the compositor.
Keyword arguments
-----------------
**kwargs
Passed to :meth:`add` when adding the composite.
Returns
-------
composite : :class:`Diagram`
The diagram picking the composite.
compositor : :class:`Diagram`
The diagram picking the compositor.
Raises
------
:class:`ValueError`
If the diagram is not round, or already has a composite.
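        Examples
        --------
        An illustrative sketch (hypothetical names), composing the
        pasting of two 1-cells::

            X = DiagSet()
            x, y, z = X.add('x'), X.add('y'), X.add('z')
            f = X.add('f', x, y)
            g = X.add('g', y, z)
            fg, c = X.compose(f.paste(g), name='fg')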
"""
utils.typecheck(diagram, {
'type': Diagram,
'st': lambda x: x.shape.isround,
'why': 'composable diagrams must have round shape'})
if diagram.hascomposite:
raise ValueError(utils.value_err(
diagram, 'already has a composite'))
if name is None:
name = '⟨{}⟩'.format(str(diagram.name))
if compositorname is None:
compositorname = 'comp({})'.format(str(diagram.name))
for x in (name, compositorname):
if x in self.generators:
raise ValueError(utils.value_err(
x, 'name already in use'))
composite = self.add(
name,
diagram.input,
diagram.output,
**kwargs)
compositor = self.add(
compositorname,
diagram,
composite)
self._generators[compositorname].update({
'composite': name})
self._compositors.update({
compositorname: {
'shape': diagram.shape,
'mapping': diagram.mapping}
})
return composite, compositor
def make_composite(self, generatorname, diagram,
compositorname=None):
"""
        Given a generator and a round diagram, makes the first the
        weak composite of the second by adding a compositor, and
        returns the compositor as a diagram.
The compositor can be given a custom name.
If the diagram to be composed is named :code:`'a'`, the
default name is :code:`'comp(a)'`.
In the theory of diagrammatic sets, a weak composite is
witnessed by a weakly invertible compositor.
In the implementation, we take an "invert when necessary"
approach, where compositors are not invertible by default, and
should be inverted when needed.
Notes
-----
A cell (a diagram whose shape is an atom) is treated as already
having itself as a composite, witnessed by a unit cell; this
method can only be used on non-atomic diagrams.
Arguments
---------
generatorname : :class:`hashable`
Name of the generator that should be its composite.
diagram : :class:`Diagram`
The diagram to compose.
compositorname : :class:`hashable`, optional
Name of the compositor.
Returns
-------
compositor : :class:`Diagram`
The diagram picking the compositor.
Raises
------
:class:`ValueError`
If the diagram is not round, or already has a composite, or
the diagram and the generator do not have matching boundaries.
"""
if isinstance(generatorname, Diagram):
generatorname = generatorname.name
utils.typecheck(diagram, {
'type': Diagram,
'st': lambda x: x.shape.isround,
'why': 'composable diagrams must have round shape'})
if diagram.hascomposite:
raise ValueError(utils.value_err(
diagram, 'already has a composite'))
if compositorname is None:
compositorname = 'comp({})'.format(str(diagram.name))
if compositorname in self.generators:
raise ValueError(utils.value_err(
compositorname, 'name already in use'))
generator = self[generatorname]
compositor = self.add(
compositorname,
diagram,
generator)
self._generators[compositorname].update({
'composite': generatorname})
self._compositors.update({
compositorname: {
'shape': diagram.shape,
'mapping': diagram.mapping}
})
return compositor
def remove(self, generatorname):
"""
Removes a generator, together with all other generators
that depend on it.
Arguments
---------
generatorname : :class:`hashable`
Name of the generator to remove.
"""
if isinstance(generatorname, Diagram):
generatorname = generatorname.name
to_remove = [*self.generators[generatorname]['cofaces']]
for x in to_remove:
self.remove(x)
dim = self[generatorname].dim
self._by_dim[dim].remove(generatorname)
if len(self._by_dim[dim]) == 0:
self._by_dim.pop(dim, None)
if 'inverse' in self.generators[generatorname]:
inverse = self.generators[generatorname]['inverse']
self.generators[inverse].pop('inverse', None)
self.generators[inverse].pop('linvertor', None)
self.generators[inverse].pop('rinvertor', None)
if 'inverts' in self.generators[generatorname]:
fst, snd = self.generators[generatorname]['inverts']
linvertor = self.generators[fst]['linvertor']
for x in (fst, snd):
self.generators[x].pop('inverse', None)
self.generators[x].pop('linvertor', None)
self.generators[x].pop('rinvertor', None)
self.generators[linvertor].pop('inverts', None)
if 'composite' in self.generators[generatorname]:
# This does not remove the composite!
self.compositors.pop(generatorname, None)
for x in self.generators[generatorname]['faces']:
self.generators[x]['cofaces'].remove(generatorname)
self.generators.pop(generatorname, None)
def update(self, generatorname, **kwargs):
"""
Updates the optional arguments of a generator.
Arguments
---------
generatorname : :class:`hashable`
Name of the generator to update.
Keyword arguments
-----------------
**kwargs
Any arguments to update.
Raises
------
:class:`AttributeError`
If the optional argument uses a private keyword.
"""
if isinstance(generatorname, Diagram):
generatorname = generatorname.name
for key in self._PRIVATE:
if key in kwargs:
raise AttributeError(key, 'private attribute')
self.generators[generatorname].update(kwargs)
def copy(self):
"""
Returns a copy of the object.
Returns
-------
copy : :class:`DiagSet`
A copy of the object.
"""
new = DiagSet()
new._generators = self.generators.copy()
new._by_dim = self.by_dim.copy()
new._compositors = self.compositors.copy()
return new
@staticmethod
def yoneda(shape):
"""
Alternative constructor creating a diagrammatic set from
a :class:`shapes.Shape`.
Mathematically, diagrammatic sets are certain sheaves on the
category of shapes and maps of shapes; this constructor
implements the Yoneda embedding of a shape.
This has an `n`-dimensional generator for each `n`-dimensional
element of the shape.
Arguments
---------
shape : :class:`shapes.Shape`
A shape.
Returns
-------
yoneda : :class:`DiagSet`
The Yoneda-embedded shape.
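        Examples
        --------
        An illustrative sketch, embedding the shape of the 2-simplex::

            from rewalt.shapes import Shape

            T = DiagSet.yoneda(Shape.simplex(2))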
"""
utils.typecheck(shape, {'type': Shape})
yoneda = DiagSet()
for x in shape:
atom = shape.atom_inclusion(x)
yoneda._generators.update({
x: {
'shape': atom.source,
'mapping': atom.mapping,
'faces': {y for y in shape.faces(x)},
'cofaces': {y for y in shape.cofaces(x)}
}
})
for n, n_size in enumerate(shape.size):
yoneda._by_dim.update({
n: {
El(n, k) for k in range(n_size)
}
})
return yoneda
# Internal methods
def _update_generators(self, name, shape, mapping, **kwargs):
self._generators.update({
name: {
'shape': shape,
'mapping': mapping,
'faces': set(),
'cofaces': set(),
**kwargs
}
})
if shape.dim in self._by_dim:
self._by_dim[shape.dim].add(name)
else:
self._by_dim[shape.dim] = {name}
if shape.dim > 0:
for x in mapping[-2]:
self.generators[x]['cofaces'].add(name)
self.generators[name]['faces'].add(x)
class Diagram:
"""
Class for diagrams, that is, mappings from a shape to an
"ambient" diagrammatic set.
To create a diagram, we start from *generators*
of a diagrammatic set, returned by the :meth:`DiagSet.add`
method or requested with indexer operators.
Then we produce other diagrams in two main ways:
- pulling back a diagram along a map of shapes
(:meth:`pullback`), or
- pasting together two diagrams along their boundaries
(:meth:`paste`, :meth:`to_inputs`, :meth:`to_outputs`).
In practice, the direct use of :meth:`pullback`, which requires
an explicit shape map, can be avoided in common cases by using
:meth:`unit`, :meth:`lunitor`, :meth:`runitor`, or the
specialised :class:`SimplexDiagram.simplex_degeneracy`,
:class:`CubeDiagram.cube_degeneracy`, and
:class:`CubeDiagram.cube_connection` methods.
Notes
-----
Initialising a :class:`Diagram` directly creates an empty
diagram in a given diagrammatic set.
Arguments
---------
ambient : :class:`DiagSet`
The ambient diagrammatic set.
"""
def __init__(self, ambient):
utils.typecheck(ambient, {'type': DiagSet})
self._shape = Shape.empty()
self._ambient = ambient
self._mapping = []
self._name = ''
def __str__(self):
return str(self.name)
def __eq__(self, other):
return isinstance(other, Diagram) and \
self.shape == other.shape and \
self.ambient == other.ambient and \
self.mapping == other.mapping
def __len__(self):
return len(self.shape)
def __getitem__(self, element):
if element in self.shape:
return self.mapping[element.dim][element.pos]
raise ValueError(utils.value_err(
element, 'not an element of the shape'))
def __contains__(self, item):
return item in self.shape
def __iter__(self):
return iter(self.shape)
@property
def name(self):
"""
Returns the name of the diagram.
Returns
-------
name : :class:`hashable`
The name of the diagram.
"""
return self._name
@property
def shape(self):
"""
Returns the shape of the diagram.
Returns
-------
shape : :class:`shapes.Shape`
The shape of the diagram.
"""
return self._shape
@property
def ambient(self):
"""
Returns the ambient diagrammatic set.
Returns
-------
ambient : :class:`DiagSet`
The ambient diagrammatic set.
"""
return self._ambient
@property
def mapping(self):
"""
Returns the data specifying the mapping of shape elements to
generators.
The mapping is specified as a list of lists, similar to
:class:`ogposets.OgMap`, in the following way:
:code:`mapping[n][k] == s` if the diagram sends :code:`El(n, k)`
to the generator named :code:`s`.
Returns
-------
mapping : :class:`list[list[hashable]]`
The data specifying the diagram's assignment.
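        Examples
        --------
        As an illustration (hypothetical names): the diagram picking a
        1-dimensional generator :code:`'f'` glued from :code:`'x'` to
        :code:`'y'` has a mapping of the form
        :code:`[['x', 'y'], ['f']]`, up to the order of the
        0-dimensional elements.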
"""
return self._mapping
@property
def layers(self):
"""
Returns the layering of the diagram corresponding to the current
layering of the shape.
Returns
-------
layers : :class:`list[Diagram]`
The current layering.
"""
if not hasattr(self.shape, '_layering'):
return [self]
return [
self.pullback(f, 'layer {} of {}'.format(
str(n), self.name))
for n, f in enumerate(self.shape._layering)]
@property
def rewrite_steps(self):
"""
Returns the sequence of rewrite steps associated to the current
layering of the diagram.
The :code:`0`-th rewrite step is the input boundary of the diagram.
For :code:`n > 0`, the :code:`n`-th rewrite step is the output
boundary of the :code:`(n-1)`-th layer.
Returns
-------
rewrite_steps : :class:`list[Diagram]`
The current sequence of rewrite steps.
"""
rewrite_steps = [
*[layer.input for layer in self.layers],
self.layers[-1].output
]
for n, step in enumerate(rewrite_steps):
step.rename('step {} of {}'.format(
str(n), self.name))
return rewrite_steps
@property
def dim(self):
"""
Shorthand for :code:`shape.dim`.
"""
return self.shape.dim
@property
def isdegenerate(self):
"""
Returns whether the diagram is *degenerate*, that is, its
image has dimension strictly lower than the dimension of its shape.
Returns
-------
isdegenerate : :class:`bool`
:code:`True` if and only if the diagram is degenerate.
"""
if self.dim <= 0:
return False
for x in self.mapping[-1]:
if self.ambient[x].dim == self.dim:
return False
return True
@property
def isround(self):
"""
Shorthand for :code:`shape.isround`.
"""
return self.shape.isround
@property
def iscell(self):
"""
Shorthand for :code:`shape.isatom` (a *cell* is a diagram
whose shape is an atom).
"""
return self.shape.isatom
@property
def isinvertiblecell(self):
"""
Returns whether the diagram is an invertible cell.
A cell is invertible if either
- it is degenerate, or
- its image is an invertible generator.
Returns
-------
isinvertiblecell : :class:`bool`
:code:`True` if and only if the diagram is an invertible cell.
"""
if self.iscell:
if self.isdegenerate:
return True
if 'inverse' in self.ambient.generators[self.mapping[-1][0]]:
return True
return False
@property
def hascomposite(self):
"""
Returns whether the diagram has a composite.
Returns
-------
hascomposite : :class:`bool`
:code:`True` if and only if the diagram has a composite.
"""
if self.iscell:
return True
if self._find_compositor() is None:
return False
return True
def rename(self, name):
"""
Renames the diagram.
Arguments
---------
name : :class:`hashable`
The new name for the diagram.
"""
self._name = name
# Methods for creating new diagrams
def paste(self, other, dim=None, **params):
"""
        Given another diagram and :code:`k` such that the output
        :code:`k`-boundary of this diagram is equal to the input
        :code:`k`-boundary of the other, returns their pasting along
        the matching boundaries.
        Arguments
        ---------
        other : :class:`Diagram`
            The second diagram, pasted after this one.
        dim : :class:`int`, optional
            The dimension of the boundary along which they will be pasted
            (default is :code:`min(self.dim, other.dim) - 1`).
Keyword arguments
-----------------
cospan : :class:`bool`
Whether to also return the cospan of inclusions of the two
diagrams' shapes into the pasting (default is :code:`False`).
Returns
-------
paste : :class:`Diagram`
The pasted diagram.
paste_cospan : :class:`ogposets.OgMapPair`, optional
The cospan of inclusions of the two diagrams' shapes into
their pasting.
Raises
------
:class:`ValueError`
If the boundaries do not match.
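        Examples
        --------
        An illustrative sketch (hypothetical names), pasting two 1-cells
        along their matching 0-dimensional boundary::

            X = DiagSet()
            x, y, z = X.add('x'), X.add('y'), X.add('z')
            f = X.add('f', x, y)
            g = X.add('g', y, z)
            fg = f.paste(g)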
"""
cospan = params.get('cospan', False)
utils.typecheck(other, {
'type': Diagram,
'st': lambda x: x.ambient == self.ambient,
'why': 'not the same ambient DiagSet'})
if dim is None:
dim = min(self.dim, other.dim) - 1
paste_cospan = Shape.paste(
self.shape, other.shape, dim, cospan=True)
shape = paste_cospan.target
mapping = Diagram._paste_fill_mapping(
self, other, paste_cospan)
name = '({}) #{} ({})'.format(
str(self.name), str(dim), str(other.name))
pasted = Diagram._new(
shape,
self.ambient,
mapping,
name)
if cospan:
return pasted, paste_cospan
return pasted
def to_outputs(self, positions, other, dim=None, **params):
"""
Returns the pasting of another diagram along a round subshape of
the output :code:`k`-boundary, specified by the positions of its
:code:`k`-dimensional elements.
Arguments
---------
positions : :class:`list[int]` | :class:`int`
The positions of the outputs along which to paste. If given
an integer :code:`n`, interprets it as the list :code:`[n]`.
other : :class:`Diagram`
The other diagram to paste.
dim : :class:`int`, optional
The dimension of the boundary along which to paste
(default is :code:`self.dim - 1`)
Keyword arguments
-----------------
cospan : :class:`bool`
Whether to return the cospan of inclusions of the two diagrams'
shapes into the pasting (default is :code:`False`).
Returns
-------
        to_outputs : :class:`Diagram`
The pasted diagram.
paste_cospan : :class:`ogposets.OgMapPair`, optional
The cospan of inclusions of the two diagrams' shapes into
their pasting.
Raises
------
:class:`ValueError`
If the boundaries do not match, or the pasting does not produce
a well-formed shape.
"""
cospan = params.get('cospan', False)
utils.typecheck(other, {
'type': Diagram,
'st': lambda x: x.ambient == self.ambient,
'why': 'not the same ambient DiagSet'})
if isinstance(positions, int):
positions = [positions]
if dim is None:
dim = self.dim-1
paste_cospan = self.shape.to_outputs(
positions, other.shape, dim, cospan=True)
shape = paste_cospan.target
mapping = Diagram._paste_fill_mapping(
self, other, paste_cospan)
name = '{{{} -> {}{}}}({})'.format(
str(other.name), str(dim),
str(sorted(positions)), str(self.name))
pasted = Diagram._new(
shape,
self.ambient,
mapping,
name)
if cospan:
return pasted, paste_cospan
return pasted
def to_inputs(self, positions, other, dim=None, **params):
"""
Returns the pasting of another diagram along a round subshape of
the input :code:`k`-boundary, specified by the positions of its
:code:`k`-dimensional elements.
Arguments
---------
positions : :class:`list[int]` | :class:`int`
The positions of the inputs along which to paste. If given
an integer :code:`n`, interprets it as the list :code:`[n]`.
other : :class:`Diagram`
The other diagram to paste.
dim : :class:`int`, optional
The dimension of the boundary along which to paste
(default is :code:`self.dim - 1`)
Keyword arguments
-----------------
cospan : :class:`bool`
Whether to return the cospan of inclusions of the two diagrams'
shapes into the pasting (default is :code:`False`).
Returns
-------
        to_inputs : :class:`Diagram`
The pasted diagram.
paste_cospan : :class:`ogposets.OgMapPair`, optional
The cospan of inclusions of the two diagrams' shapes into
their pasting.
Raises
------
:class:`ValueError`
If the boundaries do not match, or the pasting does not produce
a well-formed shape.
"""
cospan = params.get('cospan', False)
utils.typecheck(other, {
'type': Diagram,
'st': lambda x: x.ambient == self.ambient,
'why': 'not the same ambient DiagSet'})
if isinstance(positions, int):
positions = [positions]
if dim is None:
dim = self.dim-1
paste_cospan = self.shape.to_inputs(
positions, other.shape, dim, cospan=True)
shape = paste_cospan.target
mapping = Diagram._paste_fill_mapping(
other, self, paste_cospan)
name = '({}){{{}{} <- {}}}'.format(
str(self.name), str(sorted(positions)),
str(dim), str(other.name))
pasted = Diagram._new(
shape,
self.ambient,
mapping,
name)
if cospan:
return pasted, paste_cospan
return pasted
def rewrite(self, positions, diagram):
"""
Returns the diagram representing the application of a
higher-dimensional rewrite to a subdiagram, specified
by the positions of its top-dimensional elements.
This is in fact an alias for :meth:`to_outputs` in the top
dimension, reflecting the intuitions of higher-dimensional
rewriting in this situation.
Arguments
---------
positions : :class:`list[int]` | :class:`int`
The positions of the top-dimensional elements to rewrite.
If given an integer :code:`n`, interprets it as the list
:code:`[n]`.
diagram : :class:`Diagram`
The diagram representing the rewrite to apply.
Returns
-------
        rewrite : :class:`Diagram`
The diagram representing the application of the rewrite to
the given positions.
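        Examples
        --------
        An illustrative sketch (hypothetical names): if :code:`d` is a
        1-dimensional diagram whose element in position :code:`0` of the
        top dimension is an occurrence of :code:`f`, and :code:`a` is a
        2-cell with input :code:`f`, then::

            d2 = d.rewrite(0, a)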
"""
return self.to_outputs(positions, diagram, self.dim)
def pullback(self, shapemap, name=None):
"""
Returns the pullback of the diagram along a shape map.
Arguments
---------
shapemap : :class:`shapes.ShapeMap`
The map along which to pull back.
name : :class:`hashable`, optional
The name to give to the new diagram.
Returns
-------
pullback : :class:`Diagram`
The pulled back diagram.
Raises
------
:class:`ValueError`
If the target of the map is not equal to the diagram shape.
"""
utils.typecheck(shapemap, {
'type': ShapeMap,
'st': lambda x: x.target == self.shape,
'why': 'target does not match diagram shape'})
shape = shapemap.source
mapping = [
[
self.mapping[x.dim][x.pos]
for x in n_data
]
for n_data in shapemap.mapping]
if name is None:
name = 'pullback of {}'.format(str(self.name))
return Diagram._new(
shape,
self.ambient,
mapping,
name)
def boundary(self, sign, dim=None):
"""
Returns the boundary of a given orientation and dimension.
This is, by definition, the pullback of a diagram along the
inclusion map :code:`self.shape.boundary(sign, dim)`.
Arguments
---------
sign : :class:`str`
Orientation: :code:`'-'` for input, :code:`'+'` for output.
dim : :class:`int`, optional
Dimension of the boundary (default is :code:`self.dim - 1`).
Returns
-------
boundary : :class:`Diagram`
The requested boundary.
"""
if dim is None:
dim = self.dim - 1
sign = utils.mksign(sign)
name = '∂[{},{}]({})'.format(
sign, str(dim), str(self.name))
return self.pullback(self.shape.boundary(
sign, dim), name)
@property
def input(self):
"""
Alias for :code:`boundary('-')`.
"""
return self.boundary('-')
@property
def output(self):
"""
Alias for :code:`boundary('+')`.
"""
return self.boundary('+')
def unit(self):
"""
Returns the unit on the diagram: a degenerate diagram one
dimension higher, with input and output equal to the diagram.
This is, by definition, the pullback of the diagram along
:code:`self.shape.inflate()`.
Returns
-------
unit : :class:`Diagram`
The unit diagram.
"""
name = '1({})'.format(str(self.name))
return self.pullback(self.shape.inflate(), name)
def lunitor(self, sign='-', positions=None):
"""
Returns a left unitor on the diagram:
a degenerate diagram one dimension higher, with one
boundary equal to the diagram, and the other equal to the
diagram with units pasted to some of its inputs.
Arguments
---------
sign : :class:`str`, optional
The boundary on which the units are: :code:`'-'` (default)
for input, :code:`'+'` for output.
positions : :class:`list[int]` | :class:`int`
The positions of the inputs to which a unit is attached
(default is *all* of the inputs). If given
an integer :code:`n`, interprets it as the list :code:`[n]`.
Returns
-------
lunitor : :class:`Diagram`
The left unitor diagram.
Raises
------
:class:`ValueError`
If the positions do not correspond to inputs.
"""
sign = utils.mksign(sign)
dim = self.dim - 1
all = self.shape.all()
output = all.boundary('+')
collapsed = output
if positions is not None:
if isinstance(positions, int):
positions = [positions]
input = all.boundary('-')
notcollapsed = GrSubset(
GrSet(
*[El(dim, k) for k in positions]),
self.shape)
if not notcollapsed.issubset(input):
raise ValueError(utils.value_err(
positions, 'not in the input boundary'))
notcollapsed = notcollapsed.closure()
collapsed = collapsed.union(
input.difference(notcollapsed).closure())
unitor_map = self.shape.inflate(
collapsed)
if positions is None:
name = 'λ({})'.format(str(self.name))
else:
name = 'λ{}({})'.format(
str(sorted(positions)),
str(self.name))
if sign == '-':
unitor_map = unitor_map.dual(self.dim + 1)
else:
name = '({})⁻¹'.format(name)
return self.pullback(unitor_map, name)
def runitor(self, sign='-', positions=None):
"""
Returns a right unitor on the diagram:
a degenerate diagram one dimension higher, with one
boundary equal to the diagram, and the other equal to the
diagram with units pasted to some of its outputs.
Arguments
---------
sign : :class:`str`, optional
The boundary on which the units are: :code:`'-'` (default)
for input, :code:`'+'` for output.
positions : :class:`list[int]` | :class:`int`
The positions of the outputs to which a unit is attached
(default is *all* of the outputs). If given
an integer :code:`n`, interprets it as the list :code:`[n]`.
Returns
-------
runitor : :class:`Diagram`
The right unitor diagram.
Raises
------
:class:`ValueError`
If the positions do not correspond to outputs.
"""
sign = utils.mksign(sign)
dim = self.dim - 1
all = self.shape.all()
input = all.boundary('-')
collapsed = input
if positions is not None:
if isinstance(positions, int):
positions = [positions]
output = all.boundary('+')
notcollapsed = GrSubset(
GrSet(
*[El(dim, k) for k in positions]),
self.shape)
if not notcollapsed.issubset(output):
raise ValueError(utils.value_err(
positions, 'not in the output boundary'))
notcollapsed = notcollapsed.closure()
collapsed = collapsed.union(
output.difference(notcollapsed).closure())
unitor_map = self.shape.inflate(
collapsed)
if positions is None:
name = 'ρ({})'.format(str(self.name))
else:
name = 'ρ{}({})'.format(
str(sorted(positions)),
str(self.name))
if sign == '+':
unitor_map = unitor_map.dual(self.dim + 1)
name = '({})⁻¹'.format(name)
return self.pullback(unitor_map, name)
@property
def inverse(self):
"""
Returns the inverse of an invertible cell.
Returns
-------
inverse : :class:`Diagram`
The inverse cell.
Raises
------
:class:`ValueError`
If the diagram is not an invertible cell.
"""
if not self.isinvertiblecell:
raise ValueError(utils.value_err(
self, 'not an invertible cell'))
if not self.isdegenerate:
top = self.mapping[-1][0]
top_inv = self.ambient.generators[top]['inverse']
if self.shape == self.ambient.generators[top]['shape']:
return self.ambient[top_inv]
reordering = Shape.dual(self.shape, self.dim,
reordering=True)
shape = reordering.source
mapping = [
[self[x] for x in n_data]
for n_data in reordering.mapping
]
if not self.isdegenerate:
mapping[-1][0] = top_inv
name = '({})⁻¹'.format(self.name)
return Diagram._new(
shape,
self.ambient,
mapping,
name)
@property
def rinvertor(self):
"""
Returns the right invertor for an invertible cell.
Returns
-------
rinvertor : :class:`Diagram`
The right invertor.
Raises
------
:class:`ValueError`
If the diagram is not an invertible cell.
"""
if not self.isinvertiblecell:
raise ValueError(utils.value_err(
self, 'not an invertible cell'))
top = self.mapping[-1][0]
if not self.isdegenerate:
top_rinvertor = self.ambient.generators[top]['rinvertor']
if self.shape == self.ambient.generators[top]['shape']:
return self.ambient[top_rinvertor]
inverse = self.inverse
rpaste, rpaste_cospan = self.paste(inverse, cospan=True)
unit = self.input.unit()
atom_cospan = rpaste.shape.atom(unit.shape, cospan=True)
shape = atom_cospan.target
mapping = Diagram._paste_fill_mapping(
rpaste, unit, atom_cospan)
if not self.isdegenerate:
mapping[-1][0] = top_rinvertor
else:
mapping[-1][0] = top
name = 'inv({}, {})'.format(self.name, inverse.name)
return Diagram._new(
shape,
self.ambient,
mapping,
name)
@property
def linvertor(self):
"""
Returns the left invertor for an invertible cell.
Returns
-------
linvertor : :class:`Diagram`
The left invertor.
Raises
------
:class:`ValueError`
If the diagram is not an invertible cell.
"""
if not self.isinvertiblecell:
raise ValueError(utils.value_err(
self, 'not an invertible cell'))
top = self.mapping[-1][0]
if not self.isdegenerate:
top_linvertor = self.ambient.generators[top]['linvertor']
if self.shape == self.ambient.generators[top]['shape']:
return self.ambient[top_linvertor]
inverse = self.inverse
lpaste, lpaste_cospan = inverse.paste(self, cospan=True)
unit = self.output.unit()
atom_cospan = lpaste.shape.atom(unit.shape, cospan=True)
shape = atom_cospan.target
mapping = Diagram._paste_fill_mapping(
lpaste, unit, atom_cospan)
if not self.isdegenerate:
mapping[-1][0] = top_linvertor
else:
mapping[-1][0] = top
name = 'inv({}, {})'.format(inverse.name, self.name)
return Diagram._new(
shape,
self.ambient,
mapping,
name)
@property
def composite(self):
"""
Returns the composite of the diagram, if it exists.
Returns
-------
composite : :class:`Diagram`
The composite.
Raises
------
:class:`ValueError`
If the diagram does not have a composite.
"""
if not self.hascomposite:
raise ValueError(utils.value_err(
self, 'does not have a composite'))
if self.iscell:
return self
compositorname = self._find_compositor()
name = self.ambient.generators[compositorname]['composite']
return self.ambient[name]
@property
def compositor(self):
"""
Returns the compositor of the diagram, if it exists.
Returns
-------
compositor : :class:`Diagram`
The compositor.
Raises
------
:class:`ValueError`
If the diagram does not have a composite.
"""
if not self.hascomposite:
raise ValueError(utils.value_err(
self, 'does not have a compositor'))
if self.iscell:
return self.unit()
compositorname = self._find_compositor()
return self.ambient[compositorname]
def generate_layering(self):
"""
Assigns a layering to the diagram, iterating through all
the layerings, and returns it.
Returns
-------
layers : :class:`list[Diagram]`
The generated layering.
"""
self.shape.generate_layering()
return self.layers
def hasse(self, **params):
"""
Bound version of :meth:`hasse.draw`.
Calling :code:`x.hasse(**params)` is equivalent to calling
:code:`hasse.draw(x, **params)`.
"""
from rewalt.hasse import draw
return draw(self, **params)
def draw(self, **params):
"""
Bound version of :meth:`strdiags.draw`.
Calling :code:`x.draw(**params)` is equivalent to calling
:code:`strdiags.draw(x, **params)`.
"""
from rewalt.strdiags import draw
return draw(self, **params)
def draw_boundaries(self, **params):
"""
Bound version of :meth:`strdiags.draw_boundaries`.
Calling :code:`x.draw_boundaries(**params)` is equivalent to
calling :code:`strdiags.draw_boundaries(x, **params)`.
"""
from rewalt.strdiags import draw_boundaries
return draw_boundaries(self, **params)
# Alternative constructors
@staticmethod
def yoneda(shapemap, name=None):
"""
Alternative constructor creating a diagram from
a :class:`shapes.ShapeMap`.
Mathematically, diagrammatic sets are certain sheaves on the
category of shapes and maps of shapes; this constructor
implements the Yoneda embedding of a map of shapes.
Arguments
---------
shapemap : :class:`shapes.ShapeMap`
A map of shapes.
name : :class:`hashable`, optional
The name of the generated diagram.
Returns
-------
yoneda : :class:`Diagram`
The Yoneda-embedded map.
"""
utils.typecheck(shapemap, {
'type': ShapeMap})
return Diagram._new(
shapemap.source,
DiagSet.yoneda(shapemap.target),
shapemap.mapping,
name)
@staticmethod
def with_layers(fst, *layers):
"""
Given a non-zero number of diagrams that can be pasted sequentially
in the top dimension, returns their pasting.
Arguments
---------
fst : :class:`Diagram`
The first diagram.
*layers : :class:`Diagram`
Any number of additional diagrams.
Returns
-------
with_layers : :class:`Diagram`
The pasting of all the diagrams in the top dimension.
Raises
------
:class:`ValueError`
If the diagrams are not pastable.
"""
utils.typecheck(fst, {'type': Diagram})
dim = fst.dim
diagram = fst
for x in layers:
utils.typecheck(x, {
'type': Diagram,
'st': lambda x: x.dim == dim,
'why': 'expecting diagram of dimension {}'.format(
str(dim))})
diagram, cospan = diagram.paste(x, cospan=True)
return Diagram._new(
diagram.shape,
diagram.ambient,
diagram.mapping,
diagram.name)
# Internal methods
@staticmethod
def _new(shape, ambient, mapping, name=None):
def diagramclass():
if isinstance(shape, shapes.Point):
return PointDiagram
if isinstance(shape, shapes.Arrow):
return ArrowDiagram
if isinstance(shape, shapes.Cube):
return CubeDiagram
if isinstance(shape, shapes.Simplex):
return SimplexDiagram
return Diagram
new = Diagram.__new__(diagramclass())
new._shape = shape
new._ambient = ambient
new._mapping = mapping
new._name = name
return new
def _find_compositor(self):
description = {
'shape': self.shape,
'mapping': self.mapping}
for x in self.ambient.compositors:
if self.ambient.compositors[x] == description:
return x
return None
@staticmethod
def _paste_fill_mapping(fst, snd, paste_cospan):
shape = paste_cospan.target
mapping = [
[None for _ in n_data]
for n_data in shape.face_data
]
for x in fst.shape:
y = paste_cospan.fst[x]
mapping[y.dim][y.pos] = fst.mapping[x.dim][x.pos]
for x in snd.shape:
y = paste_cospan.snd[x]
if mapping[y.dim][y.pos] is None:
mapping[y.dim][y.pos] = snd.mapping[x.dim][x.pos]
else:
if mapping[y.dim][y.pos] != snd.mapping[x.dim][x.pos]:
raise ValueError(utils.value_err(
snd, 'boundary does not match '
'boundary of {}'.format(repr(fst))))
return mapping
class SimplexDiagram(Diagram):
"""
Subclass of :class:`Diagram` for diagrams whose shape is an
oriented simplex.
The methods of this class provide an implementation of the
structural maps of a simplicial set.
"""
def simplex_face(self, k):
"""
Returns one of the faces of the simplex.
This is, by definition, the pullback of the diagram along
:code:`self.shape.simplex_face(k)`.
Arguments
---------
k : :class:`int`
The index of the face, ranging from :code:`0` to
:code:`self.dim`.
Returns
-------
simplex_face : :class:`Diagram`
The face.
Raises
------
:class:`ValueError`
If the index is out of range.
"""
face_map = self.shape.simplex_face(k)
name = 'd[{}]({})'.format(
str(k), str(self.name))
return self.pullback(face_map, name)
def simplex_degeneracy(self, k):
"""
Returns one of the degeneracies of the simplex.
This is, by definition, the pullback of the diagram along
:code:`self.shape.simplex_degeneracy(k)`.
Arguments
---------
k : :class:`int`
The index of the degeneracy, ranging from :code:`0` to
:code:`self.dim`.
Returns
-------
simplex_degeneracy : :class:`Diagram`
The degeneracy.
Raises
------
:class:`ValueError`
If the index is out of range.
"""
degeneracy_map = self.shape.simplex_degeneracy(k)
name = 's[{}]({})'.format(
str(k), str(self.name))
return self.pullback(degeneracy_map, name)
class CubeDiagram(Diagram):
"""
Subclass of :class:`Diagram` for diagrams whose shape is an
oriented cube.
The methods of this class provide an implementation of the
structural maps of a cubical set with connections.
"""
def cube_face(self, k, sign):
"""
Returns one of the faces of the cube.
This is, by definition, the pullback of the diagram along
:code:`self.shape.cube_face(k, sign)`.
Arguments
---------
k : :class:`int`
Index of the face, ranging from :code:`0` to
:code:`self.dim - 1`.
sign : :class:`str`
Side: :code:`'-'` or :code:`'+'`.
Returns
-------
cube_face : :class:`Diagram`
The face.
Raises
------
:class:`ValueError`
If the index is out of range.
"""
sign = utils.mksign(sign)
face_map = self.shape.cube_face(k, sign)
name = 'δ[{},{}]({})'.format(
str(k), sign, str(self.name))
return self.pullback(face_map, name)
def cube_degeneracy(self, k):
"""
Returns one of the degeneracies of the cube.
This is, by definition, the pullback of the diagram along
:code:`self.shape.cube_degeneracy(k)`.
Arguments
---------
k : :class:`int`
The index of the degeneracy, ranging from :code:`0` to
:code:`self.dim`.
Returns
-------
cube_degeneracy : :class:`Diagram`
The degeneracy.
Raises
------
:class:`ValueError`
If the index is out of range.
"""
degeneracy_map = self.shape.cube_degeneracy(k)
name = 'σ[{}]({})'.format(
str(k), str(self.name))
return self.pullback(degeneracy_map, name)
def cube_connection(self, k, sign):
"""
Returns one of the connections of the cube.
This is, by definition, the pullback of the diagram along
:code:`self.shape.cube_connection(k, sign)`.
Arguments
---------
k : :class:`int`
Index of the connection, ranging from :code:`0` to
:code:`self.dim - 1`.
sign : :class:`str`
Side: :code:`'-'` or :code:`'+'`.
Returns
-------
cube_connection : :class:`Diagram`
The connection.
Raises
------
:class:`ValueError`
If the index is out of range.
"""
sign = utils.mksign(sign)
connection_map = self.shape.cube_connection(k, sign)
name = 'γ[{},{}]({})'.format(
str(k), sign, str(self.name))
return self.pullback(connection_map, name)
class ArrowDiagram(SimplexDiagram, CubeDiagram):
pass
class PointDiagram(SimplexDiagram, CubeDiagram):
"""
Subclass of :class:`Diagram` for diagrams whose shape is a point.
"""
def degeneracy(self, shape):
"""
Given a shape, returns the unique degenerate diagram
of that shape over the point.
This is, by definition, the pullback of the point diagram along
:code:`self.shape.terminal()`.
Arguments
---------
shape : :class:`shapes.Shape`
The shape of the degenerate diagram.
Returns
-------
degeneracy : :class:`Diagram`
The degenerate diagram.
"""
utils.typecheck(shape, {'type': Shape})
return self.pullback(
shape.terminal(), self.name)
| 71,763 | 31.137931 | 77 |
py
|
rewalt
|
rewalt-main/rewalt/ogposets.py
|
"""
Implements oriented graded posets, their elements, subsets, and maps.
"""
import numpy as np
from rewalt import utils
class El(tuple):
"""
Class for elements of an oriented graded poset.
An element is encoded as a pair of non-negative integers:
the dimension of the element, and its position in a linear
order of elements of the given dimension.
Arguments
---------
dim : :class:`int`
The dimension of the element.
pos : :class:`int`
The position of the element.
Examples
--------
>>> x = El(2, 3)
>>> x.dim
2
>>> x.pos
3
"""
def __new__(self, dim, pos):
for x in dim, pos:
utils.typecheck(x, {
'type': int,
'st': lambda n: n >= 0,
'why': 'expecting non-negative integer'
})
return tuple.__new__(El, (dim, pos))
def __repr__(self):
return "El({}, {})".format(repr(self.dim), repr(self.pos))
def __str__(self):
return repr(self)
def __eq__(self, other):
return isinstance(other, El) and \
self.dim == other.dim and self.pos == other.pos
def __hash__(self):
return hash(repr(self))
@property
def dim(self):
"""
Returns the dimension of the element.
Returns
-------
dim : :class:`int`
The dimension of the element.
"""
return self[0]
@property
def pos(self):
"""
Returns the position of the element.
Returns
-------
pos : :class:`int`
The position of the element.
"""
return self[1]
def shifted(self, k):
"""
Returns the element of the same dimension, with position shifted
by a given integer.
Parameters
----------
k : :class:`int`
The shift in position.
Returns
-------
shifted : :class:`El`
The shifted element.
"""
utils.typecheck(k, {
'type': int,
'st': lambda n: self.pos + n >= 0,
'why': 'shifted position must be non-negative'
})
return El(self.dim, self.pos + k)
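# A quick doctest-style sketch of the El interface above (not part of the
# original module): an El is an immutable (dim, pos) pair, and shifted()
# keeps the dimension while moving the position.
#
# >>> x = El(2, 3)
# >>> x.shifted(2)
# El(2, 5)
# >>> (x.dim, x.pos)
# (2, 3)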
class OgPoset:
"""
Class for oriented graded posets, that is, finite graded posets
with an orientation, defined as a :code:`{'-', '+'}`-labelling of the
edges of their Hasse diagram.
In this implementation, the elements of a given dimension (grade)
are linearly ordered, so that each element is identified by its
dimension and the position in the linear order, encoded as an object
of class :class:`El`.
If :code:`El(n, k)` covers :code:`El(n-1, j)` with orientation
:code:`o`, we say that :code:`El(n-1, j)` is an *input face* of
:code:`El(n, k)` if :code:`o == '-'` and an *output face* of
:code:`El(n, k)` if :code:`o == '+'`.
Defining an :class:`OgPoset` directly is not recommended; use
constructors of :class:`shapes.Shape` instead.
Arguments
---------
face_data : :class:`list[list[dict[set[int]]]]`
Data encoding the oriented graded poset as follows:
:code:`j in face_data[n][k][o]` if and only if
:code:`El(n, k)` covers :code:`El(n-1, j)` with orientation
:code:`o`, where :code:`o == '-'` or :code:`o == '+'`.
coface_data: :class:`list[list[dict[set[int]]]]`
Data encoding the oriented graded poset as follows:
:code:`j in coface_data[n][k][o]` if and only if
:code:`El(n+1, j)` covers :code:`El(n, k)` with orientation
:code:`o`, where :code:`o == '-'` or :code:`o == '+'`.
Keyword arguments
-----------------
wfcheck : :class:`bool`
Check that the data is well-formed (default is :code:`True`)
matchcheck : :class:`bool`
Check that `face_data` and `coface_data` match
(default is :code:`True`)
Notes
-----
Each of `face_data`, `coface_data` determines the other uniquely.
There is an alternative constructor :meth:`from_face_data` that computes
`coface_data` from `face_data`.
Examples
--------
Let us construct explicitly the "oriented face poset" of an arrow, or
directed edge.
>>> face_data = [
... [
... {'-': set(), '+': set()},
... {'-': set(), '+': set()},
... ], [
... {'-': {0}, '+': {1}}
... ]]
>>> coface_data = [
... [
... {'-': {0}, '+': set()},
... {'-': set(), '+': {0}},
... ], [
... {'-': set(), '+': set()}
... ]]
>>> arrow = OgPoset(face_data, coface_data)
This has two 0-dimensional elements and one 1-dimensional element.
>>> arrow.size
[2, 1]
We can visualise its Hasse diagram, with orientation conveyed by colour
(magenta for input, blue for output) and direction of arrows.
>>> arrow.hasse(path='docs/_static/img/OgPoset_arrow.png')
.. image:: ../_static/img/OgPoset_arrow.png
:width: 400
:align: center
We can ask for the faces and cofaces of a specific element.
>>> arrow.faces(El(1, 0), '-')
GrSet(El(0, 0))
>>> arrow.cofaces(El(0, 1))
GrSet(El(1, 0))
We can construct other oriented graded posets using various operations,
such as suspensions, Gray products, joins, or duals.
>>> print(arrow.suspend())
OgPoset with [2, 2, 1] elements
>>> print(arrow * arrow)
OgPoset with [4, 4, 1] elements
>>> print(arrow >> arrow)
OgPoset with [4, 6, 4, 1] elements
>>> print(arrow.dual())
OgPoset with [2, 1] elements
"""
def __init__(self, face_data, coface_data, **params):
wfcheck = params.get('wfcheck', True)
matchcheck = params.get('matchcheck', True)
if wfcheck:
OgPoset._wfcheck(face_data)
if matchcheck:
if not coface_data == OgPoset._coface_from_face(face_data):
raise ValueError(utils.value_err(
coface_data, 'face and coface data do not match'))
self._face_data = face_data
self._coface_data = coface_data
# Enable method chaining syntax
self.gray = self._gray
self.join = self._join
self.suspend = self._suspend
self.dual = self._dual
if hasattr(self, 'atom'):
self.atom = self._atom
if hasattr(self, 'paste'):
self.paste = self._paste
def __str__(self):
return "{} with {} elements".format(
type(self).__name__, str(self.size))
def __getitem__(self, key):
return self.all()[key]
def __contains__(self, item):
if isinstance(item, El):
if item.dim <= self.dim:
if item.pos < self.size[item.dim]:
return True
return False
def __len__(self):
return sum(self.size)
def __iter__(self):
return iter(self.all())
def __eq__(self, other):
return isinstance(other, OgPoset) and \
self.face_data == other.face_data and \
self.coface_data == other.coface_data
def __add__(self, other):
return OgPoset.disjoint_union(self, other)
def __mul__(self, other):
return self.gray(other)
def __pow__(self, times):
utils.typecheck(times, {'type': int})
return self.__class__.gray(*[self for _ in range(times)])
def __rshift__(self, other):
return self.join(other)
def __lshift__(self, other):
return other.join(self)
def __invert__(self):
return self.dual()
@property
def face_data(self):
"""
Returns the face data as given to the object constructor.
An :class:`OgPoset` is meant to be immutable; create a new
object if you need to modify the face data.
Returns
-------
face_data : :class:`list[list[dict[set[int]]]]`
The face data as given to the object constructor.
"""
return self._face_data
@property
def coface_data(self):
"""
Returns the coface data as given to the object constructor.
An :class:`OgPoset` is meant to be immutable; create a new
object if you need to modify the coface data.
Returns
-------
coface_data : :class:`list[list[dict[set[int]]]]`
The coface data as given to the object constructor.
"""
return self._coface_data
@property
def size(self):
"""
Returns the number of elements in each dimension as a list.
Returns
-------
size : :class:`list[int]`
The :code:`k` th entry is the number of
:code:`k` -dimensional elements.
"""
return [len(_) for _ in self.face_data]
@property
def dim(self):
"""
Returns the dimension of the object, that is, the maximum of
the dimensions of its elements.
Returns
-------
dim : :class:`int`
The dimension of the object.
"""
return len(self.face_data) - 1
@property
def as_chain(self):
"""
Returns a "chain complex" representation of the face data.
Returns
-------
chain : :class:`list[numpy.array]`
Encodes the face data as follows:
:code:`chain[n][i][j] == 1` if
:code:`El(n, i)` is an output face of :code:`El(n+1, j)`,
:code:`-1` if it is an input face, :code:`0` otherwise.
"""
size, dim = self.size, self.dim
chain = [
np.zeros((size[i], size[i+1]), dtype=int) for i in range(dim)
]
for n, n_data in enumerate(self.coface_data):
for i, x in enumerate(n_data):
for j in x['-']:
chain[n][i][j] = -1
for j in x['+']:
chain[n][i][j] = 1
return chain
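# A small sketch (not in the original source), assuming `arrow` is the
# explicitly constructed example from the class docstring above: its chain
# representation is a single 2x1 matrix, with -1 marking the input vertex
# and +1 the output vertex of the 1-cell.
#
# >>> arrow.as_chain[0].tolist()
# [[-1], [1]]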
def all(self):
"""
Returns the closed subset of all elements.
Returns
-------
all : :class:`Closed`
The closed subset of all elements of the object.
"""
return Closed(
GrSet(*[El(n, k) for n, n_data in enumerate(self.face_data)
for k, x in enumerate(n_data)]),
self,
wfcheck=False)
def none(self):
"""
Returns the empty closed subset.
Returns
-------
none : :class:`Closed`
The closed subset with no elements.
"""
return Closed(GrSet(), self,
wfcheck=False)
def underset(self, *elements):
"""
Returns the closure of a set of elements in the object.
Arguments
---------
elements : :class:`El`
Any number of elements.
Returns
-------
underset : :class:`Closed`
The downwards closure of the given elements.
"""
return GrSubset(GrSet(*elements), self).closure()
def maximal(self):
"""
Returns the subset of maximal elements, that is, those
that are not covered by any elements.
Returns
-------
maximal : :class:`GrSubset`
The subset of maximal elements.
"""
maximal = GrSet()
for x in self:
if self.cofaces(x) == GrSet():
maximal.add(x)
return GrSubset(maximal, self,
wfcheck=False)
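# A small sketch (not in the original source), assuming `arrow` is the
# explicitly constructed example from the class docstring above: the only
# maximal element is the 1-cell, and its underset is the whole poset.
#
# >>> arrow.maximal().support
# GrSet(El(1, 0))
# >>> arrow.underset(El(1, 0)).support
# GrSet(El(0, 0), El(0, 1), El(1, 0))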
def faces(self, element, sign=None):
"""
Returns the faces of an element as a graded set.
Arguments
---------
element : :class:`El`
An element of the object.
sign : :class:`str`, optional
Orientation: :code:`'-'` for input, :code:`'+'` for output,
:code:`None` (default) for both.
Returns
-------
faces : :class:`GrSet`
The set of faces of the given element.
"""
if sign is None:
return self.faces(element, '-').union(
self.faces(element, '+'))
sign = utils.mksign(sign)
utils.typecheck(element, {
'type': El,
'st': lambda x: x.dim <= self.dim and x.pos < self.size[x.dim],
'why': 'out of bounds'})
if element.dim == 0:
return GrSet()
return GrSet(
*[El(element.dim-1, i)
for i in self.face_data[element.dim][element.pos][sign]]
)
def cofaces(self, element, sign=None):
"""
Returns the cofaces of an element as a graded set.
Arguments
---------
element : :class:`El`
An element of the object.
sign : :class:`str`, optional
Orientation: :code:`'-'` for input, :code:`'+'` for output,
:code:`None` (default) for both.
Returns
-------
cofaces : :class:`GrSet`
The set of cofaces of the given element.
"""
if sign is None:
return self.cofaces(element, '-').union(
self.cofaces(element, '+'))
sign = utils.mksign(sign)
utils.typecheck(element, {
'type': El,
'st': lambda x: x.dim <= self.dim and x.pos < self.size[x.dim],
'why': 'out of bounds'})
if element.dim == self.dim:
return GrSet()
return GrSet(
*[El(element.dim + 1, i)
for i in self.coface_data[element.dim][element.pos][sign]]
)
def id(self):
"""
Returns the identity map on the object.
Returns
-------
id : :class:`OgMap`
The identity map on the object.
"""
mapping = [
[
El(n, k) for k, x in enumerate(n_data)
]
for n, n_data in enumerate(self.face_data)
]
return OgMap(self, self, mapping,
wfcheck=False)
def image(self, ogmap):
"""
Returns the image of the object through a map.
Arguments
---------
ogmap : :class:`OgMap`
A map from the object to another :class:`OgPoset`.
Returns
-------
image : :class:`Closed`
The image of the object through the given map.
"""
return self.all().image(ogmap)
def boundary(self, sign=None, dim=None):
"""
Returns the inclusion of the boundary of a given orientation
and dimension into the object.
Arguments
---------
sign : :class:`str`, optional
Orientation: :code:`'-'` for input, :code:`'+'` for output,
:code:`None` (default) for both.
dim : :class:`int`, optional
Dimension of the boundary (default is :code:`self.dim - 1`).
Returns
-------
boundary : :class:`OgMap`
The inclusion of the requested boundary into the object.
"""
if isinstance(dim, int) and dim >= self.dim:
return self.id()
return self.all().boundary(sign, dim).as_map
@property
def input(self):
"""
Alias for :code:`boundary('-')`.
"""
return self.boundary('-')
@property
def output(self):
"""
Alias for :code:`boundary('+')`.
"""
return self.boundary('+')
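# A small sketch (not in the original source), again for the `arrow` of the
# class docstring: the input (resp. output) boundary is the inclusion of the
# input (resp. output) vertex, so each boundary map has a one-element source.
#
# >>> arrow.input.source.size
# [1]
# >>> arrow.input[El(0, 0)]
# El(0, 0)
# >>> arrow.output[El(0, 0)]
# El(0, 1)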
@classmethod
def from_face_data(cls, face_data,
**params):
"""
Alternative constructor computing `coface_data` from `face_data`.
Arguments
---------
face_data : :class:`list[list[dict[set[int]]]]`
As in the main constructor.
Keyword arguments
-----------------
wfcheck : :class:`bool`
Check that the data is well-formed (default is :code:`True`).
"""
wfcheck = params.get('wfcheck', True)
if wfcheck:
cls._wfcheck(face_data)
coface_data = cls._coface_from_face(face_data)
return cls(face_data, coface_data,
wfcheck=False, matchcheck=False)
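# A small sketch (not in the original source), reusing the `face_data` and
# `arrow` of the class docstring: since coface data is determined by face
# data, rebuilding the arrow from its face data alone recovers an equal
# object.
#
# >>> OgPoset.from_face_data(face_data) == arrow
# True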
@staticmethod
def empty():
"""
Returns the initial oriented graded poset, with no elements.
Returns
-------
empty : :class:`OgPoset`
The empty oriented graded poset.
"""
return OgPoset([], [],
wfcheck=False, matchcheck=False)
@staticmethod
def point():
"""
Returns the terminal oriented graded poset, with a single element.
Returns
-------
point : :class:`OgPoset`
The oriented graded poset with a single element.
"""
return OgPoset(
[[{'-': set(), '+': set()}]],
[[{'-': set(), '+': set()}]],
wfcheck=False, matchcheck=False)
@staticmethod
def coproduct(fst, snd):
"""
Returns the coproduct cospan of two oriented graded posets.
Arguments
---------
fst : :class:`OgPoset`
The first factor of the coproduct.
snd : :class:`OgPoset`
The second factor of the coproduct.
Returns
-------
coproduct : :class:`OgMapPair`
The coproduct cospan.
"""
for x in fst, snd:
utils.typecheck(x, {'type': OgPoset})
# Need to ensure all the data have the same length.
offset1 = max(snd.dim - fst.dim, 0)
offset2 = max(fst.dim - snd.dim, 0)
shift = fst.size
shift = shift + [0 for _ in range(offset1)]
face_data_fst = [
[
{sign: faces for sign, faces in x.items()}
for x in n_data
]
for n_data in fst.face_data] + [[] for _ in range(offset1)]
face_data_snd = [
[
{sign:
{k + shift[n-1] for k in faces}
for sign, faces in x.items()}
for x in n_data
]
for n, n_data in enumerate(snd.face_data)] + [
[] for _ in range(offset2)]
face_data = [x + y
for x, y in zip(face_data_fst, face_data_snd)]
coface_data_fst = [
[
{sign: cofaces for sign, cofaces in x.items()}
for x in n_data
]
for n_data in fst.coface_data] + [
[] for _ in range(offset1)]
coface_data_snd = [
[
{sign:
{k + shift[n+1] for k in cofaces}
for sign, cofaces in x.items()}
for x in n_data
]
for n, n_data in enumerate(snd.coface_data)] + [
[] for _ in range(offset2)]
coface_data = [x + y
for x, y in zip(coface_data_fst, coface_data_snd)]
disjoint_union = OgPoset(face_data, coface_data,
wfcheck=False, matchcheck=False)
mapping_fst = fst.id().mapping
mapping_snd = [
[x.shifted(shift[n]) for x in n_data]
for n, n_data in enumerate(snd.id().mapping)
]
inclusion_fst = OgMap(fst, disjoint_union, mapping_fst,
wfcheck=False)
inclusion_snd = OgMap(snd, disjoint_union, mapping_snd,
wfcheck=False)
return OgMapPair(inclusion_fst, inclusion_snd)
@staticmethod
def disjoint_union(fst, snd):
"""
Returns the disjoint union of two oriented graded posets, that is,
the target of their coproduct cospan.
This method can be called with the math operator :code:`+`, that is,
:code:`fst + snd` is equivalent to :code:`disjoint_union(fst, snd)`.
Arguments
---------
fst : :class:`OgPoset`
The first factor of the disjoint union.
snd : :class:`OgPoset`
The second factor of the disjoint union.
Returns
-------
disjoint_union : :class:`OgPoset`
The disjoint union of the two.
"""
return OgPoset.coproduct(fst, snd).target
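# A small sketch (not in the original source): the coproduct concatenates
# elements dimensionwise, so two copies of the docstring's `arrow`
# (size [2, 1]) give an object of size [4, 2].
#
# >>> (arrow + arrow).size
# [4, 2]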
@staticmethod
def suspend(ogp, n=1):
"""
Returns the n-fold suspension of an oriented graded poset.
This static method can also be used as a bound method after
an object is initialised, that is, :code:`ogp.suspend(n)` is
equivalent to :code:`suspend(ogp, n)`.
Arguments
---------
ogp : :class:`OgPoset`
The object to suspend.
n : :class:`int`, optional
The number of iterations of the suspension (default is 1).
Returns
-------
suspension : :class:`OgPoset`
The suspended object.
"""
utils.typecheck(ogp, {'type': OgPoset})
utils.typecheck(n, {
'type': int,
'st': lambda n: n >= 0,
'why': 'expecting non-negative integer'})
if n == 0:
return ogp
if n > 1:
return OgPoset.suspend(
OgPoset.suspend(ogp, 1), n-1)
face_data = [
[
{'-': set(), '+': set()},
{'-': set(), '+': set()}
],
[
{'-': x['-'].union({0}), '+': x['+'].union({1})}
for x in ogp.face_data[0]
],
*ogp.face_data[1:]]
coface_data = [
[
{'-': {x.pos for x in ogp[0]}, '+': set()},
{'-': set(), '+': {x.pos for x in ogp[0]}}
], *ogp.coface_data]
return OgPoset(face_data, coface_data,
wfcheck=False, matchcheck=False)
def _suspend(self, n=1):
return self.__class__.suspend(self, n)
@staticmethod
def gray(*ogps):
"""
Returns the Gray product of any number of oriented graded posets.
This method can be called with the math operator :code:`*`, that is,
:code:`fst * snd` is equivalent to :code:`gray(fst, snd)`.
This static method can also be used as a bound method after an object
is initialised, that is, :code:`fst.gray(*ogps)` is equivalent to
:code:`gray(fst, *ogps)`.
Arguments
---------
*ogps : :class:`OgPoset`
Any number of oriented graded posets.
Returns
-------
gray : :class:`OgPoset`
The Gray product of the arguments.
"""
if len(ogps) == 0:
return OgPoset.point()
if len(ogps) == 1:
utils.typecheck(ogps[0], {'type': OgPoset})
return ogps[0]
if len(ogps) > 2:
others = ogps[2:]
return OgPoset.gray(OgPoset.gray(ogps[0], ogps[1]), *others)
fst, snd = ogps[0], ogps[1]
for x in (fst, snd):
utils.typecheck(x, {'type': OgPoset})
if len(fst) == 0 or len(snd) == 1:
return fst
if len(fst) == 1 or len(snd) == 0:
return snd
size1 = fst.size + [0 for _ in range(snd.dim)]
size2 = snd.size + [0 for _ in range(fst.dim)]
def pair(x, y):
dim = x.dim + y.dim
pos = y.pos + x.pos*size2[y.dim] + sum(
[size1[k]*size2[dim-k] for k in range(x.dim)])
return El(dim, pos)
def sndsign(n, sign):
if n % 2 == 1:
return utils.flip(sign)
return sign
face_data = [[] for _ in range(fst.dim + snd.dim + 1)]
coface_data = [[] for _ in range(fst.dim + snd.dim + 1)]
for x in fst:
for y in snd:
dim = x.dim + y.dim
face_data[dim].append(
{sign:
{pair(z, y).pos
for z in fst.faces(x, sign)
}.union(
{pair(x, z).pos
for z in snd.faces(y, sndsign(x.dim, sign))
})
for sign in ('-', '+')})
coface_data[dim].append(
{sign:
{pair(z, y).pos
for z in fst.cofaces(x, sign)
}.union(
{pair(x, z).pos
for z in snd.cofaces(y, sndsign(x.dim, sign))
})
for sign in ('-', '+')})
return OgPoset(face_data, coface_data,
wfcheck=False, matchcheck=False)
def _gray(self, *others):
return self.__class__.gray(self, *others)
def bot(self):
"""
Returns the object augmented with a bottom element, covered
with orientation :code:`'+'`.
Returns
-------
bot : :class:`OgPoset`
The object augmented with a bottom element.
"""
if len(self) == 0:
return OgPoset.point()
face_data = [
[{'-': set(), '+': set()}],
[{'-': x['-'], '+': x['+'].union({0})}
for x in self.face_data[0]],
*self.face_data[1:]]
coface_data = [[
{'-': set(), '+': {k for k in range(self.size[0])}}
], *self.coface_data]
return OgPoset(face_data, coface_data,
wfcheck=False, matchcheck=False)
@staticmethod
def join(*ogps):
"""
Returns the join of any number of oriented graded posets.
This method can be called with the shift operators :code:`>>`
and :code:`<<`, that is, :code:`fst >> snd` is equivalent to
:code:`join(fst, snd)` and :code:`fst << snd` is equivalent to
:code:`join(snd, fst)`.
This static method can also be used as a bound method after an
object is initialised, that is, :code:`fst.join(*ogps)` is
equivalent to :code:`join(fst, *ogps)`.
Arguments
---------
*ogps : :class:`OgPoset`
Any number of oriented graded posets.
Returns
-------
join : :class:`OgPoset`
The join of the arguments.
"""
if len(ogps) == 0:
return OgPoset.empty()
if len(ogps) == 1:
utils.typecheck(ogps[0], {'type': OgPoset})
return ogps[0]
if len(ogps) > 2:
others = ogps[2:]
return OgPoset.join(OgPoset.join(ogps[0], ogps[1]), *others)
fst, snd = ogps[0], ogps[1]
for x in (fst, snd):
utils.typecheck(x, {'type': OgPoset})
if len(fst) == 0:
return snd
if len(snd) == 0:
return fst
join_bot = OgPoset.gray(
fst.bot(),
snd.bot())
face_data = join_bot.face_data[1:]
for x in face_data[0]:
x['+'].clear()
coface_data = join_bot.coface_data[1:]
return OgPoset(face_data, coface_data,
wfcheck=False, matchcheck=False)
def _join(self, *others):
return self.__class__.join(self, *others)
@staticmethod
def dual(ogp, *dims):
"""
Returns an oriented graded poset with orientations reversed
in given dimensions.
The dual in all dimensions can also be called with the bit negation
operator :code:`~`, that is, :code:`~ogp` is equivalent to
:code:`ogp.dual()`.
This static method can also be used as a bound method after an object
is initialised, that is, :code:`ogp.dual(*dims)` is equivalent to
:code:`dual(ogp, *dims)`.
Arguments
---------
ogp : :class:`OgPoset`
An oriented graded poset.
*dims : :class:`int`
Any number of dimensions; if none, defaults to *all* dimensions.
Returns
-------
dual : :class:`OgPoset`
The oriented graded poset, dualised in the given dimensions.
"""
utils.typecheck(ogp, {'type': OgPoset})
for x in dims:
utils.typecheck(x, {'type': int})
if len(dims) == 0: # default is dualising in all dimensions
dims = range(ogp.dim + 1)
face_data = [
[
{sign: x[utils.flip(sign)] if n in dims else x[sign]
for sign in ('-', '+')}
for x in n_data
]
for n, n_data in enumerate(ogp.face_data)]
coface_data = [
[
{sign: x[utils.flip(sign)] if n+1 in dims else x[sign]
for sign in ('-', '+')}
for x in n_data
]
for n, n_data in enumerate(ogp.coface_data)]
return OgPoset(face_data, coface_data,
wfcheck=False, matchcheck=True)
def _dual(self, *dims):
return self.__class__.dual(self, *dims)
def op(self):
"""
Returns the :meth:`dual` in all *odd* dimensions.
"""
odds = [n for n in range(self.dim + 1) if n % 2 == 1]
return self.dual(*odds)
def co(self):
"""
Returns the :meth:`dual` in all *even* dimensions.
"""
evens = [n for n in range(self.dim + 1) if n % 2 == 0]
return self.dual(*evens)
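# A small sketch (not in the original source), for the `arrow` of the class
# docstring: op() flips the odd-dimensional orientations, so the input face
# of the 1-cell becomes the other vertex, while sizes are unchanged.
#
# >>> arrow.op().faces(El(1, 0), '-')
# GrSet(El(0, 1))
# >>> arrow.op().size == arrow.size
# True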
def hasse(self, **params):
"""
Bound version of :meth:`hasse.draw`.
Calling :code:`x.hasse(**params)` is equivalent to calling
:code:`hasse.draw(x, **params)`.
"""
from rewalt.hasse import draw
return draw(self, **params)
# Private methods
@staticmethod
def _wfcheck(face_data):
""" Internal method checking that face data is well-formed. """
utils.typecheck(face_data, {'type': list}, {
'type': list,
'st': lambda x: len(x) > 0,
'why': 'expecting non-empty list'
}, {
'type': dict,
'st': lambda x: x.keys() == {'-', '+'},
'why': "expecting dict with keys '-', '+'"
}, {'type': set}, {
'type': int,
'st': lambda x: x >= 0,
'why': 'expecting non-negative integer'})
sizes = [len(n_data) for n_data in face_data]
for n, n_data in enumerate(face_data):
# Check that faces are within bounds.
k = max([i for x in n_data for sign in x for i in x[sign]],
default=-1)
if (n == 0 and k >= 0) or k >= sizes[n-1]:
raise ValueError(utils.value_err(k, 'out of bounds'))
# Check that input/output are inhabited and disjoint.
for i, x in enumerate(n_data):
if not x['-'].isdisjoint(x['+']):
raise ValueError(utils.value_err(
face_data,
'input and output faces of El({}, {}) '
'are not disjoint'.format(
repr(n), repr(i))))
if n > 0 and x['-'] == x['+'] == set():
raise ValueError(utils.value_err(
face_data,
'El({}, {}) must have at least one face'.format(
repr(n), repr(i))))
@staticmethod
def _coface_from_face(face_data):
"""
Internal method constructing coface data from face data.
Face data is presumed to be well-formed.
"""
coface_data = [
[
{'-': set(), '+': set()} for _ in n_data
]
for n_data in face_data]
for n, sn_data in enumerate(face_data[1:]):
for k, x in enumerate(sn_data):
for sign in '-', '+':
for i in x[sign]:
coface_data[n][i][sign].add(k)
return coface_data
class GrSet:
"""
Class for sets of elements of an oriented graded poset, graded
by their dimension.
Objects of the class behave as sets; several methods of the set class
are supported. However the data is stored in a way that allows
fast access to elements of a given dimension.
Arguments
---------
elements : :class:`El`
Any number of elements.
Examples
--------
We create an instance by listing elements; repetitions do not count.
>>> test = GrSet(El(0, 2), El(0, 2), El(0, 3), El(2, 0), El(3, 1))
>>> test
GrSet(El(0, 2), El(0, 3), El(2, 0), El(3, 1))
>>> len(test)
4
We can access the subsets of elements of given dimensions with indexer
operators. These support slice syntax.
>>> test[0]
GrSet(El(0, 2), El(0, 3))
>>> test[0:3]
GrSet(El(0, 2), El(0, 3), El(2, 0))
The iterator for graded sets goes through the elements in increasing
dimension and, for each dimension, in increasing position.
>>> for x in test:
... print(x)
...
El(0, 2)
El(0, 3)
El(2, 0)
El(3, 1)
We can add and remove elements.
>>> test.remove(El(0, 3))
>>> test
GrSet(El(0, 2), El(2, 0), El(3, 1))
>>> test.add(El(1, 1))
>>> test
GrSet(El(0, 2), El(1, 1), El(2, 0), El(3, 1))
Set methods such as union, difference, and intersection are available
with the same syntax.
"""
def __init__(self, *elements):
self._elements = {}
for x in elements:
self.add(x)
def __repr__(self):
return "{}({})".format(
type(self).__name__,
', '.join([repr(x) for x in self.as_list]))
def __str__(self):
return repr(self)
def __contains__(self, item):
if isinstance(item, El):
if item.dim in self.grades:
if item.pos in self._elements[item.dim]:
return True
return False
def __len__(self):
""" Returns the total number of elements. """
return len(self.as_list)
def __iter__(self):
""" The iterator is the iterator of the element list. """
return iter(self.as_list)
def __getitem__(self, key):
if isinstance(key, int) and key >= -2:
if key in self.grades:
return GrSet(*[El(key, k) for k in self._elements[key]])
return GrSet()
if isinstance(key, slice):
stop = key.stop if key.stop is not None else self.dim + 1
indices = list(range(stop)[key])
return GrSet().union(*[self[n] for n in indices])
raise KeyError(str(key))
def __eq__(self, other):
return isinstance(other, GrSet) and \
self._elements == other._elements
@property
def grades(self):
"""
Returns the list of dimensions in which the graded set is not empty.
Returns
-------
grades : :class:`list[int]`
The list of dimensions in which the graded set is not empty.
"""
return sorted(self._elements.keys())
@property
def dim(self):
"""
Returns the maximal dimension in which the graded set is not empty,
or -1 if it is empty.
Returns
-------
dim : :class:`int`
The maximal dimension in which the graded set is not empty.
"""
return max(self.grades, default=-1)
@property
def as_set(self):
"""
Returns a Python set containing the same elements.
Returns
-------
as_set : :class:`set[El]`
A Python set containing the same elements.
"""
return {El(n, k) for n in self._elements for k in self._elements[n]}
@property
def as_list(self):
"""
Returns the list of elements in increasing dimension, and,
dimensionwise, in increasing position.
Returns
-------
as_list : :class:`list[El]`
A list containing the same elements.
"""
return [El(n, k) for n in sorted(self._elements)
for k in sorted(self._elements[n])]
def add(self, element):
"""
Adds a single element.
Arguments
---------
element : :class:`El`
The element to add.
"""
utils.typecheck(element, {'type': El})
if element.dim in self.grades:
self._elements[element.dim].add(element.pos)
else:
self._elements[element.dim] = {element.pos}
def remove(self, element):
"""
Removes a single element.
Arguments
---------
element : :class:`El`
The element to remove.
"""
if element not in self:
raise ValueError(utils.value_err(
element, 'not in graded set'))
self._elements[element.dim].remove(element.pos)
if self._elements[element.dim] == set():
del self._elements[element.dim]
def union(self, *others):
"""
Returns the union of the graded set with other graded sets.
Arguments
---------
*others : :class:`GrSet`
Any number of graded sets.
Returns
-------
union : :class:`GrSet`
The union of the graded set with all the given others.
"""
for x in others:
utils.typecheck(x, {'type': GrSet})
union_as_set = self.as_set.union(*[x.as_set for x in others])
return GrSet(*union_as_set)
def intersection(self, *others):
"""
Returns the intersection of the graded set with other graded sets.
Arguments
---------
*others : :class:`GrSet`
Any number of graded sets.
Returns
-------
intersection : :class:`GrSet`
The intersection of the graded set with all the given others.
"""
for x in others:
utils.typecheck(x, {'type': GrSet})
intersection_as_set = self.as_set.intersection(
*[x.as_set for x in others])
return GrSet(*intersection_as_set)
def difference(self, other):
"""
Returns the difference of the graded set with another graded set.
Arguments
---------
other : :class:`GrSet`
Another graded set.
Returns
-------
difference : :class:`GrSet`
The difference between the two graded sets.
"""
utils.typecheck(other, {'type': GrSet})
difference_as_set = self.as_set.difference(other.as_set)
return GrSet(*difference_as_set)
def issubset(self, other):
"""
Returns whether the graded set is a subset of another.
Arguments
---------
other : :class:`GrSet`
Another graded set.
Returns
-------
issubset : :class:`bool`
:code:`True` if and only if `self` is a subset of `other`.
"""
utils.typecheck(other, {'type': GrSet})
return self.as_set.issubset(other.as_set)
def isdisjoint(self, other):
"""
Returns whether the graded set is disjoint from another.
Arguments
---------
other : :class:`GrSet`
Another graded set.
Returns
-------
isdisjoint : :class:`bool`
:code:`True` if and only if `self` and `other` are disjoint.
"""
utils.typecheck(other, {'type': GrSet})
return self.as_set.isdisjoint(other.as_set)
def copy(self):
"""
Returns a copy of the graded set.
Returns
-------
copy : :class:`GrSet`
A copy of the graded set.
"""
return GrSet(*self)
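# A quick doctest-style sketch of the set-like operations above (not part
# of the original module):
#
# >>> a = GrSet(El(0, 0), El(1, 0))
# >>> b = GrSet(El(0, 0), El(0, 1))
# >>> a.union(b)
# GrSet(El(0, 0), El(0, 1), El(1, 0))
# >>> a.intersection(b)
# GrSet(El(0, 0))
# >>> a.difference(b)
# GrSet(El(1, 0))
# >>> a.issubset(a.union(b))
# True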
class GrSubset:
"""
Class for graded subsets, that is, pairs of a :class:`GrSet` and an
"ambient" :class:`OgPoset`, where the first is seen as a subset of
the second.
While objects of the class :class:`GrSet` are mutable, once they are
tied to an :class:`OgPoset` they should be treated as immutable.
Arguments
---------
support : :class:`GrSet`
The underlying graded set.
ambient : :class:`OgPoset`
The ambient oriented graded poset.
Keyword arguments
-----------------
wfcheck : :class:`bool`
Check whether the support is a well-formed subset of the ambient,
that is, it has no elements out of range (default is :code:`True`).
Notes
-----
Two graded subsets are equal if and only if they have the same
elements, *and* they are subsets of the same :class:`OgPoset`.
Examples
--------
We create an oriented graded poset and a pair of graded sets.
>>> point = OgPoset.point()
>>> triangle = point >> point >> point
>>> set1 = GrSet(El(1, 1), El(0, 1))
>>> set2 = GrSet(El(0, 3))
We can attach :code:`set1` to :code:`triangle` as a subset.
>>> subset = GrSubset(set1, triangle)
>>> assert subset.support == set1
Trying to do the same with :code:`set2` raises a :class:`ValueError`
because :code:`El(0, 3)` is out of range.
We can compute the downwards closure of :code:`set1` in
:code:`triangle`.
>>> subset.closure().support
GrSet(El(0, 0), El(0, 1), El(0, 2), El(1, 1))
All the set-theoretic operations apply to graded subsets as long
as they have the same ambient :class:`OgPoset`.
"""
def __init__(self, support, ambient, **params):
wfcheck = params.get('wfcheck', True)
if wfcheck:
GrSubset._wfcheck(support, ambient)
self._support = support
self._ambient = ambient
def __eq__(self, other):
return isinstance(other, GrSubset) and \
self.support == other.support and \
self.ambient == other.ambient
def __str__(self):
return '{} with {} elements in {}'.format(
type(self).__name__,
str(len(self.support)),
str(self.ambient))
def __contains__(self, item):
return item in self.support
def __len__(self):
return len(self.support)
def __getitem__(self, key):
return GrSubset(self.support[key], self.ambient,
wfcheck=False)
def __iter__(self):
return iter(self.support)
@property
def support(self):
"""
Returns the underlying graded set (the "support" of the subset).
Returns
-------
support : :class:`GrSet`
The underlying graded set.
"""
return self._support
@property
def ambient(self):
"""
Returns the ambient oriented graded poset.
Returns
-------
ambient : :class:`OgPoset`
The ambient oriented graded poset.
"""
return self._ambient
@property
def dim(self):
"""
Shorthand for :code:`support.dim`.
"""
return self.support.dim
@property
def isclosed(self):
"""
Returns whether the subset is (downwards) closed.
Returns
-------
isclosed : :class:`bool`
:code:`True` if and only if the subset is downwards closed.
"""
for n in range(self.dim, 0, -1):
for x in self[n]:
for face in self.ambient.faces(x):
if face not in self:
return False
return True
def union(self, *others):
"""
Returns the union with other graded subsets of the same oriented
graded poset.
Arguments
---------
*others : :class:`GrSubset`
Any number of graded subsets of the same oriented graded poset.
Returns
-------
union : :class:`GrSubset`
The union of the graded subset with all the given others.
Notes
-----
If all the arguments have type :class:`Closed`, the union also
has type :class:`Closed`.
"""
others_support = []
same_type = True
for x in others:
utils.typecheck(x, {
'type': GrSubset,
'st': lambda x: x.ambient == self.ambient,
'why': 'not a subset of the same OgPoset'
})
if type(x) is not type(self):
same_type = False
others_support.append(x.support)
union = self.support.union(*others_support)
if same_type: # return a Closed iff all are Closed
return self.__class__(union, self.ambient,
wfcheck=False)
return GrSubset(union, self.ambient,
wfcheck=False)
def intersection(self, *others):
"""
Returns the intersection with other graded subsets of the same
oriented graded poset.
Arguments
---------
*others : :class:`GrSubset`
Any number of graded subsets of the same oriented graded poset.
Returns
-------
intersection : :class:`GrSubset`
The intersection of the graded subset with all the given others.
Notes
-----
If all the arguments have type :class:`Closed`, the intersection also
has type :class:`Closed`.
"""
others_support = []
same_type = True
for x in others:
utils.typecheck(x, {
'type': GrSubset,
'st': lambda x: x.ambient == self.ambient,
'why': 'not a subset of the same OgPoset'
})
if type(x) is not type(self):
same_type = False
others_support.append(x.support)
intersection = self.support.intersection(*others_support)
if same_type: # return a Closed iff all are Closed
return self.__class__(intersection, self.ambient,
wfcheck=False)
return GrSubset(intersection, self.ambient,
wfcheck=False)
def difference(self, other):
"""
Returns the difference with another graded subset of the same
oriented graded poset.
Arguments
---------
other : :class:`GrSubset`
Another graded subset of the same oriented graded poset.
Returns
-------
difference : :class:`GrSubset`
The difference between the two graded subsets.
"""
utils.typecheck(other, {
'type': GrSubset,
'st': lambda x: x.ambient == self.ambient,
'why': 'not a subset of the same OgPoset'
})
difference = self.support.difference(other.support)
return GrSubset(difference, self.ambient,
wfcheck=False)
def issubset(self, other):
"""
Returns whether the object is a subset of another subset of
the same oriented graded poset.
Arguments
---------
other : :class:`GrSubset`
Another graded subset of the same oriented graded poset.
Returns
-------
issubset : :class:`bool`
:code:`True` if and only if `self` is a subset of `other`.
"""
utils.typecheck(other, {
'type': GrSubset,
'st': lambda x: x.ambient == self.ambient,
'why': 'not a subset of the same OgPoset'
})
return self.support.issubset(other.support)
def isdisjoint(self, other):
"""
Returns whether the object is disjoint from another graded subset
of the same oriented graded poset.
Arguments
---------
other : :class:`GrSubset`
Another graded subset of the same oriented graded poset.
Returns
-------
isdisjoint : :class:`bool`
:code:`True` if and only if `self` and `other` are disjoint.
"""
utils.typecheck(other, {
'type': GrSubset,
'st': lambda x: x.ambient == self.ambient,
'why': 'not a subset of the same OgPoset'
})
return self.support.isdisjoint(other.support)
def closure(self):
"""
Returns the downwards closure of the graded subset.
Returns
-------
closure : :class:`Closed`
The downwards closure of the subset.
"""
closure = self.support.copy()
for n in range(self.dim, 0, -1):
for element in closure[n]:
for face in self.ambient.faces(element):
closure.add(face)
return Closed(closure, self.ambient,
wfcheck=False)
def image(self, ogmap):
"""
Returns the image of the graded subset through a map of oriented
graded posets.
Arguments
---------
ogmap : :class:`OgMap`
A map from the ambient to another :class:`OgPoset`.
Returns
-------
image : :class:`GrSubset`
The image of the subset through the given map.
Notes
-----
If the object has type :class:`Closed`, its image has also type
:class:`Closed`.
"""
utils.typecheck(ogmap, {
'type': OgMap,
'st': lambda x: x.source == self.ambient,
'why': 'OgMap source does not match ambient OgPoset'})
image = GrSet()
for x in self:
if ogmap.isdefined(x):
image.add(ogmap[x])
# image of a Closed through an OgMap is Closed
return self.__class__(image, ogmap.target,
wfcheck=False)
# Internal methods
@staticmethod
def _wfcheck(support, ambient):
utils.typecheck(support, {'type': GrSet})
utils.typecheck(ambient, {'type': OgPoset})
if support.dim > ambient.dim:
raise ValueError(utils.value_err(
support, 'does not define a subset'))
for n in support.grades:
if max([x.pos for x in support[n]]) >= ambient.size[n]:
raise ValueError(utils.value_err(
support, 'does not define a subset'))
class Closed(GrSubset):
"""
Subclass of :class:`GrSubset` for (downwards) closed subsets.
Implements a number of methods that do not make sense for
non-closed subsets, in particular those computing input and output
boundaries in each dimension.
Arguments
---------
support : :class:`GrSet`
The underlying graded set.
ambient : :class:`OgPoset`
The ambient oriented graded poset.
Keyword arguments
-----------------
wfcheck : :class:`bool`
Check whether the support is a well-formed, closed subset of
the ambient (default is :code:`True`).
Notes
-----
There is an alternative constructor :meth:`subset` which takes
a :class:`GrSubset`, and "upgrades" it to a :class:`Closed` if it
is downwards closed.
Examples
--------
After creating an oriented graded poset, we can obtain the closed
subset of *all* its elements with :meth:`OgPoset.all`.
>>> point = OgPoset.point()
>>> triangle = point >> point >> point
>>> all = triangle.all()
We can compute its input and output boundary...
>>> all_in = all.input
>>> all_out = all.output
And since :code:`all` happens to be a *molecule*, we can check the
"globular" relations.
>>> assert all_in.input == all_out.input
>>> assert all_in.output == all_out.output
"""
def __init__(self, support, ambient, **params):
wfcheck = params.get('wfcheck', True)
super().__init__(support, ambient, **params)
if wfcheck:
if not self.isclosed:
raise ValueError(utils.value_err(
support, 'not a closed subset'))
@property
def as_map(self):
"""
Returns an injective map representing the inclusion of the closed
subset in the ambient.
Returns
-------
as_map : :class:`OgMap`
A map of oriented graded posets representing the inclusion of
the closed subset.
"""
mapping = [self.support[n].as_list
for n in range(self.dim + 1)]
face_data = [
[
{sign:
{mapping[n-1].index(y)
for y in self.ambient.faces(x, sign)}
for sign in ('-', '+')}
for x in n_data]
for n, n_data in enumerate(mapping)]
source = OgPoset.from_face_data(face_data, wfcheck=False)
return OgMap(source, self.ambient, mapping,
wfcheck=False)
@property
def ispure(self):
"""
Returns whether the maximal elements of the closed subset all have
the same dimension.
Returns
-------
ispure : :class:`bool`
:code:`True` if and only if the subset is pure.
"""
for x in self.maximal():
if x.dim != self.dim:
return False
return True
@property
def isround(self):
"""
Returns whether the closed subset is round ("has spherical
boundary").
This means that, for all :code:`k` smaller than the dimension of
the subset, the intersection of its input :code:`k`-boundary
and of its output :code:`k`-boundary is equal to its :code:`(k-1)`-
boundary.
Returns
-------
isround : :class:`bool`
:code:`True` if and only if the subset is round.
"""
if not self.ispure:
return False
boundary_in = self.boundary('-')
boundary_out = self.boundary('+')
intersection = boundary_in.intersection(boundary_out)
for k in range(self.dim-2, -1, -1):
boundary_in = boundary_in.boundary('-')
boundary_out = boundary_out.boundary('+')
if not intersection.issubset(boundary_in.union(boundary_out)):
return False
intersection = boundary_in.intersection(boundary_out)
return True
def maximal(self):
"""
Returns the subset of maximal elements, that is, those that are not
covered by any other element in the closed subset.
Returns
-------
maximal : :class:`GrSubset`
The subset of maximal elements.
"""
maximal = GrSet()
for x in self:
if self.ambient.cofaces(x).isdisjoint(
self.support[x.dim + 1]):
maximal.add(x)
return GrSubset(maximal, self.ambient,
wfcheck=False)
def boundary_max(self, sign=None, dim=None):
"""
Returns the subset of maximal elements of the boundary of a given
orientation and dimension.
Arguments
---------
sign : :class:`str`, optional
Orientation: :code:`'-'` for input, :code:`'+'` for output,
:code:`None` (default) for both.
dim : :class:`int`, optional
Dimension of the boundary (default is :code:`self.dim - 1`).
Returns
-------
boundary_max : :class:`GrSubset`
The maximal elements of the requested boundary.
"""
_sign = utils.flip(
utils.mksign(sign)) if sign is not None else '-'
dim = self.dim-1 if dim is None else dim
boundary_max = self.maximal().support[:dim]
for x in self[dim]:
if self.ambient.cofaces(x, _sign).isdisjoint(
self.support[x.dim + 1]):
boundary_max.add(x)
if sign is None and self.ambient.cofaces(x, '+').isdisjoint(
self.support[x.dim + 1]):
boundary_max.add(x)
return GrSubset(boundary_max, self.ambient,
wfcheck=False)
def boundary(self, sign=None, dim=None):
"""
Returns the boundary of a given orientation and dimension.
Arguments
---------
sign : :class:`str`, optional
Orientation: :code:`'-'` for input, :code:`'+'` for output,
:code:`None` (default) for both.
dim : :class:`int`, optional
Dimension of the boundary (default is :code:`self.dim - 1`).
Returns
-------
boundary : :class:`Closed`
The requested boundary subset.
"""
if isinstance(dim, int) and dim >= self.dim:
return self
return self.boundary_max(sign, dim).closure()
@property
def input(self):
"""
Alias for :code:`boundary('-')`.
"""
return self.boundary('-')
@property
def output(self):
"""
Alias for :code:`boundary('+')`.
"""
return self.boundary('+')
@staticmethod
def subset(grsubset, **params):
"""
Alternative constructor that promotes a :class:`GrSubset` to a
:class:`Closed`.
Arguments
---------
grsubset : :class:`GrSubset`
The subset to promote.
Keyword arguments
-----------------
wfcheck : :class:`bool`
Check whether the subset is downwards closed
(default is :code:`True`).
"""
wfcheck = params.get('wfcheck', True)
if wfcheck:
if not grsubset.isclosed:
raise ValueError(utils.value_err(
    grsubset.support, 'not a closed subset'))
return Closed(grsubset.support, grsubset.ambient,
wfcheck=False)
class OgMap:
"""
Class for (partial) maps of oriented graded posets, compatible with
boundaries.
To define a map on one element, it must have been defined on all
elements below it. The assignment can be made all at once, or element
by element. Once the map has been defined on an element, the assignment
cannot be modified.
Arguments
---------
source : :class:`OgPoset`
The source (domain) of the map.
target : :class:`OgPoset`
The target (codomain) of the map.
mapping : :class:`list[list[El]]`, optional
Data specifying the partial map as follows:
:code:`mapping[n][k] == El(m, j)` if the map sends :code:`El(n, k)`
to :code:`El(m, j)`, and :code:`None` if the map is undefined
on :code:`El(n, k)` (default is the nowhere defined map).
Keyword arguments
-----------------
wfcheck : :class:`bool`
Check whether the data defines a well-formed map compatible with
all boundaries (default is :code:`True`).
Notes
-----
Objects of the class are callable on objects of type :class:`El`
(returning the image of an element) and of type :class:`GrSubset` and
:class:`GrSet` (returning the image of a subset of their source).
Examples
--------
Let us create two simple oriented graded posets, the "point" and the
"arrow".
>>> point = OgPoset.point()
>>> arrow = point >> point
We define the map that collapses the arrow onto the point. First we
create a nowhere defined map.
>>> collapse = OgMap(arrow, point)
>>> assert not collapse.istotal
We declare the assignment first on the 0-dimensional elements, then on
the single 1-dimensional element. Trying to do otherwise results in
a :class:`ValueError`.
>>> collapse[El(0, 0)] = El(0, 0)
>>> collapse[El(0, 1)] = El(0, 0)
>>> collapse[El(1, 0)] = El(0, 0)
We can check various properties of the map.
>>> assert collapse.istotal
>>> assert collapse.issurjective
>>> assert not collapse.isinjective
Alternatively, we could have defined the map all at once, as follows.
>>> mapping = [[El(0, 0), El(0, 0)], [El(0, 0)]]
>>> assert collapse == OgMap(arrow, point, mapping)
"""
def __init__(self, source, target, mapping=None, **params):
wfcheck = params.get('wfcheck', True)
if wfcheck:
OgMap._wfcheck(source, target, mapping)
self._source = source
self._target = target
if mapping is None:
mapping = [[None for _ in n_data]
for n_data in self.source.face_data]
self._mapping = mapping
# Enable method chaining syntax
self.gray = self._gray
self.join = self._join
self.dual = self._dual
def __eq__(self, other):
return isinstance(other, OgMap) and \
self.source == other.source and \
self.target == other.target and \
self.mapping == other.mapping
def __str__(self):
return '{} from {} to {}'.format(
type(self).__name__,
str(self.source), str(self.target))
def __getitem__(self, element):
if element in self.source:
return self.mapping[element.dim][element.pos]
raise ValueError(utils.value_err(
element, 'not in source'))
def __setitem__(self, element, image):
if element not in self.source:
raise ValueError(utils.value_err(
element, 'not in source'))
self._extensioncheck(element, image)
self._mapping[element.dim][element.pos] = image
def __call__(self, other):
if isinstance(other, El):
return self[other]
if isinstance(other, GrSubset):
return other.image(self)
if isinstance(other, GrSet):
subset = GrSubset(other, self.source)
return subset.image(self)
raise TypeError(utils.type_err(
El, other))
def __mul__(self, other):
return self.gray(other)
def __pow__(self, times):
utils.typecheck(times, {'type': int})
return self.__class__.gray(*[self for _ in range(times)])
def __rshift__(self, other):
return self.join(other)
def __lshift__(self, other):
return other.join(self)
def __invert__(self):
return self.dual()
@property
def source(self):
"""
Returns the source (domain) of the map.
Returns
-------
source : :class:`OgPoset`
The source of the map.
"""
return self._source
@property
def target(self):
"""
Returns the target (codomain) of the map.
Returns
-------
target : :class:`OgPoset`
The target of the map.
"""
return self._target
@property
def mapping(self):
"""
Returns the data specifying the map's assignments.
Returns
-------
mapping : :class:`list[list[El]]`
The mapping data.
"""
return self._mapping
@property
def istotal(self):
"""
Returns whether the map is total.
Returns
-------
istotal : :class:`bool`
:code:`True` if and only if the map is total.
"""
for n_data in self.mapping:
if not all(n_data):
return False
return True
@property
def isinjective(self):
"""
Returns whether the map is injective.
Returns
-------
isinjective : :class:`bool`
:code:`True` if and only if the map is injective.
"""
image_list = [x for n_data in self.mapping for x in n_data
if x is not None]
if len(image_list) == len(set(image_list)):
return True
return False
@property
def issurjective(self):
"""
Returns whether the map is surjective.
Returns
-------
issurjective : :class:`bool`
:code:`True` if and only if the map is surjective.
"""
image_set = {x for n_data in self.mapping for x in n_data
if x is not None}
if len(image_set) == len(self.target):
return True
return False
@property
def isiso(self):
"""
Returns whether the map is an isomorphism, that is, total,
injective, and surjective.
Returns
-------
isiso : :class:`bool`
:code:`True` if and only if the map is an isomorphism.
"""
return self.istotal and self.isinjective and self.issurjective
def isdefined(self, element):
"""
Returns whether the map is defined on a given element.
Arguments
---------
element : :class:`El`
The element to check.
Returns
-------
isdefined : :class:`bool`
:code:`True` if and only if the map is defined on the element.
"""
if element in self.source and self[element] is not None:
return True
return False
def then(self, other, *others):
"""
Returns the composite with other maps or pairs of maps of
oriented graded posets, when defined.
If given an :class:`OgMapPair` as argument, it returns
the pair of composites of the map with each map in the pair.
Arguments
---------
other : :class:`OgMap` | :class:`OgMapPair`
The first map or pair of maps to follow.
*others : :class:`OgMap` | :class:`OgMapPair`, optional
Any number of other maps or pairs of maps to follow.
Returns
-------
composite : :class:`OgMap` | :class:`OgMapPair`
The composite with all the other arguments.
Notes
-----
If all the maps have type :class:`shapes.ShapeMap`, their
composite has the same type.
"""
if len(others) > 0:
return self.then(other).then(*others)
if isinstance(other, OgMapPair):
return OgMapPair(
self.then(other.fst),
self.then(other.snd))
utils.typecheck(other, {
'type': OgMap,
'st': lambda x: x.source == self.target,
'why': 'source does not match target of first map'})
mapping = [
[other.mapping[x.dim][x.pos] if x is not None
else None for x in n_data]
for n_data in self.mapping]
return OgMap(self.source, other.target, mapping,
wfcheck=False)
def inv(self):
"""
Returns the inverse of the map if it is an isomorphism.
Returns
-------
inv : :class:`OgMap`
The inverse of the map, if defined.
Raises
------
:class:`ValueError`
If the map is not an isomorphism.
"""
if not self.isiso:
raise ValueError(utils.value_err(
self, 'not an isomorphism'))
mapping_inv = [[None for _ in n_data]
for n_data in self.mapping]
for x in self.source:
image = self[x]
mapping_inv[image.dim][image.pos] = x
return OgMap(self.target, self.source, mapping_inv,
wfcheck=False)
def image(self):
"""
Returns the image of the map.
Returns
-------
image : :class:`Closed`
The image of the source through the map.
"""
return self.source.all().image(self)
def boundary(self, sign=None, dim=None):
"""
Returns the map restricted to a specified boundary of its source.
Arguments
---------
sign : :class:`str`, optional
Orientation: :code:`'-'` for input, :code:`'+'` for output,
:code:`None` (default) for both.
dim : :class:`int`, optional
Dimension of the boundary (default is :code:`self.dim - 1`).
Returns
-------
boundary : :class:`OgMap`
The map restricted to the requested boundary.
"""
return self.source.boundary(
sign, dim).then(self)
@property
def input(self):
"""
Alias for :code:`boundary('-')`.
"""
return self.boundary('-')
@property
def output(self):
"""
Alias for :code:`boundary('+')`.
"""
return self.boundary('+')
def bot(self):
"""
Functorial extension of :meth:`OgPoset.bot` to maps.
Returns
-------
bot : :class:`OgMap`
The map extended to a map from :code:`source.bot` to
:code:`target.bot`.
"""
source = self.source.bot()
target = self.target.bot()
mapping = [[El(0, 0)]] + [
[El(x.dim + 1, x.pos) for x in n_data]
for n_data in self.mapping]
return OgMap(source, target, mapping,
wfcheck=False)
@staticmethod
def gray(*maps):
"""
Functorial extension of :meth:`OgPoset.gray` to maps of oriented
graded posets.
This method can be called with the math operator :code:`*`, that is,
:code:`fst * snd` is equivalent to :code:`gray(fst, snd)`.
This static method can also be used as a bound method, that is,
:code:`fst.gray(*maps)` is equivalent to :code:`gray(fst, *maps)`.
Arguments
---------
*maps : :class:`OgMap`
Any number of maps of oriented graded posets.
Returns
-------
gray : :class:`OgMap`
The Gray product of the arguments.
Notes
-----
If all the arguments have type :class:`shapes.ShapeMap`, so does their
Gray product.
"""
for f in maps:
utils.typecheck(f, {'type': OgMap})
if len(maps) == 0:
return OgMap.point().id()
if len(maps) == 1:
return maps[0]
fst, snd = maps[0], maps[1]
if len(maps) > 2:
return OgMap.gray(
OgMap.gray(fst, snd), *maps[2:])
size1 = fst.target.size + [0 for _ in range(snd.target.dim)]
size2 = snd.target.size + [0 for _ in range(fst.target.dim)]
def pair(x, y):
if x is None or y is None:
return None
dim = x.dim + y.dim
pos = y.pos + x.pos*size2[y.dim] + sum(
[size1[k]*size2[dim-k] for k in range(x.dim)])
return El(dim, pos)
mapping = [[] for _ in range(fst.source.dim + snd.source.dim + 1)]
for x in fst.source:
for y in snd.source:
mapping[x.dim + y.dim].append(
pair(fst[x], snd[y]))
return OgMap(
OgPoset.gray(fst.source, snd.source),
OgPoset.gray(fst.target, snd.target),
mapping,
wfcheck=False)
def _gray(self, *others):
return self.__class__.gray(self, *others)
@staticmethod
def join(*maps):
"""
Functorial extension of :meth:`OgPoset.join` to maps of oriented
graded posets.
This method can be called with the shift operators :code:`>>`
and :code:`<<`, that is, :code:`fst >> snd` is equivalent to
:code:`join(fst, snd)` and :code:`fst << snd` is equivalent to
:code:`join(snd, fst)`.
This static method can also be used as a bound method, that is,
:code:`fst.join(*maps)` is equivalent to :code:`join(fst, *maps)`.
Arguments
---------
*maps : :class:`OgMap`
Any number of maps of oriented graded posets.
Returns
-------
join : :class:`OgMap`
The join of the arguments.
Notes
-----
If all the arguments have type :class:`shapes.ShapeMap`, so does their
join.
"""
for f in maps:
utils.typecheck(f, {'type': OgMap})
if len(maps) == 0:
return OgPoset.empty().id()
if len(maps) == 1:
return maps[0]
fst, snd = maps[0], maps[1]
if len(maps) > 2:
return OgMap.join(
OgMap.join(fst, snd), *maps[2:])
join_bot = OgMap.gray(
fst.bot(), snd.bot())
mapping = [
[El(x.dim-1, x.pos) for x in n_data]
for n_data in join_bot.mapping[1:]]
return OgMap(
OgPoset.join(fst.source, snd.source),
OgPoset.join(fst.target, snd.target),
mapping,
wfcheck=False)
def _join(self, *others):
return self.__class__.join(self, *others)
@staticmethod
def dual(ogmap, *dims):
"""
Functorial extension of :meth:`OgPoset.dual` to maps of oriented
graded posets.
The dual in all dimensions can also be called with the negation
operator :code:`~`, that is, :code:`~ogmap` is equivalent to
:code:`ogmap.dual()`.
This static method can also be used as a bound method, that is,
:code:`self.dual(*dims)` is equivalent to :code:`dual(self, *dims)`.
Arguments
---------
ogmap : :class:`OgMap`
A map of oriented graded posets.
*dims : :class:`int`
Any number of dimensions; if none, defaults to *all* dimensions.
Returns
-------
dual : :class:`OgMap`
The map dualised in the given dimensions.
Notes
-----
If the map is a :class:`ShapeMap`, so is its dual.
"""
utils.typecheck(ogmap, {'type': OgMap})
return OgMap(OgPoset.dual(ogmap.source, *dims),
OgPoset.dual(ogmap.target, *dims),
ogmap.mapping,
wfcheck=False)
def _dual(self, *dims):
return self.__class__.dual(self, *dims)
def op(self):
"""
Returns the dual in all *odd* dimensions.
"""
dim = max(self.source.dim, self.target.dim)
odds = [n for n in range(dim + 1) if n % 2 == 1]
return self.dual(*odds)
def co(self):
"""
Returns the dual in all *even* dimensions.
"""
dim = max(self.source.dim, self.target.dim)
evens = [n for n in range(dim + 1) if n % 2 == 0]
return self.dual(*evens)
def hasse(self, **params):
"""
Bound version of :meth:`hasse.draw`.
Calling :code:`f.hasse(**params)` is equivalent to calling
:code:`hasse.draw(f, **params)`.
"""
from rewalt.hasse import draw
return draw(self, **params)
# Private methods.
def _extensioncheck(self, element, image):
if image not in self.target:
raise ValueError(utils.value_err(
image, 'not in target'))
if self.isdefined(element):
raise ValueError(utils.value_err(
element, 'already defined on element'))
if image.dim > element.dim:
raise ValueError(utils.value_err(
image, 'exceeds dimension of {}'.format(
repr(element))))
el_underset = self.source.underset(element)
for x in el_underset[:element.dim]:
if not self.isdefined(x):
raise ValueError(utils.value_err(
element, 'map undefined on {} below {}'.format(
repr(x), repr(element))))
img_underset = GrSubset(GrSet(image), self.target).closure()
for n in range(element.dim):
for sign in '-', '+':
if el_underset.boundary(sign, n).image(self) != \
img_underset.boundary(sign, n):
raise ValueError(utils.value_err(
image,
'assignment does not respect '
'({}, {})-boundary of {}'.format(
sign, repr(n), repr(element))))
@staticmethod
def _wfcheck(source, target, mapping):
for x in source, target:
utils.typecheck(x, {'type': OgPoset})
if mapping is not None: # otherwise nothing else to check
utils.typecheck(mapping, {'type': list}, {'type': list})
mapping_size = [len(_) for _ in mapping]
if mapping_size != source.size:
raise ValueError(utils.value_err(
mapping, 'wrong size'))
check_map = OgMap(source, target)
# Extend check_map one element at a time according to data in
# mapping, if this gives no error the check is passed.
for x in source:
if mapping[x.dim][x.pos] is not None:
check_map[x] = mapping[x.dim][x.pos]
class OgMapPair(tuple):
"""
Class for pairs of maps of oriented graded posets.
This is used as the argument and/or return type of pushouts and
coequalisers, which play a prominent role in the theory.
Arguments
---------
fst : :class:`OgMap`
The first map in the pair.
snd : :class:`OgMap`
The second map in the pair.
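Examples
--------
A minimal sketch, using only constructors referenced elsewhere in this
module: the pair of an identity map with itself is both a span and a
cospan, hence parallel.
>>> f = OgPoset.empty().id()
>>> pair = OgMapPair(f, f)
>>> assert pair.isspan and pair.iscospan and pair.isparallel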
"""
def __new__(self, fst, snd):
for x in fst, snd:
utils.typecheck(x, {'type': OgMap})
return tuple.__new__(OgMapPair, (fst, snd))
def __str__(self):
return '({}, {})'.format(str(self.fst), str(self.snd))
def __eq__(self, other):
return isinstance(other, OgMapPair) and \
self.fst == other.fst and self.snd == other.snd
@property
def fst(self):
"""
Returns the first map in the pair.
Returns
-------
fst : :class:`OgMap`
The first map in the pair.
"""
return self[0]
@property
def snd(self):
"""
Returns the second map in the pair.
Returns
-------
snd : :class:`OgMap`
The second map in the pair.
"""
return self[1]
@property
def source(self):
"""
Returns the pair of sources of the pair of maps, or, if a
span, their common source.
Returns
-------
source : :class:`OgPoset` | :class:`tuple[OgPoset]`
The source or sources of the pair.
"""
if self.isspan:
return self.fst.source
return self.fst.source, self.snd.source
@property
def target(self):
"""
Returns the pair of targets of the pair of maps, or, if a
cospan, their common target.
Returns
-------
target : :class:`OgPoset` | :class:`tuple[OgPoset]`
The target or targets of the pair.
"""
if self.iscospan:
return self.fst.target
return self.fst.target, self.snd.target
@property
def isspan(self):
"""
Returns whether the pair is a span (has a common source).
Returns
-------
isspan : :class:`bool`
:code:`True` if and only if the pair is a span.
"""
return self.fst.source == self.snd.source
@property
def iscospan(self):
"""
Returns whether the pair is a cospan (has a common target).
Returns
-------
iscospan : :class:`bool`
:code:`True` if and only if the pair is a cospan.
"""
return self.fst.target == self.snd.target
@property
def isparallel(self):
"""
Returns whether the pair is parallel (both a span and a cospan).
Returns
-------
isparallel : :class:`bool`
:code:`True` if and only if the pair is parallel.
"""
return self.isspan and self.iscospan
@property
def istotal(self):
"""
Returns whether both maps are total.
Returns
-------
istotal : :class:`bool`
:code:`True` if and only if both maps are total.
"""
return self.fst.istotal and self.snd.istotal
@property
def isinjective(self):
"""
Returns whether both maps are injective.
Returns
-------
isinjective : :class:`bool`
:code:`True` if and only if both maps are injective.
"""
return self.fst.isinjective and self.snd.isinjective
@property
def issurjective(self):
"""
Returns whether both maps are surjective.
Returns
-------
issurjective : :class:`bool`
:code:`True` if and only if both maps are surjective.
"""
return self.fst.issurjective and self.snd.issurjective
def then(self, other, *others):
"""
Returns the composite with other maps or pairs of maps of oriented
graded posets, when defined.
If given another pair, it composes the first maps together and the
second maps together. If given a pair and a single map, it composes
both maps in the pair with that map.
Arguments
---------
other : :class:`OgMap` | :class:`OgMapPair`
The first map or pair of maps to follow.
*others : :class:`OgMap` | :class:`OgMapPair`, optional
Any number of other maps or pairs of maps to follow.
Returns
-------
composite : :class:`OgMapPair`
The composite with all the other arguments.
"""
if len(others) > 0:
return self.then(other).then(*others)
if isinstance(other, OgMapPair):
return OgMapPair(
self.fst.then(other.fst),
self.snd.then(other.snd))
return OgMapPair(
self.fst.then(other),
self.snd.then(other))
def coequaliser(self, **params):
"""
Returns the coequaliser of a parallel pair of total maps,
if it exists.
Keyword arguments
-----------------
wfcheck : :class:`bool`
Check whether the coequaliser is well-defined.
Returns
-------
coequaliser : :class:`OgMap`
The coequaliser of the pair of maps.
Raises
------
:class:`ValueError`
If the pair is not total and parallel.
"""
wfcheck = params.get('wfcheck', True)
if not (self.isparallel and self.istotal):
raise ValueError(utils.value_err(
self,
'expecting a parallel pair of total maps'))
mapping = [
[El(n, k) for k, x in enumerate(n_data)]
for n, n_data in enumerate(self.target.face_data)]
to_delete = GrSet()
shift = [[0 for _ in n_data] for n_data in mapping]
for x in self.source:
x1, x2 = self.fst[x], self.snd[x]
fst, snd = mapping[x1.dim][x1.pos], mapping[x2.dim][x2.pos]
if fst != snd:
if fst.dim == snd.dim:
fst, snd = (fst, snd) if fst.pos < snd.pos else (snd, fst)
else:
fst, snd = (fst, snd) if fst.dim < snd.dim else (snd, fst)
if snd not in to_delete:
to_delete.add(snd)
for k in range(snd.pos, len(shift[snd.dim])):
shift[snd.dim][k] -= 1
mapping[snd.dim][snd.pos] = mapping[fst.dim][fst.pos]
mapping = [
[x.shifted(shift[x.dim][x.pos]) for x in n_data]
for n_data in mapping]
face_data = [
[
{sign:
{mapping[n-1][j].pos
for j in x[sign] if mapping[n-1][j].dim == n-1}
for sign in ('-', '+')}
for k, x in enumerate(n_data) if El(n, k) not in to_delete
]
for n, n_data in enumerate(self.target.face_data)]
quotient = OgPoset.from_face_data(face_data, wfcheck=wfcheck)
return OgMap(self.target, quotient, mapping, wfcheck=False)
def pushout(self, **params):
"""
Returns the pushout of a span of total maps, if it exists.
Pushouts do not always exist in the category of oriented graded
posets and maps; however, pushouts of injective (total) maps do always
exist.
Keyword arguments
-----------------
wfcheck : :class:`bool`
Check whether the pushout is well-defined.
Returns
-------
pushout : :class:`OgMapPair`
The pushout cospan of the pair of maps.
Raises
------
:class:`ValueError`
If the pair is not total and a span.
"""
wfcheck = params.get('wfcheck', True)
if not (self.isspan and self.istotal):
raise ValueError(utils.value_err(
self,
'expecting a span of injective total maps'))
coproduct = OgPoset.coproduct(self.fst.target, self.snd.target)
coequaliser = self.then(coproduct).coequaliser(wfcheck=wfcheck)
return coproduct.then(coequaliser)
| 85,707 | 29.210786 | 78 |
py
|
rewalt
|
rewalt-main/rewalt/utils.py
|
"""
Utility functions for rewalt.
"""
def typecheck(x, constraint, *more_constraints):
"""
Type and constraint checking function.
"""
if not isinstance(x, constraint['type']):
raise TypeError(type_err(constraint['type'], x))
if 'st' in constraint:
if not constraint['st'](x):
raise ValueError(value_err(x, constraint['why']))
if more_constraints and hasattr(x, '__iter__'):
for y in x:
if isinstance(x, dict):
typecheck(x[y], *more_constraints)
else:
typecheck(y, *more_constraints)
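# A minimal usage sketch, mirroring calls made elsewhere in the library
# (e.g. the dimension checks in shapes.py): the first constraint applies
# to x itself, any further constraints to its members.
#
#   typecheck(2, {
#       'type': int,
#       'st': lambda n: n >= 0,
#       'why': 'expecting non-negative integer'})  # passes silently
#
# The same call raises ValueError if given -1 (the 'st' predicate fails,
# with 'why' used in the message) and TypeError if given '2'.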
def type_err(expected, got):
""" Type error message. """
return "{} (expected {}.{}, got {}.{} instead).".format(
repr(got), expected.__module__, expected.__name__,
type(got).__module__, type(got).__name__)
def value_err(got, why):
""" Value error message. """
return "{} ({}).".format(repr(got), why)
def mksign(key):
""" Used to turn various expressions into '-' or '+'. """
if key in ['-', 0,
'i', 'in', 'input',
'd', 'dom', 'domain',
's', 'src', 'source']:
return '-'
if key in ['+', 1,
'o', 'out', 'output',
'c', 'cod', 'codomain',
't', 'tgt', 'target']:
return '+'
raise KeyError(str(key))
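# A minimal usage sketch, based on the aliases listed above:
# mksign('in') == mksign('src') == '-', mksign('out') == mksign('tgt') == '+',
# and an unrecognised key such as mksign('x') raises KeyError.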
def flip(sign):
""" Flips the sign. """
flipped = '-' if sign == '+' else '+'
return flipped
| 1,471 | 25.763636 | 61 |
py
|
rewalt
|
rewalt-main/rewalt/hasse.py
|
"""
Implements oriented Hasse diagram visualisation.
"""
import networkx as nx
from rewalt import utils, ogposets, diagrams, drawing
DEFAULT = {
'tikz': False,
'show': True,
'bgcolor': 'white',
'fgcolor': 'black',
'labels': True,
'inputcolor': 'magenta',
'outputcolor': 'blue',
'orientation': 'bt'}
class Hasse:
"""
Class for "oriented Hasse diagrams" of oriented graded posets.
The oriented Hasse diagram is stored as a NetworkX directed graph
whose nodes are the elements of the oriented graded poset.
The orientation information is encoded by having edges corresponding
to input faces point *from* the face, and edges corresponding to
output faces point *towards* the face. To recover the underlying
poset's Hasse diagram, it suffices to reverse the edges that point
from an element of higher dimension.
Objects of the class can also store labels for nodes of the Hasse
diagram, for example the images of the corresponding elements
through a map or a diagram.
The class also has a method :meth:`draw` that outputs a visualisation
of the Hasse diagram. This works with any :class:`drawing.DrawBackend`;
currently available are
- a Matplotlib backend, and
- a TikZ backend.
Arguments
---------
ogp : :class:`ogposets.OgPoset | ogposets.OgMap | diagrams.Diagram`
The oriented graded poset, or a map of oriented graded posets,
or a diagram.
Notes
-----
If given a map of oriented graded posets (or shapes), produces the
Hasse diagram of its source, with nodes labelled with the images
of elements through the map.
If given a diagram, produces the Hasse diagram of its shape, with
nodes labelled with the images of elements through the diagram.
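Examples
--------
A small sketch, assuming the :class:`shapes.Shape` constructors: the
arrow shape has two points and one 1-dimensional element, so its Hasse
diagram has three nodes and two edges.
>>> from rewalt.shapes import Shape
>>> H = Hasse(Shape.arrow())
>>> assert len(H.nodes) == 3
>>> assert H.diagram.number_of_edges() == 2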
"""
def __init__(self, ogp):
if isinstance(ogp, ogposets.OgPoset):
self._labels = ogp.id().mapping
elif isinstance(ogp, diagrams.Diagram):
self._labels = ogp.mapping
ogp = ogp.shape
elif isinstance(ogp, ogposets.OgMap):
self._labels = ogp.mapping
ogp = ogp.source
else:
raise TypeError(utils.type_err(
ogposets.OgPoset, ogp))
self._nodes = ogp.all().support
diagram = nx.DiGraph()
diagram.add_nodes_from(self.nodes)
for x in self.nodes[1:]:
for y in ogp.faces(x, '-'):
diagram.add_edge(y, x, sign='-')
for y in ogp.faces(x, '+'):
diagram.add_edge(x, y, sign='+')
self._diagram = diagram
@property
def nodes(self):
"""
Returns the set of nodes of the Hasse diagram, that is, the
graded set of elements of the oriented graded poset it encodes.
Returns
-------
nodes : :class:`ogposets.GrSet`
The set of nodes of the Hasse diagram.
"""
return self._nodes
@property
def diagram(self):
"""
Returns the oriented Hasse diagram as a NetworkX graph.
Returns
-------
diagram : :class:`networkx.DiGraph`
The oriented Hasse diagram.
"""
return self._diagram
@property
def labels(self):
"""
Returns the labels of nodes of the Hasse diagram, in the
same format as :meth:`ogposets.OgMap.mapping`.
Returns
-------
labels : :class:`list[list]`
The labels of the Hasse diagram.
"""
return self._labels
def place_nodes(self):
"""
Places the nodes of the Hasse diagram on the unit square
canvas, and returns their coordinates.
The nodes are placed at different heights according to the
dimension of the element they correspond to.
Elements of the same dimension are then placed at different
widths in order of position.
The coordinates are returned as a dictionary whose keys are
the elements corresponding to nodes of the diagram.
Returns
-------
coordinates : :class:`dict[tuple[float]]`
The coordinates assigned to nodes.
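Examples
--------
A small sketch, assuming the :class:`shapes.Shape` constructors: for the
arrow, the two endpoints are placed at height 0.25 and the
1-dimensional element above them at height 0.75.
>>> from rewalt.shapes import Shape
>>> from rewalt.ogposets import El
>>> coord = Hasse(Shape.arrow()).place_nodes()
>>> assert coord[El(0, 0)] == (0.25, 0.25)
>>> assert coord[El(1, 0)] == (0.5, 0.75)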
"""
dim = self.nodes.dim
if dim < 0:
return dict()
ystep = 1/(dim+1)
xstep = [1/(len(self.nodes[n])) for n in range(dim+1)]
coordinates = dict()
for x in self.nodes:
coordinates[x] = (
(x.pos + 0.5)*xstep[x.dim],
(x.dim + 0.5)*ystep
)
return coordinates
def draw(self, **params):
"""
Outputs a visualisation of the Hasse diagram, using a backend.
Currently supported are a Matplotlib backend and a TikZ backend;
in both cases it is possible to show the output (as a pop-up
window for Matplotlib, or as code for TikZ) or save to file.
Various customisation options are available, including different
orientations and colours.
Keyword arguments
-----------------
tikz : :class:`bool`
Whether to output TikZ code (default is :code:`False`).
show : :class:`bool`
Whether to show the output (default is :code:`True`).
path : :class:`str`
Path where to save the output (default is :code:`None`).
orientation : :class:`str`
Orientation of the Hasse diagram: one of :code:`'bt'`
(bottom-to-top), :code:`'lr'` (left-to-right),
:code:`'tb'` (top-to-bottom), :code:`'rl'` (right-to-left)
(default is :code:`'bt'`).
bgcolor : multiple types
The background colour (default is :code:`'white'`).
fgcolor : multiple types
The foreground colour, given by default to nodes
and labels (default is :code:`'black'`).
labels : :class:`bool`
Whether to display node labels (default is :code:`True`).
inputcolor : multiple types
The colour of edges corresponding to input faces
(default is :code:`'magenta'`).
outputcolor : multiple types
The colour of edges corresponding to output faces
(default is :code:`'blue'`).
xscale : :class:`float`
(TikZ only) Scale factor to apply to x axis in output
(default is based on the dimension and maximal number of
elements in one dimension).
yscale : :class:`float`
(TikZ only) Scale factor to apply to y axis in output
(default is based on the dimension and maximal number of
elements in one dimension).
"""
# Parameters
tikz = params.get('tikz', DEFAULT['tikz'])
show = params.get('show', DEFAULT['show'])
path = params.get('path', None)
orientation = params.get(
'orientation', DEFAULT['orientation'])
xscale = params.get('xscale', None)
yscale = params.get('yscale', None)
SCALE = (
max(
(len(self.nodes[n]) for n in range(self.nodes.dim+1)),
default=0),
2*self.nodes.dim
)
if orientation in ('bt', 'tb'):
xscale = SCALE[0] if xscale is None else xscale
yscale = SCALE[1] if yscale is None else yscale
if orientation in ('lr', 'rl'):
xscale = SCALE[1] if xscale is None else xscale
yscale = SCALE[0] if yscale is None else yscale
bgcolor = params.get(
'bgcolor', DEFAULT['bgcolor'])
fgcolor = params.get(
'fgcolor', DEFAULT['fgcolor'])
labels = params.get(
'labels', DEFAULT['labels'])
inputcolor = params.get(
'inputcolor', DEFAULT['inputcolor'])
outputcolor = params.get(
'outputcolor', DEFAULT['outputcolor'])
coord = self.place_nodes()
backendclass = drawing.TikZBackend if tikz else drawing.MatBackend
backend = backendclass(
bgcolor=bgcolor,
fgcolor=fgcolor,
orientation=orientation)
for node in self.nodes:
if labels:
label = '{},{}'.format(
node.pos,
self.labels[node.dim][node.pos])
else:
label = node.pos
backend.draw_label(
label,
coord[node],
(0, 0),
ha='center',
va='center')
for edge in self.diagram.edges:
color = inputcolor if self.diagram.edges[edge]['sign'] == '-' \
else outputcolor
backend.draw_arrow(
coord[edge[0]],
coord[edge[1]],
color=color,
shorten=0.8)
backend.output(path=path, show=show, xscale=xscale, yscale=yscale)
def draw(*ogps, **params):
"""
Given any number of oriented graded posets, or maps, or diagrams,
generates their Hasse diagrams and draws them.
This is the same as generating the Hasse diagram for each
argument, and calling :meth:`Hasse.draw` with the given
parameters on each one of them.
Arguments
---------
*ogps : :class:`ogposets.OgPoset | ogposets.OgMap | diagrams.Diagram`
Any number of oriented graded posets or maps or diagrams.
Keyword arguments
-----------------
**params
Passed to :meth:`Hasse.draw`.
"""
for ogp in ogps:
Hasse(ogp).draw(**params)
| 9,695 | 32.09215 | 75 |
py
|
rewalt
|
rewalt-main/rewalt/shapes.py
|
"""
Implements shapes of cells and diagrams.
"""
import networkx as nx
from rewalt import utils
from rewalt.ogposets import (El, OgPoset, GrSet, GrSubset, Closed,
OgMap, OgMapPair)
class Shape(OgPoset):
"""
Inductive subclass of :class:`ogposets.OgPoset` for shapes of
cells and diagrams.
Properly formed objects of the class are unique encodings of the
*regular molecules* from the theory of diagrammatic sets (plus the
empty shape, which is not considered a regular molecule).
To create shapes, we start from basic constructors such as
:meth:`empty`, :meth:`point`, or one of the named shape constructors,
such as :meth:`globe`, :meth:`simplex`, :meth:`cube`.
Then we generate new shapes by gluing basic shapes together with
:meth:`paste`, :meth:`to_inputs`, :meth:`to_outputs`, or by
producing new higher-dimensional shapes with operations such as
:meth:`atom`, :meth:`gray`, :meth:`join`.
When possible, the constructors place the shapes in appropriate
subclasses of separate interest, which include the *globes*,
the *oriented simplices*, the *oriented cubes*, and the
*positive opetopes*. This is to enable the specification of special
methods for subclasses of shapes.
The hierarchy of subclasses of shapes is as follows: :class:`Simplex`,
:class:`Cube`, :class:`OpetopeTree`, and :class:`Theta` are the most
general; below them, :class:`Opetope` specialises :class:`OpetopeTree`,
:class:`GlobeString` specialises :class:`Theta`, and :class:`Globe`
specialises both of these; :class:`Empty`, :class:`Point`, and
:class:`Arrow` are the most specific.
Currently only the :class:`Cube` and :class:`Simplex` classes have
special methods implemented.
"""
def __new__(self):
return OgPoset.__new__(Empty)
@property
def isatom(self):
"""
Returns whether the shape is an atom (has a greatest element).
Returns
-------
isatom : :class:`bool`
:code:`True` if and only if the shape has a greatest element.
Examples
--------
>>> arrow = Shape.arrow()
>>> assert arrow.isatom
>>> assert not arrow.paste(arrow).isatom
"""
return len(self.maximal()) == 1
@property
def isround(self):
"""
Shorthand for :code:`all().isround`.
"""
return self.all().isround
@property
def layers(self):
"""
Returns the current layering of the shape.
Returns
-------
layers : :class:`list[ShapeMap]`
The current layering.
Examples
--------
>>> arrow = Shape.arrow()
>>> globe = Shape.globe(2)
>>> cospan = globe.paste(arrow).paste(
... arrow.paste(globe), cospan=True)
>>> shape = cospan.target
>>> assert shape.layers == [cospan.fst, cospan.snd]
"""
return self.id().layers
@property
def rewrite_steps(self):
"""
Returns the sequence of rewrite steps associated to the current
layering of the shape.
The :code:`0`-th rewrite step is the input boundary of the shape.
For :code:`n > 0`, the :code:`n`-th rewrite step is the output
boundary of the :code:`(n-1)`-th layer.
Returns
-------
rewrite_steps : :class:`list[ShapeMap]`
The current sequence of rewrite steps.
Examples
--------
>>> arrow = Shape.arrow()
>>> globe = Shape.globe(2)
>>> cospan = globe.paste(arrow).paste(
... arrow.paste(globe), cospan=True)
>>> shape = cospan.target
>>> assert shape.rewrite_steps == [
... cospan.fst.input,
... cospan.fst.output,
... cospan.snd.output]
"""
return self.id().rewrite_steps
# Main constructors
@staticmethod
def atom(fst, snd, **params):
"""
Given two shapes with identical round boundaries, returns a new
atomic shape whose input boundary is the first one and output
boundary the second one.
Arguments
---------
fst : :class:`Shape`
The input boundary shape.
snd : :class:`Shape`
The output boundary shape.
Keyword arguments
-----------------
cospan : :class:`bool`
Whether to return the cospan of inclusions of the input and
output boundaries (default is :code:`False`).
Returns
-------
atom : :class:`Shape` | :class:`ogposets.OgMapPair`
The new atomic shape (optionally with the cospan of
inclusions of its boundaries).
Raises
------
:class:`ValueError`
If the boundaries do not match, or are not round.
Examples
--------
We create a 2-dimensional cell shape with two input 1-cells
and one output 2-cell.
>>> arrow = Shape.arrow()
>>> binary = arrow.paste(arrow).atom(arrow)
>>> binary.draw(path='docs/_static/img/Shape_atom.png')
.. image:: ../_static/img/Shape_atom.png
:width: 400
:align: center
"""
cospan = params.get('cospan', False)
for u in fst, snd:
utils.typecheck(u, {
'type': Shape,
'st': lambda v: v.isround,
'why': 'expecting a round Shape'})
if fst.dim != snd.dim:
raise ValueError(utils.value_err(
snd, 'dimension does not match dimension of {}'.format(
repr(fst))))
dim = fst.dim
if dim == -1: # Avoid more work in this simple case
if cospan:
return OgMapPair(
Shape.point().initial(),
Shape.point().initial()
)
return Shape.point()
in_span = OgMapPair(
fst.boundary('-'), snd.boundary('-'))
out_span = OgMapPair(
fst.boundary('+'), snd.boundary('+'))
if not in_span.isspan:
raise ValueError(utils.value_err(
snd, 'input boundary does not match '
'input boundary of {}'.format(repr(fst))))
if not out_span.isspan:
raise ValueError(utils.value_err(
snd, 'output boundary does not match '
'output boundary of {}'.format(repr(fst))))
glue_in = in_span.pushout(wfcheck=False)
glue_out = out_span.then(glue_in).coequaliser(wfcheck=False)
inclusion = glue_in.then(glue_out)
sphere = inclusion.target
# Add a greatest element
face_data = [
*sphere.face_data,
[
{'-':
{inclusion.fst[x].pos for x in fst[dim]},
'+':
{inclusion.snd[x].pos for x in snd[dim]}}
]]
coface_data = [
*sphere.coface_data,
[{'-': set(), '+': set()}]]
for x in fst[dim]:
coface_data[dim][inclusion.fst[x].pos]['-'].add(0)
for x in snd[dim]:
coface_data[dim][inclusion.snd[x].pos]['+'].add(0)
ogatom = OgPoset(face_data, coface_data,
wfcheck=False, matchcheck=False)
def inheritance():
if isinstance(fst, OpetopeTree) and isinstance(snd, Opetope):
if fst.dim == 0:
return Arrow
if isinstance(fst, Globe):
return Globe
return Opetope
return Shape
if cospan:
boundary_in = OgMap(
fst, ogatom, inclusion.fst.mapping,
wfcheck=False)
boundary_out = OgMap(
snd, ogatom, inclusion.snd.mapping,
wfcheck=False)
atom_cospan = OgMapPair(boundary_in, boundary_out).then(
Shape._reorder(ogatom).inv())
return inheritance()._upgrademaptgt(atom_cospan)
atom = Shape._reorder(ogatom).source
return inheritance()._upgrade(atom)
def _atom(self, other, **params):
return Shape.atom(self, other, **params)
@staticmethod
def paste(fst, snd, dim=None, **params):
"""
Given two shapes and :code:`k` such that the output
:code:`k`-boundary of the first is equal to the input
:code:`k`-boundary of the second, returns their pasting along
the matching boundaries.
Arguments
---------
fst : :class:`Shape`
The first shape.
snd : :class:`Shape`
The second shape.
dim : :class:`int`, optional
The dimension of the boundary along which they will be pasted
(default is :code:`min(fst.dim, snd.dim) - 1`).
Keyword arguments
-----------------
cospan : :class:`bool`
Whether to return the cospan of inclusions of the two shapes
into the pasting (default is :code:`False`).
Returns
-------
paste : :class:`Shape` | :class:`ogposets.OgMapPair`
The pasted shape (optionally with the cospan of
inclusions of its components).
Raises
------
:class:`ValueError`
If the boundaries do not match.
Examples
--------
We can paste two 2-dimensional globes either "vertically" along
their 1-dimensional boundary or "horizontally" along their
0-dimensional boundary.
>>> globe = Shape.globe(2)
>>> vert = globe.paste(globe)
>>> horiz = globe.paste(globe, 0)
>>> vert.draw(path='docs/_static/img/Shape_paste_vert.png')
.. image:: ../_static/img/Shape_paste_vert.png
:width: 400
:align: center
>>> horiz.draw(path='docs/_static/img/Shape_paste_horiz.png')
.. image:: ../_static/img/Shape_paste_horiz.png
:width: 400
:align: center
We can also check that the interchange equation holds.
>>> assert vert.paste(vert, 0) == horiz.paste(horiz)
>>> horiz.paste(horiz).draw(
... path='docs/_static/img/Shape_paste_interchange.png')
.. image:: ../_static/img/Shape_paste_interchange.png
:width: 400
:align: center
"""
cospan = params.get('cospan', False)
for u in fst, snd:
utils.typecheck(u, {'type': Shape})
if dim is None: # default is principal composition
dim = min(fst.dim, snd.dim) - 1
utils.typecheck(dim, {
'type': int,
'st': lambda n: n >= 0,
'why': 'expecting non-negative integer'})
span = OgMapPair(
fst.boundary('+', dim),
snd.boundary('-', dim))
if not span.isspan:
raise ValueError(utils.value_err(
snd,
'input {}-boundary does not match '
'output {}-boundary of {}'.format(
str(dim), str(dim), repr(fst))))
if dim >= fst.dim:
return OgMapPair(span.snd, snd.id()) if cospan else snd
if dim >= snd.dim:
return OgMapPair(fst.id(), span.fst) if cospan else fst
pushout = span.pushout(wfcheck=False)
paste_cospan = pushout.then(Shape._reorder(pushout.target).inv())
def inheritance():
if isinstance(fst, Theta) and isinstance(snd, Theta):
if isinstance(fst, GlobeString) and \
isinstance(snd, GlobeString) \
and fst.dim == snd.dim == dim+1:
return GlobeString
return Theta
if isinstance(fst, OpetopeTree) and isinstance(snd, GlobeString) \
and fst.dim == snd.dim == dim+1:
return OpetopeTree
return Shape
paste_cospan = inheritance()._upgrademaptgt(paste_cospan)
if fst.dim == snd.dim == dim + 1: # add layering
if hasattr(fst, '_layering') and len(fst[fst.dim]) > 1:
layering_fst = fst._layering
else:
layering_fst = [fst.id()]
if hasattr(snd, '_layering') and len(snd[snd.dim]) > 1:
layering_snd = snd._layering
else:
layering_snd = [snd.id()]
layering = [
*[f.then(paste_cospan.fst) for f in layering_fst],
*[f.then(paste_cospan.snd) for f in layering_snd]]
paste_cospan.target._layering = layering
if cospan:
return paste_cospan
return paste_cospan.target
def _paste(self, other, dim=None, **params):
return Shape.paste(self, other, dim, **params)
# Other constructors
@staticmethod
def paste_along(fst, snd, **params):
"""
Given a span of shape maps, where one is the inclusion of the
input (resp output) :code:`k`-boundary of a shape,
and the other the inclusion of a round subshape of the
output (resp input) :code:`k`-boundary of another shape,
returns the pasting (pushout) of the two shapes along the span.
In practice, it is convenient to use :meth:`to_inputs` and
:meth:`to_outputs` instead, where the data of the span is specified
by :code:`k` and the positions of the :code:`k`-dimensional
elements in the round subshape along which the pasting occurs.
Arguments
---------
fst : :class:`ShapeMap`
The first inclusion.
snd : :class:`ShapeMap`
The second inclusion.
Keyword arguments
-----------------
wfcheck : :class:`bool`
Check if the span gives rise to a well-formed pasting
(default is :code:`True`).
cospan : :class:`bool`
Whether to return the cospan of inclusions of the two shapes
into the pasting (default is :code:`False`).
Returns
-------
paste_along : :class:`Shape` | :class:`ogposets.OgMapPair`
The pasted shape (optionally with the cospan of
inclusions of its components).
Raises
------
:class:`ValueError`
If the pair of maps is not an injective span.
"""
wfcheck = params.get('wfcheck', True)
cospan = params.get('cospan', False)
span = OgMapPair(fst, snd)
utils.typecheck(span, {
'type': OgMapPair,
'st': lambda x: x.isspan and x.isinjective,
'why': 'expecting a span of injective maps'
}, {'type': ShapeMap})
dim = span.source.dim
fst_image = fst.source.maximal().image(fst)
snd_image = snd.source.maximal().image(snd)
fst_output = fst.target.all().boundary_max('+', dim)
snd_input = snd.target.all().boundary_max('-', dim)
if fst_image == fst_output and snd_image == snd_input:
return Shape.paste(fst.target, snd.target, dim,
cospan=cospan)
if wfcheck:
def condition():
t1 = fst_image.issubset(fst_output)
t2 = snd_image.issubset(snd_input)
t3 = fst_image == fst_output or snd_image == snd_input
return t1 and t2 and t3
if not condition():
raise ValueError(utils.value_err(
span, 'not a well-formed span for pasting'))
if fst_image == fst_output:
if not snd.target._ispastable(
snd_image.support, snd_input.support):
raise ValueError(utils.value_err(
snd, 'cannot paste along this map'))
else:
if not fst.target._ispastable(
fst_image.support, fst_output.support):
raise ValueError(utils.value_err(
fst, 'cannot paste along this map'))
pushout = span.pushout(wfcheck=False)
def inheritance():
if isinstance(fst.target, OpetopeTree) and \
isinstance(snd.target, OpetopeTree) \
and fst.target.dim == snd.target.dim == dim+1:
return OpetopeTree
return Shape
if cospan:
paste_cospan = pushout.then(
Shape._reorder(pushout.target).inv())
return inheritance()._upgrademaptgt(paste_cospan)
paste = Shape._reorder(pushout.target).source
return inheritance()._upgrade(paste)
def to_outputs(self, positions, other, dim=None, **params):
"""
Returns the pasting of another shape along a round subshape of
the output :code:`k`-boundary, specified by the positions of its
:code:`k`-dimensional elements.
Arguments
---------
positions : :class:`list[int]` | :class:`int`
The positions of the outputs along which to paste. If given
an integer :code:`n`, interprets it as the list :code:`[n]`.
other : :class:`Shape`
The other shape to paste.
dim : :class:`int`, optional
The dimension of the boundary along which to paste
(default is :code:`self.dim - 1`)
Keyword arguments
-----------------
cospan : :class:`bool`
Whether to return the cospan of inclusions of the two shapes
into the pasting (default is :code:`False`).
Returns
-------
to_outputs : :class:`Shape` | :class:`ogposets.OgMapPair`
The pasted shape (optionally with the cospan of
inclusions of its components).
Raises
------
:class:`ValueError`
If the boundaries do not match, or the pasting does not produce
a well-formed shape.
Examples
--------
We create a 2-simplex and visualise it as a string diagram with the
:code:`positions` parameter enabled.
>>> simplex = Shape.simplex(2)
>>> simplex.draw(
... positions=True, path='docs/_static/img/Shape_to_outputs1.png')
.. image:: ../_static/img/Shape_to_outputs1.png
:width: 400
:align: center
We paste another 2-simplex to the output in position :code:`2`.
>>> paste1 = simplex.to_outputs(2, simplex)
>>> paste1.draw(
... positions=True, path='docs/_static/img/Shape_to_outputs2.png')
.. image:: ../_static/img/Shape_to_outputs2.png
:width: 400
:align: center
Finally, we paste the *dual* of a 2-simplex to the outputs in
positions :code:`2, 3`.
>>> paste2 = paste1.to_outputs([1, 3], simplex.dual())
>>> paste2.draw(
... positions=True, path='docs/_static/img/Shape_to_outputs3.png')
.. image:: ../_static/img/Shape_to_outputs3.png
:width: 400
:align: center
"""
if isinstance(positions, int):
positions = [positions]
if dim is None:
dim = self.dim-1
fst = GrSet(*[El(dim, pos) for pos in positions])
snd = self.all().boundary_max('+', dim).support
if not self._ispastable(fst, snd):
raise ValueError(utils.value_err(
positions, 'cannot paste to these outputs'))
oginclusion = GrSubset(fst, self).closure().as_map
inclusion = ShapeMap(Shape._reorder(
oginclusion.source).then(oginclusion),
wfcheck=False)
other_boundary = other.boundary('-')
if inclusion.source != other_boundary.source:
raise ValueError(utils.value_err(
positions, 'does not match input boundary of {}'.format(
repr(other))))
return Shape.paste_along(
inclusion,
other_boundary, wfcheck=False, **params)
def to_inputs(self, positions, other, dim=None, **params):
"""
Returns the pasting of another shape along a round subshape
of the input :code:`k`-boundary, specified by the positions of its
:code:`k`-dimensional elements.
Arguments
---------
positions : :class:`list[int]` | :class:`int`
The positions of the inputs along which to paste. If given
an integer :code:`n`, interprets it as the list :code:`[n]`.
other : :class:`Shape`
The other shape to paste.
dim : :class:`int`, optional
The dimension of the boundary along which to paste
(default is :code:`self.dim - 1`)
Keyword arguments
-----------------
cospan : :class:`bool`
Whether to return the cospan of inclusions of the two shapes
into the pasting (default is :code:`False`).
Returns
-------
to_inputs : :class:`Shape` | :class:`ogposets.OgMapPair`
The pasted shape (optionally with the cospan of
inclusions of its components).
Raises
------
:class:`ValueError`
If the boundaries do not match, or the pasting does not produce
a well-formed shape.
Examples
--------
We work dually to the example for :meth:`to_outputs`.
>>> binary = Shape.simplex(2).dual()
>>> binary.draw(
... positions=True, path='docs/_static/img/Shape_to_inputs1.png')
.. image:: ../_static/img/Shape_to_inputs1.png
:width: 400
:align: center
>>> paste1 = binary.to_inputs(1, binary)
>>> paste1.draw(
... positions=True, path='docs/_static/img/Shape_to_inputs2.png')
.. image:: ../_static/img/Shape_to_inputs2.png
:width: 400
:align: center
>>> paste2 = paste1.to_inputs([0, 1], binary.dual())
>>> paste2.draw(
... positions=True, path='docs/_static/img/Shape_to_inputs3.png')
.. image:: ../_static/img/Shape_to_inputs3.png
:width: 400
:align: center
"""
if isinstance(positions, int):
positions = [positions]
if dim is None:
dim = self.dim-1
fst = GrSet(*[El(dim, pos) for pos in positions])
snd = self.all().boundary_max('-', dim).support
if not self._ispastable(fst, snd):
raise ValueError(utils.value_err(
positions, 'cannot paste to these inputs'))
oginclusion = GrSubset(fst, self).closure().as_map
inclusion = ShapeMap(Shape._reorder(
oginclusion.source).then(oginclusion),
wfcheck=False)
other_boundary = other.boundary('+')
if inclusion.source != other_boundary.source:
raise ValueError(utils.value_err(
positions, 'does not match output boundary of {}'.format(
repr(other))))
return Shape.paste_along(
other_boundary,
inclusion,
wfcheck=False, **params)
@staticmethod
def suspend(shape, n=1):
"""
Returns the n-fold suspension of a shape.
This static method can also be used as a bound method after
an object is initialised, that is, :code:`shape.suspend(n)` is
equivalent to :code:`suspend(shape, n)`.
Arguments
---------
shape : :class:`Shape`
The object to suspend.
n : :class:`int`, optional
The number of iterations of the suspension (default is 1).
Returns
-------
suspension : :class:`Shape`
The suspended shape.
Examples
--------
The suspension of the point is the arrow, and the suspension of
an arrow is the 2-globe.
>>> assert Shape.point().suspend() == Shape.arrow()
>>> assert Shape.arrow().suspend() == Shape.globe(2)
In general, the suspension of the n-globe is the (n+1)-globe.
"""
if n == 0:
return shape
if not isinstance(shape, Shape) or isinstance(shape, Empty):
return OgPoset.suspend(shape, n)
if isinstance(shape, Point) and n == 1:
return Arrow()
suspension = Shape._reorder(
OgPoset.suspend(shape, n)).source
def inheritance():
if isinstance(shape, Theta):
if isinstance(shape, GlobeString):
if isinstance(shape, Globe):
return Globe
return GlobeString
return Theta
return Shape
return inheritance()._upgrade(suspension)
@staticmethod
def gray(*shapes):
"""
Returns the Gray product of any number of shapes.
This method can be called with the math operator :code:`*`, that is,
:code:`fst * snd` is equivalent to :code:`gray(fst, snd)`.
This static method can also be used as a bound method after an object
is initialised, that is, :code:`fst.gray(*shapes)` is equivalent to
:code:`gray(fst, *shapes)`.
Arguments
---------
*shapes : :class:`Shape`
Any number of shapes.
Returns
-------
gray : :class:`Shape`
The Gray product of the arguments.
Example
-------
The point is a unit for the Gray product.
>>> point = Shape.point()
>>> arrow = Shape.arrow()
>>> assert point*arrow == arrow*point == arrow
The Gray product of two arrows is the oriented square (2-cube).
>>> arrow = Shape.arrow()
>>> assert arrow*arrow == Shape.cube(2)
In general, the Gray product of the n-cube with the k-cube
is the (n+k)-cube.
"""
for x in shapes:
if not isinstance(x, Shape):
return OgPoset.gray(*shapes)
if len(shapes) == 0:
return Point()
if len(shapes) == 1:
return shapes[0]
oggray = OgPoset.gray(*shapes)
if oggray in shapes:
return oggray
gray = Shape._reorder(oggray).source
def inheritance(l):
if all([isinstance(x, Cube) for x in l]):
return Cube
return Shape
return inheritance(shapes)._upgrade(gray)
@staticmethod
def join(*shapes):
"""
Returns the join of any number of shapes.
This method can be called with the shift operators :code:`>>`
and :code:`<<`, that is, :code:`fst >> snd` is equivalent to
:code:`join(fst, snd)` and :code:`fst << snd` is equivalent to
:code:`join(snd, fst)`.
This static method can also be used as a bound method after an
object is initialised, that is, :code:`fst.join(*shapes)` is
equivalent to :code:`join(fst, *shapes)`.
Arguments
---------
*shapes : :class:`Shape`
Any number of shapes.
Returns
-------
join : :class:`Shape`
The join of the arguments.
Examples
--------
The empty shape is a unit for the join.
>>> empty = Shape.empty()
>>> point = Shape.point()
>>> assert empty >> point == point >> empty == point
The join of two points is the arrow, and the join of an arrow
and a point is the 2-simplex.
>>> arrow = Shape.arrow()
>>> assert point >> point == Shape.arrow()
>>> assert arrow >> point == Shape.simplex(2)
In general, the join of an n-simplex with a k-simplex is
the (n+k+1)-simplex.
"""
for x in shapes:
if not isinstance(x, Shape):
return OgPoset.join(*shapes)
if len(shapes) == 0:
return Empty()
if len(shapes) == 1:
return shapes[0]
ogjoin = OgPoset.join(*shapes)
if len(ogjoin) == 3: # check explicitly if it's arrow
return Arrow()
if ogjoin in shapes:
return ogjoin
join = Shape._reorder(ogjoin).source
def inheritance(l):
if all([isinstance(x, Simplex) for x in l]):
return Simplex
return Shape
return inheritance(shapes)._upgrade(join)
@staticmethod
def dual(shape, *dims, **params):
"""
Returns the shape with orientations reversed in given dimensions.
The dual in all dimensions can also be called with the bit negation
operator :code:`~`, that is, :code:`~shape` is equivalent to
:code:`shape.dual()`.
This static method can also be used as a bound method after an object
is initialised, that is, :code:`shape.dual(*dims)` is equivalent to
:code:`dual(shape, *dims)`.
Arguments
---------
shape : :class:`Shape`
A shape.
*dims : :class:`int`
Any number of dimensions; if none, defaults to *all* dimensions.
Returns
-------
dual : :class:`Shape`
The shape, dualised in the given dimensions.
Examples
--------
>>> arrow = Shape.arrow()
>>> simplex = Shape.simplex(2)
>>> binary = arrow.paste(arrow).atom(arrow)
>>> assert binary == simplex.dual()
>>> assoc_l = binary.to_inputs(0, binary)
>>> assoc_r = binary.to_inputs(1, binary)
>>> assert assoc_r == assoc_l.dual(1)
"""
reordering = params.get('reordering', False)
reordermap = Shape._reorder(OgPoset.dual(shape, *dims))
dual = reordermap.source
if shape == dual:
if reordering:
return shape.id()
return shape
def inheritance():
if isinstance(shape, Theta):
return Theta
return Shape
if reordering:
return inheritance()._upgrademapsrc(reordermap)
return inheritance()._upgrade(dual)
def merge(self):
"""
Returns the unique atomic shape with the same boundary,
if the shape is round.
Returns
-------
merge : :class:`Shape`
The unique atomic shape with the same boundary.
Raises
------
:class:`ValueError`
If the shape is not round.
Examples
--------
We create a 2-dimensional shape with two input 1-cells and
one output 1-cell, and paste it to itself along one of the
inputs.
>>> arrow = Shape.arrow()
>>> binary = arrow.paste(arrow).atom(arrow)
>>> to_merge = binary.to_inputs(1, binary)
>>> to_merge.draw(path='docs/_static/img/Shape_merge1.png')
.. image:: ../_static/img/Shape_merge1.png
:width: 400
:align: center
The "merged" shape is the 2-dimensional atom with three input
2-cells and one output 1-cell.
>>> merged = to_merge.merge()
>>> merged.draw(path='docs/_static/img/Shape_merge2.png')
.. image:: ../_static/img/Shape_merge2.png
:width: 400
:align: center
"""
if self.isatom:
return self
if not self.isround:
raise ValueError(utils.value_err(
self, 'not a round shape'))
merged = Shape.atom(
self.boundary('-').source,
self.boundary('+').source)
def inheritance():
if self.dim == 1:
return Arrow
if isinstance(self, GlobeString):
return Globe
if isinstance(self, OpetopeTree):
return Opetope
return Shape
return inheritance()._upgrade(merged)
# Named shapes
@staticmethod
def empty():
"""
Constructs the initial, empty shape.
Returns
-------
empty : :class:`Empty`
The empty shape.
"""
return Empty()
@staticmethod
def point():
"""
Constructs the terminal shape, consisting of a single point.
Returns
-------
point : :class:`Point`
The point.
"""
return Point()
@staticmethod
def arrow():
"""
Constructs the arrow, the unique 1-dimensional atomic shape.
Returns
-------
arrow : :class:`Arrow`
The arrow.
"""
return Arrow()
@staticmethod
def simplex(dim=-1):
"""
Constructs the oriented simplex of a given dimension.
Arguments
---------
dim : :class:`int`
The dimension of the simplex (default is :code:`-1`).
Returns
-------
simplex : :class:`Simplex`
The simplex of the requested dimension.
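Examples
--------
A minimal sketch, consistent with the examples given for :meth:`join`:
low-dimensional simplices coincide with other named shapes.
>>> assert Shape.simplex(-1) == Shape.empty()
>>> assert Shape.simplex(0) == Shape.point()
>>> assert Shape.simplex(1) == Shape.arrow()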
"""
utils.typecheck(dim, {
'type': int,
'st': lambda n: n >= -1,
'why': 'expecting integer >= -1'})
point = Point()
return Shape.join(*[point for _ in range(dim+1)])
@staticmethod
def cube(dim=0):
"""
Constructs the oriented cube of a given dimension.
Arguments
---------
dim : :class:`int`
The dimension of the cube (default is :code:`0`).
Returns
-------
cube : :class:`Cube`
The cube of the requested dimension.
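Examples
--------
A minimal sketch, consistent with the examples given for :meth:`gray`:
low-dimensional cubes coincide with other named shapes.
>>> assert Shape.cube(0) == Shape.point()
>>> assert Shape.cube(1) == Shape.arrow()
>>> assert Shape.cube(2) == Shape.arrow() * Shape.arrow()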
"""
utils.typecheck(dim, {
'type': int,
'st': lambda n: n >= 0,
'why': 'expecting non-negative integer'})
arrow = Arrow()
return Shape.gray(*[arrow for _ in range(dim)])
@staticmethod
def globe(dim=0):
"""
Constructs the globe of a given dimension.
Arguments
---------
dim : :class:`int`
The dimension of the globe (default is :code:`0`).
Returns
-------
globe : :class:`Globe`
The globe of the requested dimension.
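Examples
--------
A minimal sketch, consistent with the examples given for :meth:`suspend`:
the globes are the iterated suspensions of the point.
>>> assert Shape.globe(0) == Shape.point()
>>> assert Shape.globe(1) == Shape.arrow()
>>> assert Shape.globe(2) == Shape.arrow().suspend()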
"""
return Shape.suspend(Point(), dim)
@staticmethod
def theta(*thetas):
"""
Inductive constructor for the objects of the Theta category,
sometimes known as Batanin cells.
Batanin cells are in 1-to-1 correspondence with finite plane trees.
The constructor is based on this correspondence, using the
well-known inductive definition of plane trees: given any number
:code:`k` of Batanin cells, it returns the Batanin cell encoded by
a root with :code:`k` children, to which the :code:`k` plane trees
encoding the arguments are attached.
Arguments
---------
*thetas : :class:`Theta`
Any number of Batanin cells.
Returns
-------
theta : :class:`Theta`
The resulting Batanin cell.
Examples
--------
Every globe is a Batanin cell, encoded by the linear tree of length
equal to its dimension.
>>> assert Shape.theta() == Shape.globe(0)
>>> assert Shape.theta(Shape.theta()) == Shape.globe(1)
>>> assert Shape.theta(Shape.theta(Shape.theta())) == Shape.globe(2)
The tree with one root with n children corresponds to a string
of n arrows.
>>> point = Shape.theta()
>>> arrow = Shape.arrow()
>>> assert Shape.theta(point, point) == arrow.paste(arrow)
"""
if len(thetas) > 0:
theta = thetas[0]
utils.typecheck(theta, {'type': Theta})
thetas = thetas[1:]
if len(thetas) > 0:
return Shape.paste(
Shape.suspend(theta),
Shape.theta(*thetas), 0)
return Shape.suspend(theta)
return Point()
# Special maps
def id(self):
"""
Returns the identity map on the shape.
Returns
-------
id : :class:`ShapeMap`
The identity map on the object.
"""
return ShapeMap(super().id(),
wfcheck=False)
def boundary(self, sign=None, dim=None):
"""
Returns the inclusion of the boundary of a given orientation
and dimension into the shape.
Note that input and output boundaries of shapes are shapes,
so they are returned as shape maps; however, the entire (input
+ output) boundary of a shape is not a shape, so it is returned
simply as a map of oriented graded posets.
Arguments
---------
sign : :class:`str`, optional
Orientation: :code:`'-'` for input, :code:`'+'` for output,
:code:`None` (default) for both.
dim : :class:`int`, optional
Dimension of the boundary (default is :code:`self.dim - 1`).
Returns
-------
boundary : :class:`ShapeMap` | :class:`OgMap`
The inclusion of the requested boundary into the object.
Examples
--------
>>> point = Shape.point()
>>> arrow = Shape.arrow()
>>> binary = arrow.paste(arrow).atom(arrow)
>>> assert binary.boundary('-').source == arrow.paste(arrow)
>>> assert binary.boundary('+').source == arrow
>>> assert binary.boundary('-', 0).source == point
>>> assert binary.boundary('-').target == binary
"""
dim = self.dim-1 if dim is None else dim
if dim >= self.dim:
return self.id()
boundary_ogmap = super().boundary(sign, dim)
if sign is None:
return boundary_ogmap
reordering = Shape._reorder(boundary_ogmap.source)
boundary = reordering.then(boundary_ogmap)
def inheritance():
if dim == -1:
return Empty
if dim == 0:
return Point
if isinstance(self, OpetopeTree):
if isinstance(self, GlobeString):
if dim == 1:
return Arrow
return Globe
if utils.mksign(sign) == '+':
if dim == 1:
return Arrow
return Opetope
return OpetopeTree
return Shape
return inheritance()._upgrademapsrc(boundary)
def atom_inclusion(self, element):
"""
Returns the inclusion of the closure of an element, which
is an atomic shape, in the shape.
Arguments
---------
element : :class:`El`
An element of the shape.
Returns
-------
atom_inclusion : :class:`ShapeMap`
The inclusion of the closure of the element.
Examples
--------
>>> arrow = Shape.arrow()
>>> globe = Shape.globe(2)
>>> whisker_l = arrow.paste(globe)
>>> assert whisker_l.atom_inclusion(El(2, 0)).source == globe
"""
oginclusion = self.underset(element).as_map
reordering = Shape._reorder(oginclusion.source)
inclusion = reordering.then(oginclusion)
def inheritance():
if element.dim == 0:
return Point
if element.dim == 1:
return Arrow
if isinstance(self, Theta):
return Globe
if isinstance(self, OpetopeTree):
return Opetope
if isinstance(self, Simplex):
return Simplex
if isinstance(self, Cube):
return Cube
return Shape
return inheritance()._upgrademapsrc(inclusion)
def initial(self):
"""
Returns the unique map from the initial, empty shape.
Returns
-------
initial : :class:`ShapeMap`
The unique map from the empty shape.
Examples
--------
>>> point = Shape.point()
>>> empty = Shape.empty()
>>> assert point.initial() == empty.terminal()
>>> assert empty.initial() == empty.id()
"""
return ShapeMap(
OgMap(Empty(), self,
wfcheck=False),
wfcheck=False)
def terminal(self):
"""
Returns the unique map to the point, the terminal shape.
Returns
-------
terminal : :class:`ShapeMap`
The unique map to the point.
Examples
--------
>>> point = Shape.point()
>>> assert point.terminal() == point.id()
"""
mapping = [
[El(0, 0) for _ in n_data]
for n_data in self.face_data]
return ShapeMap(
OgMap(self, Point(), mapping,
wfcheck=False),
wfcheck=False)
def inflate(self, collapsed=None):
"""
Given a closed subset of the boundary of the shape, forms a
cylinder on the shape, with the sides incident to the closed subset
collapsed, and returns its projection map onto the original shape.
This is mainly used in constructing units and unitors on diagrams;
see :meth:`diagrams.Diagram.unit`, :meth:`diagrams.Diagram.lunitor`,
:meth:`diagrams.Diagram.runitor`.
Arguments
---------
collapsed : :class:`Closed`, optional
A closed subset of the boundary of the shape (default is
the entire boundary).
Returns
-------
inflate : :class:`ShapeMap`
The projection map of the "partially collapsed cylinder" onto
the shape.
Raises
------
:class:`ValueError`
If `collapsed` is not a subset of the boundary.
"""
if self.dim == -1: # Some simple cases
return self.id()
if self.dim == 0:
return Shape.arrow().terminal()
boundary_set = self.all().boundary()
if collapsed is not None:
utils.typecheck(collapsed, {
'type': Closed,
'st': lambda x: x.issubset(boundary_set),
'why': "expecting a closed subset of the shape's boundary"})
else:
collapsed = boundary_set # Default is whole boundary.
asmap = collapsed.as_map
arrow = Shape.arrow()
map1 = OgMap.gray(arrow.id(), asmap)
map2 = OgMap.gray(arrow.terminal(), asmap.source.id())
pushout = OgMapPair(map1, map2).pushout(wfcheck=False)
# We use the cylinder projection map and the first leg of the pushout
# to define the projection map.
cyl_proj = OgMap.gray(arrow.terminal(), self.id())
collapse = pushout.fst
mapping = [
[None for _ in n_data]
for n_data in pushout.target.face_data]
for x in collapse.source:
y = collapse[x]
mapping[y.dim][y.pos] = cyl_proj[x]
ogproj = OgMap(collapse.target, self, mapping,
wfcheck=False)
proj = Shape._reorder(collapse.target).then(ogproj)
def inheritance():
if collapsed == boundary_set and isinstance(self, Opetope):
if isinstance(self, Globe):
return Globe
return Opetope
return Shape
return inheritance()._upgrademapsrc(proj)
def all_layerings(self):
"""
Returns an iterator on all *layerings* of a shape of dimension
:code:`n` into shapes with a single :code:`n`-dimensional element,
pasted along their :code:`(n-1)`-dimensional boundary.
Returns
-------
all_layerings : :class:`Iterable`
The iterator on all layerings of the shape.
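Examples
--------
A small sketch, based on the example in :meth:`generate_layering`: the
horizontal composite of two 2-globes admits exactly two layerings, one
for each order of its two 2-dimensional cells.
>>> globe = Shape.globe(2)
>>> chain = globe.paste(globe, 0)
>>> assert len(list(chain.all_layerings())) == 2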
"""
dim = self.dim
maximal = self.maximal().support
topdim = maximal[dim]
flow = self._flowgraph(topdim)
all_sorts = nx.all_topological_sorts(flow)
def test(sort):
return self._islayering(
list(sort), maximal, layerlist=True)
def layering(layers):
layering = []
for layer in layers:
oginclusion = GrSubset(
layer, self, wfcheck=False).closure().as_map
inclusion = Shape._reorder(oginclusion.source).then(
oginclusion)
layering.append(ShapeMap(inclusion, wfcheck=False))
return layering
return (
layering(test(sort)[1])
for sort in all_sorts if test(sort)[0]
)
def generate_layering(self):
"""
Assigns a layering to the shape, iterating through all
the layerings, and returns it.
Returns
-------
layers : :class:`list[ShapeMap]`
The generated layering.
Examples
--------
>>> arrow = Shape.arrow()
>>> globe = Shape.globe(2)
>>> chain = globe.paste(globe, 0)
>>> chain.generate_layering()
>>> assert chain.layers[0].source == arrow.paste(globe)
>>> assert chain.layers[1].source == globe.paste(arrow)
>>> chain.generate_layering()
>>> assert chain.layers[0].source == globe.paste(arrow)
>>> assert chain.layers[1].source == arrow.paste(globe)
"""
if not hasattr(self, '_layering_gen'):
self._layering_gen = self.all_layerings()
try:
self._layering = next(self._layering_gen)
except StopIteration:
self._layering_gen = self.all_layerings()
self._layering = next(self._layering_gen)
return self.layers
def draw(self, **params):
"""
Bound version of :meth:`strdiags.draw`.
Calling :code:`x.draw(**params)` is equivalent to calling
:code:`strdiags.draw(x, **params)`.
"""
from rewalt.strdiags import draw
return draw(self, **params)
def draw_boundaries(self, **params):
"""
Bound version of :meth:`strdiags.draw_boundaries`.
Calling :code:`x.draw_boundaries(**params)` is equivalent to
calling :code:`strdiags.draw_boundaries(x, **params)`.
"""
from rewalt.strdiags import draw_boundaries
return draw_boundaries(self, **params)
# Private methods
@staticmethod
def _reorder(shape):
"""
Traverses all elements and returns an isomorphism
from the shape with elements reordered in traversal order.
"""
mapping = [[] for _ in range(shape.dim + 1)]
marked = GrSet()
focus_stack = [shape.maximal().support] # traversal begins
while len(focus_stack) > 0:
focus = focus_stack[-1]
dim = focus.dim
top_dim = focus[dim]
top_unmarked = GrSet()
for x in top_dim:
if x not in marked:
top_unmarked.add(x)
if len(top_unmarked) == 0:
del focus_stack[-1]
else:
if dim == 0:
for x in top_dim:
mapping[dim].append(x)
marked.add(x)
del focus_stack[-1]
else:
focus_in_faces = GrSet()
focus_out_faces = GrSet()
candidates = {}
for x in top_dim:
isunmarked = x in top_unmarked
for y in shape.faces(x, '-'):
focus_in_faces.add(y)
if isunmarked:
candidates[y] = x
for y in shape.faces(x, '+'):
focus_out_faces.add(y)
focus_input = focus_in_faces.difference(
focus_out_faces)
if not focus_input.issubset(marked):
if len(focus[:dim]) > 0:
focus_input = focus_input.union(
focus[:dim])
focus_stack.append(focus_input)
else:
if len(focus) == 1:
for x in top_dim:
mapping[dim].append(x)
marked.add(x)
del focus_stack[-1]
focus_output = focus_out_faces.difference(
focus_in_faces)
if not focus_output.issubset(marked):
if len(focus[:dim]) > 0:
focus_output = focus_output.union(
focus[:dim])
focus_stack.append(focus_output)
else:
x = next(
x for x in mapping[dim-1]
if x in candidates.keys())
focus_stack.append(
GrSet(candidates[x]))
def reordered_faces(x, sign):
return {k for y in shape.faces(x, sign)
for k in range(shape.size[x.dim - 1])
if y == mapping[x.dim - 1][k]}
def reordered_cofaces(x, sign):
return {k for y in shape.cofaces(x, sign)
for k in range(shape.size[x.dim + 1])
if y == mapping[x.dim + 1][k]}
face_data = [
[
{sign: reordered_faces(x, sign)
for sign in ('-', '+')}
for x in n_data
]
for n_data in mapping]
coface_data = [
[
{sign: reordered_cofaces(x, sign)
for sign in ('-', '+')}
for x in n_data
]
for n_data in mapping]
reordered = Shape._upgrade(
OgPoset(face_data, coface_data,
wfcheck=False, matchcheck=False))
return OgMap(reordered, shape, mapping,
wfcheck=False)
def _flowgraph(self, grset):
"""
The 'flow graph' of the set in the shape.
"""
flowgraph = nx.DiGraph()
flowgraph.add_nodes_from(grset)
if grset.dim > 0:
for x in grset:
for y in grset:
if not self.faces(x, '+').isdisjoint(
self.faces(y, '-')):
flowgraph.add_edge(x, y)
return flowgraph
def _ispastable(self, fst, snd, **params):
"""
Returns whether fst is a 'pastable', round region of snd
(both given just as GrSets of maximal elements)
"""
wfcheck = params.get('wfcheck', True)
if wfcheck:
if not fst.issubset(snd):
return False
if not fst.dim == snd.dim:
return False
if len(fst) == 1:
return True
if wfcheck:
fst_closed = GrSubset(fst, self).closure()
if not fst_closed.isround:
return False
dim = fst.dim
fst_flow = self._flowgraph(fst)
snd_flow = self._flowgraph(snd[dim])
mapping = {
x: x for x in fst}
remaining = set(fst_flow.nodes)
for e in fst_flow.edges:
src = mapping[e[0]]
tgt = mapping[e[1]]
if src != tgt:
snd_flow = nx.contracted_edge(
snd_flow, (src, tgt), self_loops=False)
for x in mapping:
if mapping[x] == tgt:
mapping[x] = src
remaining.remove(tgt)
if len(remaining) > 1: # fst_flow not connected
return False
if not nx.is_directed_acyclic_graph(snd_flow): # fst_flow not convex
return False
if fst.dim < 3: # nothing else needs to be checked in dim <= 2
return True
fst_sort = next(
(
list(sort)
for sort in nx.all_topological_sorts(fst_flow)
if self._islayering(list(sort), fst)
), None)
if fst_sort is None: # cannot layer fst
return False
for x in remaining:
fst_el = x
for sort in nx.all_topological_sorts(snd_flow):
snd_sort = list(sort)
fst_index = snd_sort.index(fst_el)
amended = [
*snd_sort[:fst_index],
*fst_sort,
*snd_sort[fst_index+1:]
]
if self._islayering(amended, snd):
return True
return False
def _islayering(self, ellist, grset, **params):
"""
Returns whether a list of top-dimensional elements is a valid
layering of a molecule (given as its set of maximal elements)
"""
layerlist = params.get('layerlist', False)
dim = grset.dim
top_dim = grset[dim]
focus = grset[:dim] # initialise to input boundary
in_faces = GrSet().union(
*[self.faces(x, '-') for x in top_dim])
for y in in_faces:
if self.cofaces(y, '+').isdisjoint(grset):
focus.add(y)
if layerlist:
layers = []
for x in ellist:
in_x = self.faces(x, '-')
if not self._ispastable(
in_x, focus):
if layerlist:
return False, []
return False
if layerlist:
layers.append(
focus.difference(in_x).union(GrSet(x)))
out_x = self.faces(x, '+')
focus = focus.difference(in_x).union(out_x)
if layerlist:
return True, layers
return True
@classmethod
def _upgrade(cls, ogp):
"""
Forces upgrade of an OgPoset to the shape class.
"""
shape = OgPoset.__new__(cls)
shape.__init__(ogp.face_data, ogp.coface_data,
wfcheck=False, matchcheck=False)
return shape
@classmethod
def _upgrademapsrc(cls, ogmap):
"""
Upgrades the source of an OgMap to a shape class, and declares
it a ShapeMap.
"""
if isinstance(ogmap, OgMapPair):
return OgMapPair(
cls._upgrademapsrc(ogmap.fst),
cls._upgrademapsrc(ogmap.snd))
return ShapeMap(OgMap(
cls._upgrade(ogmap.source),
ogmap.target,
ogmap.mapping,
wfcheck=False), wfcheck=False)
@classmethod
def _upgrademaptgt(cls, ogmap):
"""
Upgrades the target of an OgMap to a shape class, and declares
it a ShapeMap.
"""
if isinstance(ogmap, OgMapPair):
return OgMapPair(
cls._upgrademaptgt(ogmap.fst),
cls._upgrademaptgt(ogmap.snd))
return ShapeMap(OgMap(
ogmap.source,
cls._upgrade(ogmap.target),
ogmap.mapping,
wfcheck=False), wfcheck=False)
class Simplex(Shape):
"""
Subclass of :class:`Shape` for oriented simplices.
The methods of this class provide a full implementation of the
category of simplices, which is generated by the face and
degeneracy maps between simplices one dimension apart.
Use :meth:`Shape.simplex` to construct.
Examples
--------
We create a 1-simplex (arrow), a 2-simplex (triangle),
and a 3-simplex (tetrahedron).
>>> arrow = Shape.simplex(1)
>>> triangle = Shape.simplex(2)
>>> tetra = Shape.simplex(3)
We can then check some of the simplicial relations between
degeneracy and face maps.
>>> map1 = triangle.simplex_degeneracy(2).then(
... arrow.simplex_degeneracy(1))
>>> map2 = triangle.simplex_degeneracy(1).then(
... arrow.simplex_degeneracy(1))
>>> assert map1 == map2
>>> map3 = tetra.simplex_face(2).then(
... triangle.simplex_degeneracy(2))
>>> assert map3 == triangle.id()
>>> map4 = tetra.simplex_face(0).then(
... triangle.simplex_degeneracy(2))
>>> map5 = arrow.simplex_degeneracy(1).then(
... triangle.simplex_face(0))
>>> assert map4 == map5
"""
def __new__(self):
return OgPoset.__new__(Empty)
def simplex_face(self, k):
"""
Returns one of the face inclusion maps of the simplex.
Arguments
---------
k : :class:`int`
The index of the face map, ranging from :code:`0` to
:code:`self.dim`.
Returns
-------
simplex_face : :class:`ShapeMap`
The face map.
Raises
------
:class:`ValueError`
If the index is out of range.
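Examples
--------
An illustrative sketch, assuming the constructors used in the
class examples: the k-th face map includes a simplex one
dimension lower.
>>> tetra = Shape.simplex(3)
>>> triangle = Shape.simplex(2)
>>> face = tetra.simplex_face(0)
>>> assert face.source == triangle
>>> assert face.target == tetra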
"""
utils.typecheck(k, {
'type': int,
'st': lambda k: k in range(self.dim + 1),
'why': 'out of bounds'})
pointid = Point().id()
maps = [
*[pointid for _ in range(k)],
Empty().terminal(),
*[pointid for _ in range(self.dim - k)]
]
return ShapeMap.join(*maps)
def simplex_degeneracy(self, k):
"""
Returns one of the collapse (degeneracy) maps of the simplex
one dimension higher.
Arguments
---------
k : :class:`int`
The index of the degeneracy map, ranging from :code:`0` to
:code:`self.dim`.
Returns
-------
simplex_degeneracy : :class:`ShapeMap`
The degeneracy map.
Raises
------
:class:`ValueError`
If the index is out of range.
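Examples
--------
An illustrative sketch, assuming the constructors used in the
class examples: the map collapses a simplex one dimension
higher onto this one.
>>> arrow = Shape.simplex(1)
>>> triangle = Shape.simplex(2)
>>> degeneracy = arrow.simplex_degeneracy(0)
>>> assert degeneracy.source == triangle
>>> assert degeneracy.target == arrow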
"""
utils.typecheck(k, {
'type': int,
'st': lambda k: k in range(self.dim + 1),
'why': 'out of bounds'})
pointid = Point().id()
maps = [
*[pointid for _ in range(k)],
Arrow().terminal(),
*[pointid for _ in range(self.dim - k)]
]
return ShapeMap.join(*maps)
class Empty(Simplex):
"""
Subclass of :class:`Shape` for the empty shape.
Use :meth:`Shape.empty` to construct.
"""
def __new__(self):
return OgPoset.__new__(Empty)
def __init__(self, face_data=None, coface_data=None,
**params):
super().__init__([], [],
wfcheck=False, matchcheck=False)
class Cube(Shape):
"""
Subclass of :class:`Shape` for oriented cubes.
The methods of this class provide a full implementation of the
category of cubes with connections, which is generated by the
face, degeneracy, and connection maps between cubes one
dimension apart.
Use :meth:`Shape.cube` to construct.
Examples
--------
We create a 1-cube (arrow), 2-cube (square), and 3-cube (cube).
>>> arrow = Shape.cube(1)
>>> square = Shape.cube(2)
>>> cube = Shape.cube(3)
We can then check some of the relations between cubical face,
connection, and degeneracy maps.
>>> map1 = square.cube_degeneracy(2).then(
... arrow.cube_degeneracy(1))
>>> map2 = square.cube_degeneracy(1).then(
... arrow.cube_degeneracy(1))
>>> assert map1 == map2
>>> map3 = square.cube_face(0, '+').then(
... cube.cube_face(2, '-'))
>>> map4 = square.cube_face(1, '-').then(
... cube.cube_face(0, '+'))
>>> assert map3 == map4
>>> map5 = square.cube_connection(1, '-').then(
... arrow.cube_connection(0, '-'))
>>> map6 = square.cube_connection(0, '-').then(
... arrow.cube_connection(0, '-'))
>>> assert map5 == map6
"""
def __new__(self):
return OgPoset.__new__(Point)
def cube_face(self, k, sign):
"""
Returns one of the face inclusion maps of the cube.
Arguments
---------
k : :class:`int`
Index of the face map, ranging from :code:`0` to
:code:`self.dim - 1`.
sign : :class:`str`
Side: :code:`'-'` or :code:`'+'`.
Returns
-------
cube_face : :class:`ShapeMap`
The face map.
Raises
------
:class:`ValueError`
If the index is out of range.
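Examples
--------
An illustrative sketch, assuming the constructors used in the
class examples: each face map includes a cube one dimension
lower.
>>> arrow = Shape.cube(1)
>>> square = Shape.cube(2)
>>> face = square.cube_face(0, '-')
>>> assert face.source == arrow
>>> assert face.target == square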
"""
sign = utils.mksign(sign)
utils.typecheck(k, {
'type': int,
'st': lambda k: k in range(self.dim),
'why': 'out of bounds'})
basic_faces = {
'-': ShapeMap(OgMap(
Point(), Arrow(),
[[El(0, 0)]],
wfcheck=False), wfcheck=False),
'+': ShapeMap(OgMap(
Point(), Arrow(),
[[El(0, 1)]],
wfcheck=False), wfcheck=False)}
arrowid = Arrow().id()
maps = [
*[arrowid for _ in range(k)],
basic_faces[sign],
*[arrowid for _ in range(self.dim - k - 1)]
]
return ShapeMap.gray(*maps)
def cube_degeneracy(self, k):
"""
Returns one of the "degeneracy" collapse maps of the cube
one dimension higher.
Arguments
---------
k : :class:`int`
The index of the degeneracy map, ranging from :code:`0` to
:code:`self.dim`.
Returns
-------
cube_degeneracy : :class:`ShapeMap`
The degeneracy map.
Raises
------
:class:`ValueError`
If the index is out of range.
"""
utils.typecheck(k, {
'type': int,
'st': lambda k: k in range(self.dim + 1),
'why': 'out of bounds'})
arrowid = Arrow().id()
maps = [
*[arrowid for _ in range(k)],
Arrow().terminal(),
*[arrowid for _ in range(self.dim - k)]
]
return ShapeMap.gray(*maps)
def cube_connection(self, k, sign):
"""
Returns one of the "connection" collapse maps of the cube
one dimension higher.
Arguments
---------
k : :class:`int`
Index of the connection map, ranging from :code:`0` to
:code:`self.dim - 1`.
sign : :class:`str`
Side: :code:`'-'` or :code:`'+'`.
Returns
-------
cube_connection : :class:`ShapeMap`
The connection map.
Raises
------
:class:`ValueError`
If the index is out of range.
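Examples
--------
An illustrative sketch, assuming the constructors used in the
class examples: the connection collapses a cube one dimension
higher onto this one.
>>> arrow = Shape.cube(1)
>>> square = Shape.cube(2)
>>> connection = arrow.cube_connection(0, '-')
>>> assert connection.source == square
>>> assert connection.target == arrow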
"""
sign = utils.mksign(sign)
utils.typecheck(k, {
'type': int,
'st': lambda k: k in range(self.dim),
'why': 'out of bounds'})
basic_connections = {
'-': ShapeMap(OgMap(
Shape.cube(2), Arrow(),
[[El(0, 0), El(0, 0), El(0, 1), El(0, 0)],
[El(0, 0), El(1, 0), El(0, 0), El(1, 0)],
[El(1, 0)]],
wfcheck=False), wfcheck=False),
'+': ShapeMap(OgMap(
Shape.cube(2), Arrow(),
[[El(0, 0), El(0, 1), El(0, 1), El(0, 1)],
[El(1, 0), El(0, 1), El(1, 0), El(0, 1)],
[El(1, 0)]],
wfcheck=False), wfcheck=False)}
arrowid = Arrow().id()
maps = [
*[arrowid for _ in range(k)],
basic_connections[sign],
*[arrowid for _ in range(self.dim - k - 1)]
]
return ShapeMap.gray(*maps)
class Theta(Shape):
"""
Subclass of :class:`Shape` for Batanin cells.
Use :meth:`Shape.theta` to construct.
"""
def __new__(self):
return OgPoset.__new__(Point)
class OpetopeTree(Shape):
"""
Subclass of :class:`Shape` for shapes that can appear as input
boundaries of opetopes.
Use :class:`Shape` methods to construct.
"""
def __new__(self):
return OgPoset.__new__(Point)
class GlobeString(Theta, OpetopeTree):
"""
Subclass of :class:`Shape` for "strings of globes" pasted in the top
dimension.
This is the "intersection" of :class:`OpetopeTree` and :class:`Theta`.
Use :class:`Shape` methods to construct.
"""
def __new__(self):
return OgPoset.__new__(Point)
class Opetope(OpetopeTree):
"""
Subclass of :class:`Shape` for (positive) opetopes.
Use :class:`Shape` methods to construct.
"""
def __new__(self):
return OgPoset.__new__(Point)
class Globe(GlobeString, Opetope):
"""
Subclass of :class:`Shape` for globes.
Use :meth:`Shape.globe` to construct.
"""
def __new__(self):
return OgPoset.__new__(Point)
class Point(Globe, Simplex, Cube):
"""
Subclass of :class:`Shape` for the point.
Use :meth:`Shape.point` to construct.
"""
def __new__(self):
return OgPoset.__new__(Point)
def __init__(self, face_data=None, coface_data=None,
**params):
super().__init__(
[[{'-': set(), '+': set()}]],
[[{'-': set(), '+': set()}]],
wfcheck=False, matchcheck=False)
class Arrow(Globe, Simplex, Cube):
"""
Subclass of :class:`Shape` for the arrow shape.
Use :meth:`Shape.arrow` to construct.
"""
def __new__(self):
return OgPoset.__new__(Arrow)
def __init__(self, face_data=None, coface_data=None,
**params):
super().__init__(
[
[{'-': set(), '+': set()}, {'-': set(), '+': set()}],
[{'-': {0}, '+': {1}}]
], [
[{'-': {0}, '+': set()}, {'-': set(), '+': {0}}],
[{'-': set(), '+': set()}]
],
wfcheck=False, matchcheck=False)
class ShapeMap(OgMap):
"""
An overlay of :class:`ogposets.OgMap` for total maps between
:class:`Shape` objects.
It is used to extend constructions of shapes functorially to their
maps, in a way that is compatible with the unique representation
of shapes by their underlying :class:`ogposets.OgPoset` objects.
The most common :class:`ShapeMap` objects are created by methods of
:class:`Shape` such as :meth:`Shape.boundary` and :meth:`Shape.inflate`,
or of its subclasses, such as :meth:`Simplex.simplex_degeneracy` or
:meth:`Cube.cube_connection`.
Nevertheless, occasionally we may need to define a map explicitly,
in which case we first define an object :code:`f` of class
:class:`ogposets.OgMap`, then upgrade it to a :class:`ShapeMap`
with the constructor :code:`ShapeMap(f)`.
Arguments
---------
ogmap : :class:`ogposets.OgMap`
A total map between shapes.
Keyword arguments
-----------------
wfcheck : :class:`bool`
Check whether the given map is a total map between shapes
(default is :code:`True`).
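Examples
--------
A minimal sketch of the explicit construction described above,
assuming the :class:`ogposets.OgMap` constructor used elsewhere
in this module: the map picking out the source endpoint of an
arrow.
>>> arrow = Shape.arrow()
>>> f = ShapeMap(OgMap(Shape.point(), arrow, [[El(0, 0)]]))
>>> assert f.source == Shape.point() and f.target == arrow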
"""
def __init__(self, ogmap, **params):
wfcheck = params.get('wfcheck', True)
if wfcheck:
utils.typecheck(ogmap, {
'type': OgMap,
'st': lambda f: f.istotal,
'why': 'a ShapeMap must be total'})
for x in ogmap.source, ogmap.target:
utils.typecheck(x, {'type': Shape})
super().__init__(ogmap.source, ogmap.target, ogmap.mapping,
wfcheck=False)
def then(self, other, *others):
for f in [other, *others]:
if not isinstance(f, ShapeMap):
return super().then(other, *others)
return ShapeMap(
super().then(other, *others),
wfcheck=False)
@property
def layers(self):
"""
Returns the current layering of the map's source, composed
with the map.
Returns
-------
layers : :class:`list[ShapeMap]`
The source's current layering, composed with the map.
"""
if not hasattr(self.source, '_layering'):
return [self]
return [
f.then(self)
for f in self.source._layering
]
@property
def rewrite_steps(self):
"""
Returns the sequence of rewrite steps associated to the current
layering of the map's source, composed with the map.
Returns
-------
rewrite_steps : :class:`list[ShapeMap]`
The source's current sequence of rewrite steps, composed
with the map.
"""
rewrite_steps = [
*[layer.input for layer in self.layers],
self.layers[-1].output
]
return rewrite_steps
@staticmethod
def gray(*maps):
for f in maps:
if not isinstance(f, ShapeMap):
return OgMap.gray(*maps)
if len(maps) == 0:
return Shape.point().id()
if len(maps) == 1:
return maps[0]
gray = OgMap.gray(*maps)
if gray.source in [f.source for f in maps]:
if gray.target in [f.target for f in maps]:
return gray
if gray.source not in [f.source for f in maps]:
gray = Shape._reorder(gray.source).then(gray)
if gray.target not in [f.target for f in maps]:
gray = gray.then(Shape._reorder(gray.target).inv())
def inheritance(l):
if all([isinstance(x, Cube) for x in l]):
return Cube
return Shape
return inheritance([f.source for f in maps])._upgrademapsrc(
inheritance([f.target for f in maps])._upgrademaptgt(
gray))
@staticmethod
def join(*maps):
for f in maps:
if not isinstance(f, ShapeMap):
return OgMap.join(*maps)
if len(maps) == 0:
return Shape.empty().id()
if len(maps) == 1:
return maps[0]
join = OgMap.join(*maps)
if join.source in [f.source for f in maps]:
if join.target in [f.target for f in maps]:
return join
if join.source not in [f.source for f in maps]:
join = Shape._reorder(join.source).then(join)
if join.target not in [f.target for f in maps]:
join = join.then(Shape._reorder(join.target).inv())
def inheritance(l):
if all([isinstance(x, Simplex) for x in l]):
if sum([x.dim+1 for x in l]) == 2:
return Arrow
return Simplex
return Shape
return inheritance([f.source for f in maps])._upgrademapsrc(
inheritance([f.target for f in maps])._upgrademaptgt(
join))
def dual(shapemap, *dims):
if not isinstance(shapemap, ShapeMap):
return OgMap.dual(shapemap, *dims)
ogdual = OgMap.dual(shapemap, *dims)
dual = Shape._reorder(ogdual.source).then(ogdual).then(
Shape._reorder(ogdual.target).inv())
def inheritance(x, y):
if x == y:
return x.__class__
if isinstance(x, Theta):
return Theta
return Shape
return inheritance(shapemap.source, dual.source)._upgrademapsrc(
inheritance(shapemap.target, dual.target)._upgrademaptgt(
dual))
def generate_layering(self):
"""
Shorthand for :code:`source.generate_layering()`.
"""
self.source.generate_layering()
def draw(self, **params):
"""
Bound version of :meth:`strdiags.draw`.
Calling :code:`f.draw(**params)` is equivalent to calling
:code:`strdiags.draw(f, **params)`.
"""
from rewalt.strdiags import draw
return draw(self, **params)
def draw_boundaries(self, **params):
"""
Bound version of :meth:`strdiags.draw_boundaries`.
Calling :code:`f.draw_boundaries(**params)` is equivalent to calling
:code:`strdiags.draw_boundaries(f, **params)`.
"""
from rewalt.strdiags import draw_boundaries
return draw_boundaries(self, **params)
| 73,090 | 31.1845 | 78 |
py
|
rewalt
|
rewalt-main/rewalt/__init__.py
|
from rewalt import (
ogposets, shapes, diagrams, hasse, strdiags)
from rewalt.ogposets import (El, OgPoset, OgMap, GrSet, GrSubset)
from rewalt.shapes import (Shape, ShapeMap)
from rewalt.diagrams import (DiagSet, Diagram)
from rewalt.strdiags import StrDiag
__version__ = '0.1.0'
| 290 | 31.333333 | 65 |
py
|
rewalt
|
rewalt-main/rewalt/drawing.py
|
"""
Drawing backends.
"""
from abc import ABC
import matplotlib.pyplot as plt
from matplotlib.path import Path
from matplotlib.patches import PathPatch
DEFAULT = {
'bgcolor': 'white',
'fgcolor': 'black',
'orientation': 'bt',
}
class DrawBackend(ABC):
"""
Abstract drawing backend for placing nodes, wires, arrows,
and labels on a canvas.
The purpose of this class is simply to describe the signature
of methods that subclasses have to implement.
Keyword arguments
-----------------
bgcolor : multiple types
The background colour (default is :code:`'white'`).
fgcolor : multiple types
The foreground colour (default is :code:`'black'`).
orientation : :class:`str`
Orientation: one of :code:`'bt'` (bottom-to-top), :code:`'lr'`
(left-to-right), :code:`'tb'` (top-to-bottom), :code:`'rl'`
(right-to-left) (default is :code:`'bt'`).
Notes
-----
All coordinates should be passed to the backend *as if* the
orientation was bottom-to-top; the backend will then make rotations
and adjustments according to the chosen orientation.
"""
def __init__(self, **params):
self.bgcolor = params.get(
'bgcolor', DEFAULT['bgcolor'])
self.fgcolor = params.get(
'fgcolor', DEFAULT['fgcolor'])
self.orientation = params.get(
'orientation', DEFAULT['orientation'])
self.name = params.get('name', None)
def draw_wire(self, wire_xy, node_xy, **params):
"""
Draws a wire from a wire vertex to a node vertex on the canvas.
Arguments
---------
wire_xy : :class:`tuple[float]`
The coordinates of the wire vertex.
node_xy : :class:`tuple[float]`
The coordinates of the node vertex.
Keyword arguments
-----------------
color : multiple types
The colour of the wire (default is :code:`self.fgcolor`).
alpha : :class:`float`
Alpha factor of the wire (default is :code:`1`).
depth : :class:`bool`
Whether to draw the wire with a contour, to simulate "crossing
over" objects that are already on the canvas (default is
:code:`True`).
"""
pass
def draw_label(self, label, xy, offset, **params):
"""
Draws a label next to a location on the canvas.
Arguments
---------
label : :class:`str`
The label.
xy : :class:`tuple[float]`
The coordinates of the object to be labelled.
offset : :class:`tuple[float]`
Point offset of the label relative to the object.
Keyword arguments
-----------------
color : multiple types
The colour of the label (default is :code:`self.fgcolor`).
"""
pass
def draw_node(self, xy, **params):
"""
Draws a node on the canvas.
Arguments
---------
xy : :class:`tuple[float]`
The coordinates of the node.
Keyword arguments
-----------------
color : multiple types
Fill colour of the node (default is :code:`self.fgcolor`).
stroke : multiple types
Stroke colour of the node (default is same as `color`).
"""
pass
def draw_arrow(self, xy0, xy1, **params):
"""
Draws an arrow on the canvas.
Arguments
---------
xy0 : :class:`tuple[float]`
The coordinates of the starting point.
xy1 : :class:`tuple[float]`
The coordinates of the ending point.
Keyword arguments
-----------------
color : multiple types
Colour of the arrow (default is :code:`self.fgcolor`).
shorten : :class:`float`
Factor by which to scale the length (default is :code:`1`).
"""
pass
def output(self, **params):
"""
Output the picture.
Keyword arguments
-----------------
show : :class:`bool`
Whether to show the output (default is :code:`True`).
path : :class:`str`
Path where to save the output (default is :code:`None`).
scale : :class:`float`
(TikZ only) Scale factor to apply to output (default is
:code:`1`).
xscale : :class:`float`
(TikZ only) Scale factor to apply to x axis in output
(default is same as `scale`)
yscale : :class:`float`
(TikZ only) Scale factor to apply to y axis in output
(default is same as `scale`)
"""
pass
def rotate(self, xy):
"""
Returns coordinates rotated according to the orientation
of the picture.
Arguments
---------
xy : :class:`tuple[float]`
The coordinates to rotate.
Returns
-------
rotate : :class:`tuple[float]`
The rotated coordinates.
"""
if self.orientation == 'tb':
return (xy[0], 1-xy[1])
if self.orientation == 'lr':
return (xy[1], 1-xy[0])
if self.orientation == 'rl':
return (1-xy[1], 1-xy[0])
return xy
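# Illustrative usage sketch (a hedged example, not a prescribed workflow): the
# backends are normally driven by rewalt.strdiags, but the API above can be
# exercised directly, e.g. with the Matplotlib backend defined below:
#   backend = MatBackend(bgcolor='white', orientation='bt')
#   backend.draw_node((0.5, 0.5))
#   backend.draw_wire((0.5, 0.9), (0.5, 0.5))
#   backend.draw_label('x', (0.5, 0.5), (4, 4))
#   backend.output(show=False, path='diagram.png')  # 'diagram.png' is an arbitrary path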
class TikZBackend(DrawBackend):
"""
Drawing backend outputting TikZ code that can be embedded in a
LaTeX document.
"""
def __init__(self, **params):
super().__init__(**params)
self.bg = '\\path[fill, color={}] (0, 0) rectangle (1, 1)'.format(
self.bgcolor)
self.wirelayer = []
self.nodelayer = []
self.arrowlayer = []
self.labellayer = []
def draw_wire(self, wire_xy, node_xy, **params):
super().draw_wire(wire_xy, node_xy, **params)
color = params.get('color', self.fgcolor)
alpha = params.get('alpha', 1)
depth = params.get('depth', True)
def to_cubic(p0, p1, p2):
control1 = (p0[0]/3 + 2*p1[0]/3, p0[1]/3 + 2*p1[1]/3)
control2 = (2*p1[0]/3 + p2[0]/3, 2*p1[1]/3 + p2[1]/3)
return p0, control1, control2, p2
if depth:
width = .02
contour = '\\path[fill, color={}] {} .. controls {} '\
'and {} .. {} to {} .. controls {} and {} .. {};\n'.format(
self.bgcolor,
*[self.rotate(p) for p in to_cubic(
node_xy,
(wire_xy[0] - (width/2), node_xy[1]),
(wire_xy[0] - (width/2), wire_xy[1])
)],
*[self.rotate(p) for p in to_cubic(
(wire_xy[0] + (width/2), wire_xy[1]),
(wire_xy[0] + (width/2), node_xy[1]),
node_xy)]
)
self.wirelayer.append(contour)
wire = '\\draw[color={}, opacity={}] {} .. controls {} and {} .. '\
'{};\n'.format(
color,
alpha,
*[self.rotate(p) for p in to_cubic(
node_xy,
(wire_xy[0], node_xy[1]),
wire_xy)]
)
self.wirelayer.append(wire)
def draw_label(self, label, xy, offset, **params):
super().draw_label(label, xy, offset, **params)
color = params.get('color', self.fgcolor)
xy = self.rotate(xy)
label = '\\node[text={}, font={{\\scriptsize \\sffamily}}, '\
'xshift={}pt, yshift={}pt] at {} {{{}}};\n'.format(
color,
offset[0],
offset[1],
xy,
label)
self.labellayer.append(label)
def draw_node(self, xy, **params):
super().draw_node(xy, **params)
color = params.get('color', self.fgcolor)
stroke = params.get('stroke', color)
xy = self.rotate(xy)
node = '\\node[circle, fill={}, draw={}, inner sep=1pt] '\
'at {} {{}};\n'.format(
color, stroke, xy)
self.nodelayer.append(node)
def draw_arrow(self, xy0, xy1, **params):
super().draw_arrow(xy0, xy1, **params)
color = params.get('color', self.fgcolor)
shorten = params.get('shorten', 1)
xy0 = self.rotate(xy0)
xy1 = self.rotate(xy1)
dxy = (xy1[0] - xy0[0], xy1[1] - xy0[1])
xy0_off = (
xy0[0] + 0.5*(1 - shorten)*dxy[0],
xy0[1] + 0.5*(1 - shorten)*dxy[1])
xy1_off = (
xy1[0] - 0.5*(1 - shorten)*dxy[0],
xy1[1] - 0.5*(1 - shorten)*dxy[1])
arrow = '\\draw[->, draw={}] {} -- {};\n'.format(
color,
xy0_off,
xy1_off)
self.arrowlayer.append(arrow)
def output(self, **params):
super().output(**params)
path = params.get('path', None)
show = params.get('show', True)
scale = params.get('scale', 1)
xscale = params.get('xscale', scale)
yscale = params.get('yscale', scale)
baseline = '{([yshift=-.5ex]current bounding box.center)}'
lines = [
'\\begin{{tikzpicture}}[xscale={}, yscale={}, '
'baseline={}]\n'.format(
xscale,
yscale,
baseline),
'\\path[fill={}] (0, 0) rectangle (1, 1);\n'.format(
self.bgcolor),
*self.wirelayer,
*self.nodelayer,
*self.arrowlayer,
*self.labellayer,
'\\end{tikzpicture}']
if path is None and show:
print(''.join(lines))
if path is not None:
with open(path, 'w+') as file:
file.writelines(lines)
class MatBackend(DrawBackend):
"""
Drawing backend outputting Matplotlib figures.
"""
def __init__(self, **params):
super().__init__(**params)
self.fig, self.axes = plt.subplots()
self.axes.set_facecolor(self.bgcolor)
self.axes.set_xlim(0, 1)
self.axes.set_ylim(0, 1)
self.axes.xaxis.set_visible(False)
self.axes.yaxis.set_visible(False)
for side in ('top', 'right', 'bottom', 'left'):
self.axes.spines[side].set_visible(False)
def draw_wire(self, wire_xy, node_xy, **params):
super().draw_wire(wire_xy, node_xy, **params)
color = params.get('color', self.fgcolor)
alpha = params.get('alpha', 1)
depth = params.get('depth', True)
if depth:
width = .02
contour = Path(
[
self.rotate(node_xy),
self.rotate(
(wire_xy[0] - 0.5*width, node_xy[1])),
self.rotate(
(wire_xy[0] - 0.5*width, wire_xy[1])),
self.rotate(
(wire_xy[0] + 0.5*width, wire_xy[1])),
self.rotate(
(wire_xy[0] + 0.5*width, node_xy[1])),
self.rotate(node_xy)
], [
Path.MOVETO,
Path.CURVE3,
Path.CURVE3,
Path.LINETO,
Path.CURVE3,
Path.CURVE3
])
p_contour = PathPatch(
contour,
facecolor=self.bgcolor,
edgecolor='none')
self.axes.add_patch(p_contour)
wire = Path(
[
self.rotate(wire_xy),
self.rotate(
(wire_xy[0], node_xy[1])),
self.rotate(node_xy)
], [
Path.MOVETO,
Path.CURVE3,
Path.CURVE3
])
p_wire = PathPatch(
wire,
facecolor='none',
edgecolor=color,
alpha=alpha,
lw=1)
self.axes.add_patch(p_wire)
def draw_label(self, label, xy, offset, **params):
super().draw_label(label, xy, offset, **params)
color = params.get('color', self.fgcolor)
ha = params.get('ha', 'left')
va = params.get('va', 'baseline')
xy = self.rotate(xy)
xytext = (xy[0] + offset[0], xy[1] + offset[1])
self.axes.annotate(
label,
xy,
xytext=xytext,
textcoords='offset pixels',
color=color,
ha=ha,
va=va)
def draw_node(self, xy, **params):
super().draw_node(xy, **params)
color = params.get('color', self.fgcolor)
stroke = params.get('stroke', color)
xy = self.rotate(xy)
self.axes.scatter(
xy[0],
xy[1],
s=40,
c=color,
edgecolors=stroke,
zorder=2)
def draw_arrow(self, xy0, xy1, **params):
super().draw_arrow(xy0, xy1, **params)
color = params.get('color', self.fgcolor)
shorten = params.get('shorten', 1)
xy0 = self.rotate(xy0)
xy1 = self.rotate(xy1)
dxy = (xy1[0] - xy0[0], xy1[1] - xy0[1])
xy0_off = (
xy0[0] + 0.5*(1 - shorten)*dxy[0],
xy0[1] + 0.5*(1 - shorten)*dxy[1])
xy1_off = (
xy1[0] - 0.5*(1 - shorten)*dxy[0],
xy1[1] - 0.5*(1 - shorten)*dxy[1])
self.axes.annotate(
'',
xy=xy1_off,
xytext=xy0_off,
arrowprops=dict(
arrowstyle='->',
color=color,
shrinkA=0,
shrinkB=0))
def output(self, **params):
super().output(**params)
path = params.get('path', None)
show = params.get('show', True)
self.fig.subplots_adjust(
top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
if path is not None:
self.fig.savefig(path)
if not show:
plt.close(self.fig)
if show:
plt.show()
| 14,403 | 30.937916 | 75 |
py
|
MERL-LB
|
MERL-LB-main/mp_test_fixed_ns.py
|
import os
import numpy as np
import random
from itertools import count
from multiprocessing import Pool, cpu_count
from copy import deepcopy
from config.test import *
from envs.datacenter_env.env import DatacenterEnv
from utils import *
class NSG:
def __init__(self, machine_num) -> None:
self.machine_num = machine_num
def fast_non_dominated_sort(self, values):
"""
Fast non-dominated sorting for a minimisation problem.
:param values: list of objective-value lists, [objective-1 values, objective-2 values, ...]
:return: the solution indices grouped by front, e.g. [[1], [9], [0, 8], [7, 6], [3, 5], [2, 4]], where [1] holds the indices of the Pareto-optimal solutions
"""
values11 = values[0] # values of the first objective
S = [[] for _ in range(0, len(values11))] # for each individual, the set of solutions it dominates
front = [[]] # the fronts of the population, one list per front
n = [0 for _ in range(0, len(values11))] # for each individual, the number of solutions that dominate it
rank = [np.inf for i in range(0, len(values11))] # the front level (rank) of each individual
for p in range(0, len(values11)): # iterate over every individual p
# ==== count the solutions dominating p and collect the solutions p dominates ====
S[p] = [] # solutions dominated by p, i.e. solutions worse than p
n[p] = 0 # number of solutions dominating p, initialised to 0
for q in range(0, len(values11)): # compare p with every individual q
less = 0 # number of objectives on which q is smaller (better) than p
equal = 0 # number of objectives on which q equals p
greater = 0 # number of objectives on which q is larger (worse) than p
for k in range(len(values)): # iterate over every objective
if values[k][p] > values[k][q]: # on objective k, q is smaller than p
less = less + 1 # q is better than p here
if values[k][p] == values[k][q]: # on objective k, q equals p
equal = equal + 1
if values[k][p] < values[k][q]: # on objective k, q is larger than p
greater = greater + 1 # q is worse than p here
if (less + equal == len(values)) and (equal != len(values)):
n[p] = n[p] + 1 # q dominates p: one more solution better than p
elif (greater + equal == len(values)) and (equal != len(values)):
S[p].append(q) # p dominates q: record the index of the worse solution
# ===== collect the Pareto-optimal solutions, i.e. the indices p with n[p] == 0 =====
if n[p] == 0:
rank[p] = 0 # individual p gets rank 0, i.e. it is non-dominated
if p not in front[0]:
# if p is not yet in front 0,
# append it to front 0
front[0].append(p) # indices of the Pareto-optimal solutions
# ======= split the remaining solutions into fronts ========
"""
# Example: suppose the solutions are distributed as below; the code above
# placed index 1 into front[0].
index  dominated-by  dominates   front
1      0             2,3,4,5     0
2      1             3,4,5
3      1             4,5
4      3             5
5      4             0
# First walk over the solutions dominated by index 1 and decrement the
# dominated-by count of each of [2,3,4,5] (1-1, 1-1, 3-1, 4-1), giving:
index  dominated-by  dominates   front
1      0             2,3,4,5     0
2      0             3,4,5
3      0             4,5
4      2             5
5      3             0
# Then every index whose dominated-by count reached 0 is assigned front
# level i+1, and the process repeats on the new table...
"""
i = 0
while front[i] != []: # while the current front is not empty
Q = []
for p in front[i]: # iterate over the individuals p in the current front
for q in S[p]: # iterate over every solution q dominated by p
n[q] = n[q] - 1 # decrement the count of solutions dominating q
if n[q] == 0:
# no remaining solution dominates q
rank[q] = i + 1
if q not in Q:
Q.append(q) # collect the indices belonging to front i+1
i = i + 1 # move on to the next front level
front.append(Q)
del front[len(front) - 1] # remove the empty [] appended when the loop exits
return front # the solution indices grouped by front, e.g. [[1], [9], [0, 8], [7, 6], [3, 5], [2, 4]]
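# Illustrative sketch (hedged, for reading the routine above): with two
# objectives to minimise, values = [[1, 2, 3], [3, 2, 1]] contains no
# dominated solution, so the result is a single front [[0, 1, 2]]; for
# values = [[1, 2], [1, 2]] solution 0 dominates solution 1 and the
# result is [[0], [1]].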
def select_action(self, obs):
# job_state, machines_state,
(
job_res_req_rate,
job_run_time,
machines_occupancy_rate,
machines_run_time,
_,
action_mask,
) = obs
# a smaller std (better balance) is preferred
action_std = np.ones(self.machine_num) * np.inf
# action_std = np.ones((10, 4)) * np.inf # per-resource variant
machines_occupancy_rate = machines_occupancy_rate[:, 0, :]
# a shorter remaining run time is preferred
action_run_time = np.ones(self.machine_num) * np.inf
# action_run_time_diff = np.ones(self.machine_num) * np.inf
# tentatively place the job on every feasible machine and record the resulting std and remaining run time
for machine_index, mask in enumerate(action_mask):
# skip infeasible actions
if mask == False:
continue
after_machines_occupancy_rate = deepcopy(machines_occupancy_rate)
after_machines_run_time = deepcopy(machines_run_time)
after_machines_occupancy_rate[machine_index] += job_res_req_rate
after_machines_run_time[machine_index] = max(
after_machines_run_time[machine_index], job_run_time
)
# calculate std
after_std = np.std(after_machines_occupancy_rate, axis=0) # m*4
action_std[machine_index] = np.mean(after_std)
# calculate runtime
after_run_time = np.mean(after_machines_run_time) # m*1
action_run_time[machine_index] = after_run_time
# action_run_time_diff[machine_index] = abs(
# after_machines_run_time[machine_index] - job_run_time
# )
# non-dominated sorting over the candidate placements
# open question: how should a preference weighting between the two objectives be applied here?
action_value = np.concatenate(([action_std], [action_run_time]))
# action_value = np.concatenate(([action_std], [action_run_time_diff]))
front = self.fast_non_dominated_sort(action_value)
# pick an action at random from the first (Pareto-optimal) front
action = random.sample(front[0], 1)[0]
return action
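# Usage sketch (hedged; machine_num=10 is only an illustrative value):
#   agent = NSG(machine_num=10)
#   action = agent.select_action(obs)  # obs as returned by DatacenterEnv.reset()/step()
# test_one_path below drives the scheduler in exactly this way.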
def test_one_path(args, seq_index, data_save_path, fig_save_path):
print("start test seq_index: ", seq_index)
# init agent
agent = NSG(args.machine_num)
# init env
env = DatacenterEnv(args)
env.seq_index = seq_index
# start test
obs = env.reset()
for _ in count():
# select and perform an action
action = agent.select_action(obs)
# execute action
next_obs, _, done, _ = env.step(action)
# move to the next state
obs = next_obs
if done:
break
# save test result
# save data recorded before running the remaining jobs to completion
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
# print mean std and mean run time
machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
std_fitness = np.mean(machines_occupancy_mean_std)
runtime_fitness = np.mean(machines_finish_time_record)
print(f"std_fitness {std_fitness} runtime_fitness {runtime_fitness}")
# save data recorded after running the remaining jobs to completion
env.run_to_end()
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"end_occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"end_finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
for i in range(4):
data = machines_occupancy_rate[:, :, i]
save_name = os.path.join(fig_save_path, "use_rate", f"use_rate_e{seq_index}_{i}.png")
plot_mutil_lines_chart(
data,
save_name=save_name,
xlabel="time",
ylabel="utilization",
title="Container Resource Utilization",
)
save_name = os.path.join(fig_save_path, "finish_time", f"finish_time_e{seq_index}.png")
plot_mutil_lines_chart(
machines_finish_time_record,
save_name=save_name,
xlabel="time",
ylabel="remaining time",
title="Container Remaining Time",
)
return std_fitness, runtime_fitness, env.job_num
if __name__ == "__main__":
args = parse_args()
args.method = "nsg"
save_dir = os.path.join(
args.save_path,
args.method,
args.tag,
)
model_save_path = os.path.join(save_dir, "models")
fig_save_path = os.path.join(save_dir, "fig")
data_save_path = os.path.join(save_dir, "data")
os.makedirs(data_save_path, exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "use_rate"), exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "finish_time"), exist_ok=True)
os.makedirs(model_save_path, exist_ok=True)
os.makedirs(fig_save_path, exist_ok=True)
# multiprocessing
mutil_process = []
pool = Pool(cpu_count())
for i in range(args.job_seq_num):
one_process = pool.apply_async(test_one_path, args=(args, i, data_save_path, fig_save_path))
mutil_process.append(one_process)
pool.close()
pool.join()
# calculate mean performance
fitness_record = []
job_num_list = []
for p in mutil_process:
std_fitness, runtime_fitness, job_num = p.get()
job_num_list.append(job_num)
fitness_record.append((std_fitness, runtime_fitness))
fitness_record = np.array(fitness_record)
mean_fitness = np.mean(fitness_record, axis=0)
std_fitness = np.std(fitness_record, axis=0)
print(job_num_list)
np.save(os.path.join(data_save_path, "job_num.npy"), np.array(job_num_list))
print(
"mean std fitness: {:.4f} mean runtime fitness: {:.4f}".format(
mean_fitness[0], mean_fitness[1]
)
)
print(
"std std fitness: {:.4f} std runtime fitness: {:.4f}".format(std_fitness[0], std_fitness[1])
)
print("done")
| 9,860 | 34.092527 | 100 |
py
|
MERL-LB
|
MERL-LB-main/mp_test_nn_load.py
|
import os
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
from itertools import count
from multiprocessing import Pool, cpu_count
from config.test import *
from envs.datacenter_env.env import DatacenterEnv
from utils import *
class Actor(nn.Module):
def __init__(self, dim_list=[126, 32, 1]):
super().__init__()
self.dim_list = dim_list
fc = []
self.param_num = 0
for i in range(len(dim_list) - 1):
fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
self.param_num += dim_list[i] * dim_list[i + 1] + dim_list[i + 1]
self.fc = nn.ModuleList(fc)
def forward(self, x):
for i in range(len(self.fc) - 1):
x = F.relu(self.fc[i](x))
x = self.fc[-1](x)
x = torch.squeeze(x, dim=-1)
return x
def update(self, weights):
weights = torch.FloatTensor(weights)
with torch.no_grad():
start = 0
for fc in self.fc:
end = start + fc.in_features * fc.out_features
fc.weight.data = weights[start:end].reshape(fc.out_features, fc.in_features)
start = end
end = start + fc.out_features
fc.bias.data = weights[start:end]
start = end
def predict(self, input, action_mask=None):
predict = self(input)
if action_mask is not None:
predict[action_mask == False] += -1e8
return torch.argmax(predict, dim=1).cpu().item()
def show(self):
with torch.no_grad():
for parameters in self.parameters():
print(parameters.numpy().flatten())
class Agent(nn.Module):
def __init__(self):
super(Agent, self).__init__()
self.job_actor = Actor()
def update(self, job_weights):
self.job_actor.update(job_weights)
def select_action(self, obs):
(
job_res_req_rate,
job_run_time,
machines_all_occupancy_rate,
machines_run_time,
_,
action_mask,
) = obs
# to tensor
job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
machines_all_occupancy_rate = torch.tensor(
np.array([machines_all_occupancy_rate]), dtype=torch.float
)
machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
# job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
B, n, t, r = machines_all_occupancy_rate.shape
machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1) # B*t*r
machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1) # B*t*r
job_state = job_state.reshape(B, 1, -1)
job_state = job_state.repeat(1, n, 1)
machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
machines_state_mean = torch.cat(
(
machines_occupancy_rate_mean,
machines_occupancy_rate_std,
),
dim=-1,
)
machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
machines_run_time = machines_run_time.reshape(B, n, -1)
machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
job_input = torch.cat(
(
job_state,
machines_occupancy_rate,
machines_run_time,
machines_state_mean_std_run_time,
),
dim=-1,
) # B*n*dim2
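# Shape note (hedged; t and r are inferred, not set here): with r = 4
# resources and t = 10 time steps the per-machine feature vector has
# 5 (job) + 40 (occupancy) + 1 (run time) + 80 (mean/std stats) = 126
# entries, matching dim_list[0] of the Actor above.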
action = self.job_actor.predict(job_input, action_mask)
# action = self.job_actor.predict(job_input)
return action
def show(self):
self.job_actor.show()
def test_one_path(args, seq_index, data_save_path, fig_save_path):
print("start test seq_index: ", seq_index)
# checkpoint_path = "output/train/nsga/run02/elite/g3382_0/15_-349.95341_-19.68042.pth"
# checkpoint_path = "output/one_job/ga/reward_sum/run02_m15/final_population/g_9796_f_-310.773_-0.026/24_f_-308.432_-0.024.pth"
agent = Agent()
# state_dict = torch.load("24_f_-342.436_-0.029.pth")
# agent.load_state_dict(state_dict)
state_dict = torch.load(args.checkpoint_path)
agent.job_actor.load_state_dict(state_dict)
# init env
env = DatacenterEnv(args)
env.seq_index = seq_index
# start test
obs = env.reset()
for _ in count():
# select and perform an action
action = agent.select_action(obs)
# execute action
next_obs, _, done, _ = env.step(action)
# move to the next state
obs = next_obs
if done:
break
# save test result
# save data recorded before running the remaining jobs to completion
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
machines_job_num_record = np.array(env.machines_job_num_record)
np.save(
os.path.join(data_save_path, f"job_num_{seq_index}.npy"),
machines_job_num_record,
)
# print mean std and mean run time
machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
std_fitness = np.mean(machines_occupancy_mean_std)
runtime_fitness = np.mean(machines_finish_time_record)
print(f"std_fitness {std_fitness} runtime_fitness {runtime_fitness}")
# save data recorded after running the remaining jobs to completion
env.run_to_end()
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"end_occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"end_finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
machines_job_num_record = np.array(env.machines_job_num_record)
np.save(
os.path.join(data_save_path, f"end_job_num_{seq_index}.npy"),
machines_job_num_record,
)
for i in range(4):
data = machines_occupancy_rate[:, :, i]
save_name = os.path.join(fig_save_path, "use_rate", f"use_rate_e{seq_index}_{i}.png")
plot_mutil_lines_chart(
data,
save_name=save_name,
xlabel="time",
ylabel="utilization",
title="Container Resource Utilization",
)
save_name = os.path.join(fig_save_path, "finish_time", f"finish_time_e{seq_index}.png")
plot_mutil_lines_chart(
machines_finish_time_record,
save_name=save_name,
xlabel="time",
ylabel="remaining time",
title="Container Remaining Time",
)
return std_fitness, runtime_fitness, env.job_num
if __name__ == "__main__":
args = parse_args()
args.job_seq_num = 5
args.method = "igd"
args.tag = "user_load_test02"
args.actual = True
# args.checkpoint_path = "output/train/nsga/run03/elite/g1_1/20_-501.30449_-25.49838.pth"
# args.checkpoint_path = "output/train/nsga/run05/elite/g24214_0/10_-351.04309_-20.52227.pth"
args.checkpoint_path = (
"output/train/ns_deepjs/run02_no_mask/models/e13919_s9_d380.7892_b22.2165"
)
job_num_list = range(2, 10)
# user_sigam_list = [0]
root_dir = os.path.join(
args.save_path,
args.method,
args.tag,
)
result = []
result2 = []
for max_job_num in job_num_list:
user_load_rate = (
max_job_num
/ 2
* args.max_res_req
/ 2
* args.max_job_len
/ 2
/ args.res_capacity
/ args.machine_num
)
if user_load_rate > 1.1:
break
print(f"Test user_load_rate {user_load_rate:.3f}")
save_dir = os.path.join(
root_dir,
f"user_load_rate_{user_load_rate:.3f}",
)
os.makedirs(save_dir, exist_ok=True)
fig_save_path = os.path.join(save_dir, "fig")
data_save_path = os.path.join(save_dir, "data")
os.makedirs(data_save_path, exist_ok=True)
os.makedirs(fig_save_path, exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "use_rate"), exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "finish_time"), exist_ok=True)
# save args
args.max_job_num = max_job_num
args_dict = args.__dict__
args_path = os.path.join(save_dir, "args.txt")
with open(args_path, "w") as f:
for each_arg, value in args_dict.items():
f.writelines(each_arg + " : " + str(value) + "\n")
# multiprocessing
mutil_process = []
pool = Pool(cpu_count())
for i in range(args.job_seq_num):
one_process = pool.apply_async(
test_one_path, args=(args, i, data_save_path, fig_save_path)
)
mutil_process.append(one_process)
pool.close()
pool.join()
# calculate mean performance
fitness_record = []
job_num_list = []
for p in mutil_process:
std_fitness, runtime_fitness, job_num = p.get()
job_num_list.append(job_num)
fitness_record.append((std_fitness, runtime_fitness))
result2.append((user_load_rate, std_fitness, runtime_fitness))
fitness_record = np.array(fitness_record)
mean_fitness = np.mean(fitness_record, axis=0)
std_fitness = np.std(fitness_record, axis=0)
# collect the per-load-rate summary statistics for the CSV written below
result.append(
(user_load_rate, mean_fitness[0], mean_fitness[1], std_fitness[0], std_fitness[1])
)
print(job_num_list)
np.save(os.path.join(data_save_path, "job_num.npy"), np.array(job_num_list))
print(
"mean std fitness: {:.4f} mean runtime fitness: {:.4f}".format(
mean_fitness[0], mean_fitness[1]
)
)
print(
"std std fitness: {:.4f} std runtime fitness: {:.4f}".format(
std_fitness[0], std_fitness[1]
)
)
print("done")
df = pd.DataFrame(
result,
columns=[
"user_load_rate",
"balance_fitness_mean",
"duration_fitness_mean",
"balance_fitness_std",
"duration_fitness_std",
],
)
df.to_csv(os.path.join(root_dir, f"{ args.method}_user_load_exp.csv"))
df2 = pd.DataFrame(
result2,
columns=[
"user_load_rate",
"balance_fitness",
"duration_fitness",
],
)
df2.to_csv(os.path.join(root_dir, f"{ args.method}_user_load_exp2.csv"))
| 11,123 | 32.506024 | 131 |
py
|
MERL-LB
|
MERL-LB-main/mp.py
|
import torch
from torchvision import datasets, transforms
from tqdm import tqdm
device_ids = [0, 1, 2, 3] # available GPUs
BATCH_SIZE = 64
transform = transforms.Compose([transforms.ToTensor()])
data_train = datasets.MNIST(root="./data/", transform=transform, train=True, download=True)
data_test = datasets.MNIST(root="./data/", transform=transform, train=False)
data_loader_train = torch.utils.data.DataLoader(
dataset=data_train,
# per-GPU batch size * number of GPUs
batch_size=BATCH_SIZE * len(device_ids),
shuffle=True,
num_workers=2,
)
data_loader_test = torch.utils.data.DataLoader(
dataset=data_test, batch_size=BATCH_SIZE * len(device_ids), shuffle=True, num_workers=2
)
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
self.conv1 = torch.nn.Sequential(
torch.nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1),
torch.nn.ReLU(),
torch.nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
torch.nn.ReLU(),
torch.nn.MaxPool2d(stride=2, kernel_size=2),
)
self.dense = torch.nn.Sequential(
torch.nn.Linear(14 * 14 * 128, 1024),
torch.nn.ReLU(),
torch.nn.Dropout(p=0.5),
torch.nn.Linear(1024, 10),
)
def forward(self, x):
x = self.conv1(x)
x = x.view(-1, 14 * 14 * 128)
x = self.dense(x)
return x
model = Model()
# wrap the model over the devices to use
model = torch.nn.DataParallel(model, device_ids=device_ids)
# load the model onto device 0
model = model.cuda()
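# Note (hedged): DataParallel splits each batch of size
# BATCH_SIZE * len(device_ids) along dim 0 and scatters the chunks to the
# listed GPUs; a single-GPU version of this setup would simply be
#   model = Model().cuda()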
cost = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters())
n_epochs = 50
for epoch in range(n_epochs):
running_loss = 0.0
running_correct = 0
print("Epoch {}/{}".format(epoch, n_epochs))
print("-" * 10)
for data in tqdm(data_loader_train):
X_train, y_train = data
# move the training batch to device 0
X_train, y_train = X_train.cuda(), y_train.cuda()
outputs = model(X_train)
_, pred = torch.max(outputs.data, 1)
optimizer.zero_grad()
loss = cost(outputs, y_train)
loss.backward()
optimizer.step()
running_loss += loss.data.item()
running_correct += torch.sum(pred == y_train.data)
testing_correct = 0
for data in data_loader_test:
X_test, y_test = data
# move the test batch to the GPU
X_test, y_test = X_test.cuda(), y_test.cuda()
outputs = model(X_test)
_, pred = torch.max(outputs.data, 1)
testing_correct += torch.sum(pred == y_test.data)
print(
"Loss is:{:.4f}, Train Accuracy is:{:.4f}%, Test Accuracy is:{:.4f}".format(
torch.true_divide(running_loss, len(data_train)),
torch.true_divide(100 * running_correct, len(data_train)),
torch.true_divide(100 * testing_correct, len(data_test)),
)
)
torch.save(model.state_dict(), "model_parameter.pkl")
| 2,903 | 30.225806 | 91 |
py
|
MERL-LB
|
MERL-LB-main/mp_train_nn_nsga2_one.py
|
import os
import torch
import random
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from multiprocessing import Pool, cpu_count
from config.ga import *
from typing import List
from envs.datacenter_env.env import DatacenterEnv
from torch.utils.tensorboard import SummaryWriter
class Actor(nn.Module):
def __init__(self, dim_list=[126, 32, 1]):
super().__init__()
self.dim_list = dim_list
fc = []
self.param_num = 0
for i in range(len(dim_list) - 1):
fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
self.param_num += dim_list[i] * dim_list[i + 1] + dim_list[i + 1]
self.fc = nn.ModuleList(fc)
def forward(self, x):
for i in range(len(self.fc) - 1):
x = F.relu(self.fc[i](x))
x = self.fc[-1](x)
x = torch.squeeze(x, dim=-1)
return x
def update(self, weights):
weights = torch.FloatTensor(weights)
with torch.no_grad():
start = 0
for fc in self.fc:
end = start + fc.in_features * fc.out_features
fc.weight.data = weights[start:end].reshape(fc.out_features, fc.in_features)
start = end
end = start + fc.out_features
fc.bias.data = weights[start:end]
start = end
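# Layout sketch (hedged): the flat weight vector is consumed layer by layer;
# for dim_list=[126, 32, 1] the first 126*32 entries fill fc[0].weight, the
# next 32 fill fc[0].bias, then 32*1 for fc[1].weight and 1 for fc[1].bias,
# i.e. param_num = 126*32 + 32 + 32*1 + 1 = 4097 entries in total.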
def predict(self, input, action_mask=None):
predict = self(input)
if action_mask is not None:
predict[action_mask == False] += -1e8
return torch.argmax(predict, dim=1).cpu().item()
def show(self):
with torch.no_grad():
for parameters in self.parameters():
print(parameters.numpy().flatten())
class Agent(nn.Module):
def __init__(self):
super(Agent, self).__init__()
self.job_actor = Actor()
def update(self, job_weights):
self.job_actor.update(job_weights)
def choose_action(self, obs):
(
job_res_req_rate,
job_run_time,
machines_all_occupancy_rate,
machines_run_time,
_,
action_mask,
) = obs
# to tensor
job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
machines_all_occupancy_rate = torch.tensor(
np.array([machines_all_occupancy_rate]), dtype=torch.float
)
machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
# job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
B, n, t, r = machines_all_occupancy_rate.shape
machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1) # B*t*r
machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1) # B*t*r
job_state = job_state.reshape(B, 1, -1)
job_state = job_state.repeat(1, n, 1)
machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
machines_state_mean = torch.cat(
(
machines_occupancy_rate_mean,
machines_occupancy_rate_std,
),
dim=-1,
)
machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
machines_run_time = machines_run_time.reshape(B, n, -1)
machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
job_input = torch.cat(
(
job_state,
machines_occupancy_rate,
machines_run_time,
machines_state_mean_std_run_time,
),
dim=-1,
) # B*n*dim2
action = self.job_actor.predict(job_input, action_mask)
return action
def show(self):
self.job_actor.show()
class Individual:
def __init__(self, job_genes=None):
self.agent = Agent()
self.param_num = self.agent.job_actor.param_num
self.job_genes = job_genes
self.train_fitness = None
self.eval_fitness = None
self.std_fitness = np.inf
self.steps = 0
def init(self):
self.job_genes = np.random.uniform(-1, 1, self.param_num)
def update(self):
self.agent.update(self.job_genes.copy())
def run_individual_in_env(id, args, genes, seq_index):
env = DatacenterEnv(args)
env.seq_index = seq_index
env.reset()
individual = Individual(genes)
individual.update()
obs = env.reset()
done = False
action_list = []
reward_list = []
while not done:
action = individual.agent.choose_action(obs)
obs, reward, done, _ = env.step(action)
action_list.append(action)
reward_list.append(reward)
if args.ga_fitness_type == "std":
# compute the standard deviation
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
machines_occupancy_std = np.std(machines_occupancy_rate, axis=1)
machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
std_fitness = np.sum(machines_occupancy_mean_std)
fitness = -std_fitness
elif args.ga_fitness_type == "runtime":
# compute the running time
machines_finish_time_record = np.array(env.machines_finish_time_record)
runtime_fitness = np.sum(machines_finish_time_record / 60) # divide by 60 to keep the value from getting too large
fitness = -runtime_fitness
elif args.ga_fitness_type == "double":
# compute the standard deviation
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
std_fitness = np.mean(machines_occupancy_mean_std)
# compute the running time
machines_finish_time_record = np.array(env.machines_finish_time_record)
runtime_fitness = np.mean(machines_finish_time_record) # keep the value from getting too large
fitness = np.array([-runtime_fitness, -std_fitness])
return id, fitness
class GA:
def __init__(self, args):
self.args = args
self.p_size = args.ga_parent_size
self.c_size = args.ga_children_size
self.job_genes_len = 0
self.mutate_rate = args.ga_mutate_rate
self.mutate_scale = args.ga_mutate_scale
self.population: List[Individual] = []
self.elitism_population: List[Individual] = []
self.avg_fitness = 0
self.seq_index = 0
self.seq_num = args.job_seq_num
self.generation = 0
def setup_seed(self):
seed = args.seed
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def generate_ancestor(self):
for _ in range(self.p_size):
individual = Individual()
individual.init()
self.population.append(individual)
self.job_genes_len = individual.param_num
def inherit_ancestor(self):
"""Load genes(nn model parameters) from file."""
for i in range(self.p_size):
pth = os.path.join("model", "all_individual", str(i) + "_nn.pth")
nn = torch.load(pth)
genes = []
with torch.no_grad():
for parameters in nn.parameters():
genes.extend(parameters.numpy().flatten())
self.population.append(Individual(np.array(genes)))
def crossover(self, c1_genes, c2_genes):
"""Single point crossover."""
p1_genes = c1_genes.copy()
p2_genes = c2_genes.copy()
point = np.random.randint(0, (self.job_genes_len))
c1_genes[: point + 1] = p2_genes[: point + 1]
c2_genes[: point + 1] = p1_genes[: point + 1]
def mutate(self, c_genes):
"""Gaussian mutation with scale"""
mutation_array = np.random.random(c_genes.shape) < self.mutate_rate
mutation = np.random.normal(size=c_genes.shape)
mutation[mutation_array] *= self.mutate_scale
c_genes[mutation_array] += mutation[mutation_array]
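# Usage sketch (hedged): both operators act in place on flat gene arrays,
# mirroring generate_children below:
#   c1_genes, c2_genes = p1.job_genes.copy(), p2.job_genes.copy()
#   self.crossover(c1_genes, c2_genes)
#   self.mutate(c1_genes)
#   self.mutate(c2_genes)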
# def elitism_selection(self):
# # normalise
# fitness_list = []
# for individual in self.population:
# fitness_list.append(individual.train_fitness)
# fitness_list = np.array(fitness_list)
# norm_fitness_list = (fitness_list - np.min(fitness_list, axis=0)) / (
# np.max(fitness_list, axis=0) - np.min(fitness_list, axis=0)
# )
# # weighted-sum ranking
# norm_fitness_list = np.sum(
# norm_fitness_list * self.args.ga_fitness_wight, axis=-1
# )
# population_sorted_index = np.argsort(norm_fitness_list) # ascending sort, keep the last (largest) entries
# population_sorted_index = population_sorted_index[-self.p_size :]
# self.elitism_population = [
# self.population[index] for index in population_sorted_index
# ]
# self.avg_fitness = np.mean(fitness_list[population_sorted_index], axis=0)
# self.elitism_norm_fitness_list = norm_fitness_list[population_sorted_index]
def elitism_selection(self):
# normalised fitness values
fitness_list = []
for individual in self.population:
fitness_list.append(individual.train_fitness)
fitness_list = np.array(fitness_list)
norm_fitness_list = (fitness_list - np.min(fitness_list, axis=0)) / (
np.max(fitness_list, axis=0) - np.min(fitness_list, axis=0)
)
# 快速非支配排序越小越好 所以转换为正数
fm_fitness_list = -np.array(fitness_list).T
# 快速非支配排序
front_list = self.fast_non_dominated_sort(fm_fitness_list)
# 拥挤度计算
crowded_distance_list = []
for front in front_list:
front_values = fm_fitness_list[:, front]
crowded_distance = self.crowded_distance(front_values)
crowded_distance_list.append(crowded_distance)
# 精英选择
elitism_index = []
save_best_front = False
for front, crowded_distance in zip(front_list, crowded_distance_list):
# 保存最前沿模型
if not save_best_front:
best_front_population = []
for index in front:
best_front_population.append(self.population[index])
self.best_front_population = best_front_population
save_best_front = True
# 根据拥挤度排序
front = np.array(front)
sorted_index = np.argsort(crowded_distance) # 升序排序
sorted_front = front[sorted_index[::-1]] # 降序排序取拥挤度大的
# 选择精英
# 选择的个数是不是可以定义?
for index in sorted_front:
if len(elitism_index) < self.p_size:
elitism_index.append(index)
else:
break
# [0.5, 05] 权重相加排序
norm_fitness_list = np.sum(norm_fitness_list * self.args.ga_fitness_wight, axis=-1)
elitism_population = [self.population[index] for index in elitism_index]
# 检查精英变化数量
elite_change_num = len(elitism_population)
for elite in elitism_population:
if elite in self.elitism_population:
elite_change_num -= 1
self.elitism_population = elitism_population
self.fitness_list = fitness_list
self.avg_fitness = np.mean(fitness_list[elitism_index], axis=0)
self.elitism_norm_fitness_list = norm_fitness_list[elitism_index]
return elite_change_num
    # roulette-wheel selection over the elite population
    def roulette_wheel_selection(self, size) -> List[Individual]:
        # individuals with larger normalized fitness are more likely to be picked
selection = []
wheel = sum(self.elitism_norm_fitness_list)
for _ in range(size):
pick = np.random.uniform(0, wheel)
current = 0
for i, individual_fitness in enumerate(self.elitism_norm_fitness_list):
current += individual_fitness
if current > pick:
selection.append(self.elitism_population[i])
break
return selection
    # random selection
    def random_select_parent(self, size):
        # randomly sample parents from the elite population
selection = random.sample(self.elitism_population, size)
return selection
    # generate children
def generate_children(self):
children_population = []
while len(children_population) < self.c_size:
# p1, p2 = self.roulette_wheel_selection(2)
p1, p2 = self.random_select_parent(2)
c1_genes, c2_genes = p1.job_genes.copy(), p2.job_genes.copy()
self.crossover(c1_genes, c2_genes)
self.mutate(c1_genes)
self.mutate(c2_genes)
c1 = Individual(c1_genes)
c2 = Individual(c2_genes)
children_population.extend([c1, c2])
self.children_population = children_population
def save_population(self, population: list[Individual], label=""):
save_dir = os.path.join(
self.args.save_path,
self.args.method,
self.args.tag,
label,
f"g{self.generation}_{self.seq_index}",
)
os.makedirs(save_dir, exist_ok=True)
mean_fitness_list = []
for id, individual in enumerate(population):
mean_fitness = np.array(individual.train_fitness)
mean_fitness_list.append([self.generation, id, *mean_fitness.tolist()])
model_save_path = os.path.join(
save_dir, "{}_{:.5f}_{:.5f}.pth".format(id, *mean_fitness.tolist())
)
individual.update()
torch.save(individual.agent.job_actor.state_dict(), model_save_path)
mean_fitness_list = np.array(mean_fitness_list)
np.save(os.path.join(save_dir, "mean_fitness_record.npy"), mean_fitness_list)
return mean_fitness_list
    # run one generation of evolution
    def evolve(self):
        # plain serial loop, kept for debugging
# population = []
# for individual in self.population:
# individual = run_individual_in_env(
# self.args,
# individual.job_genes,
# self.seq_index,
# )
# population.append(individual)
        # evaluate the population in parallel with a process pool
population_num = self.args.ga_parent_size + self.args.ga_children_size
pool_num = min(cpu_count(), population_num)
print(f"use {pool_num} cup core")
pool = Pool(pool_num)
mutil_process = []
for id, individual in enumerate(self.population):
if individual.train_fitness is not None:
continue
            # run the individual in the environment to obtain its fitness
one_process = pool.apply_async(
run_individual_in_env,
args=(
id,
self.args,
individual.job_genes,
self.seq_index,
),
)
mutil_process.append(one_process)
pool.close()
pool.join()
        # collect results from the worker processes
for one_process in mutil_process:
id, fitness = one_process.get()
self.population[id].train_fitness = fitness
        # save the full population and its fitness
self.save_population(self.population, "all")
        # elitism selection
elite_change_num = self.elitism_selection()
        # save the elite population
elite_fitness_list = self.save_population(self.elitism_population, "elite")
        # generate children
self.generate_children()
new_population = []
new_population.extend(self.elitism_population)
new_population.extend(self.children_population)
self.population = new_population
self.seq_index = (self.seq_index + 1) % self.seq_num
self.generation += 1
return elite_change_num, elite_fitness_list
    # argsort values for each objective
    def sort_by_values(self, values):
        # ascending sort indices per objective
sorted_index_list = []
for value in values:
sorted_index = np.argsort(value)
sorted_index_list.append(sorted_index)
return sorted_index_list
    # crowding distance calculation
    def crowded_distance(self, values):
        distances = []
        sorted_index_list = self.sort_by_values(values)  # ascending sort indices
for value, sorted_index in zip(values, sorted_index_list):
distance = np.ones(len(sorted_index)) * 1e5
for i in range(1, len(sorted_index) - 1):
pre_index = sorted_index[i - 1]
curr_index = sorted_index[i]
after_index = sorted_index[i + 1]
distance[curr_index] = (value[after_index] - value[pre_index]) / (
max(value) - min(value)
)
distances.append(distance)
distances = np.array(distances)
distance = np.sum(distances, axis=0)
return distance
    # fast non-dominated sort (NSGA-II)
    def fast_non_dominated_sort(self, values):
        # smaller objective values are better
        values11 = values[0]  # values of the first objective
        S = [[] for _ in range(0, len(values11))]  # for each individual, the set of solutions it dominates
        front = [[]]  # list of fronts; one list per rank
        n = [0 for _ in range(0, len(values11))]  # for each individual, how many solutions dominate it
        rank = [np.inf for _ in range(0, len(values11))]  # rank (front index) of each individual
        # for every individual, compute its domination count and the set of solutions it dominates
        # (objective values: smaller is better)
        for p in range(0, len(values11)):
            S[p] = []  # solutions dominated by p (worse than p)
            n[p] = 0  # number of solutions dominating p (better than p)
            for q in range(0, len(values11)):  # compare p against every individual q
                less = 0  # objectives on which q is better (smaller) than p
                equal = 0  # objectives on which q equals p
                greater = 0  # objectives on which q is worse (larger) than p
                for k in range(len(values)):  # loop over objectives
                    if values[k][p] > values[k][q]:  # on objective k, q is smaller than p
                        less = less + 1  # q is better than p
                    if values[k][p] == values[k][q]:  # on objective k, q equals p
                        equal = equal + 1
                    if values[k][p] < values[k][q]:  # on objective k, q is larger than p
                        greater = greater + 1  # q is worse than p
                if (less + equal == len(values)) and (equal != len(values)):
                    n[p] = n[p] + 1  # q dominates p: increment p's domination count
                elif (greater + equal == len(values)) and (equal != len(values)):
                    S[p].append(q)  # p dominates q: record q's index
            # Pareto-optimal solutions are those with n[p] == 0
            if n[p] == 0:
                rank[p] = 0  # individual p belongs to the best front
                if p not in front[0]:
                    # append p to front 0 if it is not already there
                    front[0].append(p)  # indices of Pareto-optimal solutions
        # split the remaining solutions into successive fronts
        i = 0
        while front[i] != []:  # while the current front is not empty
            Q = []
            for p in front[i]:  # for each individual p in the current front
                for q in S[p]:  # for each solution q dominated by p
                    n[q] = n[q] - 1  # remove p from q's domination count
                    if n[q] == 0:
                        rank[q] = i + 1
                        if q not in Q:
                            Q.append(q)  # q belongs to front i + 1
            i = i + 1  # advance to the next front
            front.append(Q)
        del front[len(front) - 1]  # drop the empty front appended on the last iteration
        return front  # per-front index lists, e.g. [[1], [9], [0, 8], [7, 6], [3, 5], [2, 4]]
if __name__ == "__main__":
args = parse_args()
args.method = "nsga"
args.job_seq_num = 1
args.tag = "run05"
save_dir = os.path.join(
args.save_path,
args.method,
args.tag,
)
os.makedirs(save_dir, exist_ok=True)
# save args
args_dict = args.__dict__
args_path = os.path.join(save_dir, "args.txt")
with open(args_path, "w") as f:
for each_arg, value in args_dict.items():
f.writelines(each_arg + " : " + str(value) + "\n")
writer = SummaryWriter(os.path.join(save_dir, "log"))
ga = GA(args)
ga.setup_seed()
if args.ga_choice == "generate":
ga.generate_ancestor()
else:
ga.inherit_ancestor()
fitness_list = []
mean_best_fitness = [-np.inf] * args.ga_fitness_num
while True:
print("=" * 100)
print(f"evolve generation {ga.generation}")
elite_change_num, elite_fitness_list = ga.evolve()
        # log to tensorboard
writer.add_scalar("Elite change num", elite_change_num, ga.generation)
elite_fitness_list = np.array(elite_fitness_list)
elite_fitness_list = -elite_fitness_list[:, -2:]
y = elite_fitness_list[:, 0]
x = elite_fitness_list[:, 1]
figure = plt.figure(figsize=(8, 8), dpi=100)
plt.scatter(x, y, label="train")
plt.scatter(16.2658, 534.9209, label="lc")
        # plt.scatter(x, y, label="rr")
plt.scatter(66.8868, 349.5121, label="lg")
plt.scatter(17.0905, 351.4006, label="wsga")
plt.xlim((0, 250))
plt.ylim((200, 600))
plt.xlabel("balance")
plt.ylabel("duration")
plt.title("Target distribution")
plt.legend()
writer.add_figure("Target distribution", figure, ga.generation)
plt.close()
max_elite_fitness = np.max(elite_fitness_list, axis=0)
min_elite_fitness = np.min(elite_fitness_list, axis=0)
writer.add_scalar("Balance fitness max", max_elite_fitness[1], ga.generation)
writer.add_scalar("Duration fitness max", max_elite_fitness[0], ga.generation)
writer.add_scalar("Balance fitness min", min_elite_fitness[1], ga.generation)
writer.add_scalar("Duration fitness min", min_elite_fitness[0], ga.generation)
| 21,461 | 34.299342 | 96 |
py
|
MERL-LB
|
MERL-LB-main/mp_train_nn_nsga2.py
|
import os
import torch
import random
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from multiprocessing import Pool, cpu_count
from config.ga import *
from typing import List
from envs.datacenter_env.env import DatacenterEnv
from torch.utils.tensorboard import SummaryWriter
class Actor(nn.Module):
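    # Small MLP that outputs one score per candidate machine for the current job;
    # its weights can be overwritten in place from a flat parameter vector (see update()).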
def __init__(self, dim_list=[126, 32, 1]):
super().__init__()
self.dim_list = dim_list
fc = []
self.param_num = 0
for i in range(len(dim_list) - 1):
fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
self.param_num += dim_list[i] * dim_list[i + 1] + dim_list[i + 1]
self.fc = nn.ModuleList(fc)
def forward(self, x):
for i in range(len(self.fc) - 1):
x = F.relu(self.fc[i](x))
x = self.fc[-1](x)
x = torch.squeeze(x, dim=-1)
return x
def update(self, weights):
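        # unpack the flat parameter vector into each layer's weight matrix and bias, in order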
weights = torch.FloatTensor(weights)
with torch.no_grad():
start = 0
for fc in self.fc:
end = start + fc.in_features * fc.out_features
fc.weight.data = weights[start:end].reshape(fc.out_features, fc.in_features)
start = end
end = start + fc.out_features
fc.bias.data = weights[start:end]
start = end
def predict(self, input, action_mask=None):
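        # machines excluded by the action mask receive a large negative score, so argmax never picks them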
predict = self(input)
if action_mask is not None:
predict[action_mask == False] += -1e8
return torch.argmax(predict, dim=1).cpu().item()
def show(self):
with torch.no_grad():
for parameters in self.parameters():
print(parameters.numpy().flatten())
class Agent(nn.Module):
def __init__(self):
super(Agent, self).__init__()
self.job_actor = Actor()
def update(self, job_weights):
self.job_actor.update(job_weights)
def choose_action(self, obs):
(
job_res_req_rate,
job_run_time,
machines_all_occupancy_rate,
machines_run_time,
_,
action_mask,
) = obs
# to tensor
job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
machines_all_occupancy_rate = torch.tensor(
np.array([machines_all_occupancy_rate]), dtype=torch.float
)
machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
# job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
B, n, t, r = machines_all_occupancy_rate.shape
machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1) # B*t*r
machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1) # B*t*r
job_state = job_state.reshape(B, 1, -1)
job_state = job_state.repeat(1, n, 1)
machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
machines_state_mean = torch.cat(
(
machines_occupancy_rate_mean,
machines_occupancy_rate_std,
),
dim=-1,
)
machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
machines_run_time = machines_run_time.reshape(B, n, -1)
machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
job_input = torch.cat(
(
job_state,
machines_occupancy_rate,
machines_run_time,
machines_state_mean_std_run_time,
),
dim=-1,
) # B*n*dim2
action = self.job_actor.predict(job_input, action_mask)
return action
def show(self):
self.job_actor.show()
class Individual:
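    # One GA individual: a flat gene vector parameterizing the job Actor, plus its fitness records.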
def __init__(self, job_genes=None):
self.agent = Agent()
self.param_num = self.agent.job_actor.param_num
self.job_genes = job_genes
self.train_fitness = None
self.eval_fitness = None
self.std_fitness = np.inf
self.steps = 0
def init(self):
self.job_genes = np.random.uniform(-1, 1, self.param_num)
def update(self):
self.agent.update(self.job_genes.copy())
def run_individual_in_env(id, args, genes, seq_index):
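    # Roll out one individual's policy on a job sequence in DatacenterEnv and return (id, fitness).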
env = DatacenterEnv(args)
env.seq_index = seq_index
env.reset()
individual = Individual(genes)
individual.update()
obs = env.reset()
done = False
action_list = []
reward_list = []
while not done:
action = individual.agent.choose_action(obs)
obs, reward, done, _ = env.step(action)
action_list.append(action)
reward_list.append(reward)
    if args.ga_fitness_type == "std":
        # compute the load-balance (standard deviation) objective
        machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
        machines_occupancy_std = np.std(machines_occupancy_rate, axis=1)
        machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
        std_fitness = np.sum(machines_occupancy_mean_std)
        fitness = -std_fitness
    elif args.ga_fitness_type == "runtime":
        # compute the runtime (duration) objective
        machines_finish_time_record = np.array(env.machines_finish_time_record)
        runtime_fitness = np.sum(machines_finish_time_record / 60)  # scale down to avoid overly large values
        fitness = -runtime_fitness
    elif args.ga_fitness_type == "double":
        # compute the load-balance (standard deviation) objective
        machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
        machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
        machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
        std_fitness = np.mean(machines_occupancy_mean_std)
        # compute the runtime (duration) objective
        machines_finish_time_record = np.array(env.machines_finish_time_record)
        runtime_fitness = np.mean(machines_finish_time_record)  # keep the magnitude moderate
        fitness = np.array([-runtime_fitness, -std_fitness])
return id, fitness
class GA:
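    # NSGA-II style evolution over the Actor's flattened weights: fast non-dominated sorting plus
    # crowding distance for elitism, single-point crossover and Gaussian mutation for children.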
def __init__(self, args):
self.args = args
self.p_size = args.ga_parent_size
self.c_size = args.ga_children_size
self.job_genes_len = 0
self.mutate_rate = args.ga_mutate_rate
self.mutate_scale = args.ga_mutate_scale
self.population: List[Individual] = []
self.elitism_population: List[Individual] = []
self.avg_fitness = 0
self.seq_index = 0
self.seq_num = args.job_seq_num
self.generation = 0
def setup_seed(self):
        seed = self.args.seed
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def generate_ancestor(self):
for _ in range(self.p_size):
individual = Individual()
individual.init()
self.population.append(individual)
self.job_genes_len = individual.param_num
def inherit_ancestor(self):
"""Load genes(nn model parameters) from file."""
for i in range(self.p_size):
pth = os.path.join("model", "all_individual", str(i) + "_nn.pth")
nn = torch.load(pth)
genes = []
with torch.no_grad():
for parameters in nn.parameters():
genes.extend(parameters.numpy().flatten())
self.population.append(Individual(np.array(genes)))
def crossover(self, c1_genes, c2_genes):
"""Single point crossover."""
p1_genes = c1_genes.copy()
p2_genes = c2_genes.copy()
point = np.random.randint(0, (self.job_genes_len))
c1_genes[: point + 1] = p2_genes[: point + 1]
c2_genes[: point + 1] = p1_genes[: point + 1]
def mutate(self, c_genes):
"""Gaussian mutation with scale"""
mutation_array = np.random.random(c_genes.shape) < self.mutate_rate
mutation = np.random.normal(size=c_genes.shape)
mutation[mutation_array] *= self.mutate_scale
c_genes[mutation_array] += mutation[mutation_array]
# def elitism_selection(self):
    # # normalize fitness values
# fitness_list = []
# for individual in self.population:
# fitness_list.append(individual.train_fitness)
# fitness_list = np.array(fitness_list)
# norm_fitness_list = (fitness_list - np.min(fitness_list, axis=0)) / (
# np.max(fitness_list, axis=0) - np.min(fitness_list, axis=0)
# )
    # # sort by the weighted sum of normalized objectives
# norm_fitness_list = np.sum(
# norm_fitness_list * self.args.ga_fitness_wight, axis=-1
# )
    # population_sorted_index = np.argsort(norm_fitness_list)  # ascending sort; keep the last (best) entries
# population_sorted_index = population_sorted_index[-self.p_size :]
# self.elitism_population = [
# self.population[index] for index in population_sorted_index
# ]
# self.avg_fitness = np.mean(fitness_list[population_sorted_index], axis=0)
# self.elitism_norm_fitness_list = norm_fitness_list[population_sorted_index]
    def elitism_selection(self):
        # normalize fitness values
        fitness_list = []
        for individual in self.population:
            fitness_list.append(individual.train_fitness)
        fitness_list = np.array(fitness_list)
        norm_fitness_list = (fitness_list - np.min(fitness_list, axis=0)) / (
            np.max(fitness_list, axis=0) - np.min(fitness_list, axis=0)
        )
        # fast non-dominated sort treats smaller as better, so negate the (maximized) fitness
        fm_fitness_list = -np.array(fitness_list).T
        # fast non-dominated sort
        front_list = self.fast_non_dominated_sort(fm_fitness_list)
        # crowding distance per front
        crowded_distance_list = []
        for front in front_list:
            front_values = fm_fitness_list[:, front]
            crowded_distance = self.crowded_distance(front_values)
            crowded_distance_list.append(crowded_distance)
        # elitism selection
        elitism_index = []
        save_best_front = False
        for front, crowded_distance in zip(front_list, crowded_distance_list):
            # keep the first (best) Pareto front for saving
            if not save_best_front:
                best_front_population = []
                for index in front:
                    best_front_population.append(self.population[index])
                self.best_front_population = best_front_population
                save_best_front = True
            # sort the front by crowding distance
            front = np.array(front)
            sorted_index = np.argsort(crowded_distance)  # ascending
            sorted_front = front[sorted_index[::-1]]  # descending: prefer larger crowding distance
            # pick elites until the parent size is reached
            # TODO: should the number selected per front be configurable?
            for index in sorted_front:
                if len(elitism_index) < self.p_size:
                    elitism_index.append(index)
                else:
                    break
        # weighted sum of normalized objectives (weights e.g. [0.5, 0.5]), used for roulette selection
        norm_fitness_list = np.sum(norm_fitness_list * self.args.ga_fitness_wight, axis=-1)
        elitism_population = [self.population[index] for index in elitism_index]
        # count how many elites changed compared with the previous generation
elite_change_num = len(elitism_population)
for elite in elitism_population:
if elite in self.elitism_population:
elite_change_num -= 1
self.elitism_population = elitism_population
self.fitness_list = fitness_list
self.avg_fitness = np.mean(fitness_list[elitism_index], axis=0)
self.elitism_norm_fitness_list = norm_fitness_list[elitism_index]
return elite_change_num
    # roulette-wheel selection over the elite population
    def roulette_wheel_selection(self, size) -> List[Individual]:
        # individuals with larger normalized fitness are more likely to be picked
selection = []
wheel = sum(self.elitism_norm_fitness_list)
for _ in range(size):
pick = np.random.uniform(0, wheel)
current = 0
for i, individual_fitness in enumerate(self.elitism_norm_fitness_list):
current += individual_fitness
if current > pick:
selection.append(self.elitism_population[i])
break
return selection
    # random selection
    def random_select_parent(self, size):
        # randomly sample parents from the elite population
selection = random.sample(self.elitism_population, size)
return selection
    # generate children
def generate_children(self):
children_population = []
while len(children_population) < self.c_size:
# p1, p2 = self.roulette_wheel_selection(2)
p1, p2 = self.random_select_parent(2)
c1_genes, c2_genes = p1.job_genes.copy(), p2.job_genes.copy()
self.crossover(c1_genes, c2_genes)
self.mutate(c1_genes)
self.mutate(c2_genes)
c1 = Individual(c1_genes)
c2 = Individual(c2_genes)
children_population.extend([c1, c2])
self.children_population = children_population
def save_population(self, population: list[Individual], label=""):
save_dir = os.path.join(
self.args.save_path,
self.args.method,
self.args.tag,
label,
f"g{self.generation}_{self.seq_index}",
)
os.makedirs(save_dir, exist_ok=True)
mean_fitness_list = []
for id, individual in enumerate(population):
mean_fitness = np.array(individual.train_fitness)
mean_fitness_list.append([self.generation, id, *mean_fitness.tolist()])
model_save_path = os.path.join(
save_dir, "{}_{:.5f}_{:.5f}.pth".format(id, *mean_fitness.tolist())
)
individual.update()
torch.save(individual.agent.job_actor.state_dict(), model_save_path)
mean_fitness_list = np.array(mean_fitness_list)
np.save(os.path.join(save_dir, "mean_fitness_record.npy"), mean_fitness_list)
return mean_fitness_list
    # run one generation of evolution
    def evolve(self):
        # plain serial loop, kept for debugging
# population = []
# for individual in self.population:
# individual = run_individual_in_env(
# self.args,
# individual.job_genes,
# self.seq_index,
# )
# population.append(individual)
        # evaluate the population in parallel with a process pool
population_num = self.args.ga_parent_size + self.args.ga_children_size
pool_num = min(cpu_count(), population_num)
print(f"use {pool_num} cup core")
pool = Pool(pool_num)
mutil_process = []
for id, individual in enumerate(self.population):
            # run the individual in the environment to obtain its fitness
one_process = pool.apply_async(
run_individual_in_env,
args=(
id,
self.args,
individual.job_genes,
self.seq_index,
),
)
mutil_process.append(one_process)
pool.close()
pool.join()
        # collect results from the worker processes
for one_process in mutil_process:
id, fitness = one_process.get()
self.population[id].train_fitness = fitness
        # save the full population and its fitness
self.save_population(self.population, "all")
        # elitism selection
elite_change_num = self.elitism_selection()
        # save the elite population
elite_fitness_list = self.save_population(self.elitism_population, "elite")
        # generate children
self.generate_children()
new_population = []
new_population.extend(self.elitism_population)
new_population.extend(self.children_population)
self.population = new_population
self.seq_index = (self.seq_index + 1) % self.seq_num
self.generation += 1
return elite_change_num, elite_fitness_list
    # argsort values for each objective
    def sort_by_values(self, values):
        # ascending sort indices per objective
sorted_index_list = []
for value in values:
sorted_index = np.argsort(value)
sorted_index_list.append(sorted_index)
return sorted_index_list
    # crowding distance calculation
    def crowded_distance(self, values):
        distances = []
        sorted_index_list = self.sort_by_values(values)  # ascending sort indices
for value, sorted_index in zip(values, sorted_index_list):
distance = np.ones(len(sorted_index)) * 1e5
for i in range(1, len(sorted_index) - 1):
pre_index = sorted_index[i - 1]
curr_index = sorted_index[i]
after_index = sorted_index[i + 1]
distance[curr_index] = (value[after_index] - value[pre_index]) / (
max(value) - min(value)
)
distances.append(distance)
distances = np.array(distances)
distance = np.sum(distances, axis=0)
return distance
    # fast non-dominated sort (NSGA-II)
    def fast_non_dominated_sort(self, values):
        # smaller objective values are better
        values11 = values[0]  # values of the first objective
        S = [[] for _ in range(0, len(values11))]  # for each individual, the set of solutions it dominates
        front = [[]]  # list of fronts; one list per rank
        n = [0 for _ in range(0, len(values11))]  # for each individual, how many solutions dominate it
        rank = [np.inf for _ in range(0, len(values11))]  # rank (front index) of each individual
        # for every individual, compute its domination count and the set of solutions it dominates
        # (objective values: smaller is better)
        for p in range(0, len(values11)):
            S[p] = []  # solutions dominated by p (worse than p)
            n[p] = 0  # number of solutions dominating p (better than p)
            for q in range(0, len(values11)):  # compare p against every individual q
                less = 0  # objectives on which q is better (smaller) than p
                equal = 0  # objectives on which q equals p
                greater = 0  # objectives on which q is worse (larger) than p
                for k in range(len(values)):  # loop over objectives
                    if values[k][p] > values[k][q]:  # on objective k, q is smaller than p
                        less = less + 1  # q is better than p
                    if values[k][p] == values[k][q]:  # on objective k, q equals p
                        equal = equal + 1
                    if values[k][p] < values[k][q]:  # on objective k, q is larger than p
                        greater = greater + 1  # q is worse than p
                if (less + equal == len(values)) and (equal != len(values)):
                    n[p] = n[p] + 1  # q dominates p: increment p's domination count
                elif (greater + equal == len(values)) and (equal != len(values)):
                    S[p].append(q)  # p dominates q: record q's index
            # Pareto-optimal solutions are those with n[p] == 0
            if n[p] == 0:
                rank[p] = 0  # individual p belongs to the best front
                if p not in front[0]:
                    # append p to front 0 if it is not already there
                    front[0].append(p)  # indices of Pareto-optimal solutions
        # split the remaining solutions into successive fronts
        i = 0
        while front[i] != []:  # while the current front is not empty
            Q = []
            for p in front[i]:  # for each individual p in the current front
                for q in S[p]:  # for each solution q dominated by p
                    n[q] = n[q] - 1  # remove p from q's domination count
                    if n[q] == 0:
                        rank[q] = i + 1
                        if q not in Q:
                            Q.append(q)  # q belongs to front i + 1
            i = i + 1  # advance to the next front
            front.append(Q)
        del front[len(front) - 1]  # drop the empty front appended on the last iteration
        return front  # per-front index lists, e.g. [[1], [9], [0, 8], [7, 6], [3, 5], [2, 4]]
if __name__ == "__main__":
args = parse_args()
args.method = "nsga"
args.job_seq_num = 1
args.tag = "run01"
save_dir = os.path.join(
args.save_path,
args.method,
args.tag,
)
os.makedirs(save_dir, exist_ok=True)
# save args
args_dict = args.__dict__
args_path = os.path.join(save_dir, "args.txt")
with open(args_path, "w") as f:
for each_arg, value in args_dict.items():
f.writelines(each_arg + " : " + str(value) + "\n")
writer = SummaryWriter(os.path.join(save_dir, "log"))
ga = GA(args)
ga.setup_seed()
if args.ga_choice == "generate":
ga.generate_ancestor()
else:
ga.inherit_ancestor()
fitness_list = []
mean_best_fitness = [-np.inf] * args.ga_fitness_num
while True:
print("=" * 100)
print(f"evolve generation {ga.generation}")
elite_change_num, elite_fitness_list = ga.evolve()
        # log to tensorboard
writer.add_scalar("Train/Elite change num", elite_change_num, ga.generation)
elite_fitness_list = np.array(elite_fitness_list)
elite_fitness_list = -elite_fitness_list[:, -2:]
y = elite_fitness_list[:, 0]
x = elite_fitness_list[:, 1]
figure = plt.figure(figsize=(8, 8), dpi=100)
plt.scatter(x, y, label="train")
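        # hard-coded reference points for baseline methods (lc / lg / wsga) on the same two objectives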
plt.scatter(16.2658, 534.9209, label="lc")
        # plt.scatter(x, y, label="rr")
plt.scatter(66.8868, 349.5121, label="lg")
plt.scatter(17.0905, 351.4006, label="wsga")
plt.xlim((0, 250))
plt.ylim((200, 600))
plt.xlabel("balance")
plt.ylabel("duration")
plt.title("Target distribution")
plt.legend()
writer.add_figure("Train/Target distribution", figure, ga.generation)
plt.close()
max_elite_fitness = np.max(elite_fitness_list, axis=0)
min_elite_fitness = np.min(elite_fitness_list, axis=0)
writer.add_scalar("Train/Balance fitness max", max_elite_fitness[1], ga.generation)
writer.add_scalar("Train/Duration fitness max", max_elite_fitness[0], ga.generation)
writer.add_scalar("Train/Balance fitness min", min_elite_fitness[1], ga.generation)
writer.add_scalar("Train/Duration fitness min", min_elite_fitness[0], ga.generation)
| 21,419 | 34.346535 | 96 |
py
|
MERL-LB
|
MERL-LB-main/mp_test_fixed_lg.py
|
import os
import numpy as np
from itertools import count
from multiprocessing import Pool, cpu_count
from config.test import *
from envs.datacenter_env.env import DatacenterEnv
from utils import *
class LG:
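    # Heuristic baseline: pick the feasible machine whose remaining runtime is closest to the job's runtime.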
def select_action(self, obs):
_, job_run_time, _, machines_run_time, _, action_mask = obs
gap = np.abs(machines_run_time - job_run_time)
gap[action_mask == False] = 1e9
action = np.argmin(gap)
return action
def test_one_path(args, seq_index, data_save_path, fig_save_path):
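    # Run one job sequence to completion with the LG heuristic, save occupancy / finish-time records
    # and plots, and return the balance (std) and duration (runtime) fitness values.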
print("start test seq_index: ", seq_index)
# init agent
agent = LG()
# init env
env = DatacenterEnv(args)
env.seq_index = seq_index
# start test
obs = env.reset()
for _ in count():
# select and perform an action
action = agent.select_action(obs)
# execute action
next_obs, _, done, _ = env.step(action)
# move to the next state
obs = next_obs
if done:
break
# save test result
# save not run to end data
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
# print mean std and mean run time
machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
std_fitness = np.mean(machines_occupancy_mean_std)
runtime_fitness = np.mean(machines_finish_time_record)
print(f"std_fitness {std_fitness} runtime_fitness {runtime_fitness}")
# save run to end data
env.run_to_end()
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"end_occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"end_finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
for i in range(4):
data = machines_occupancy_rate[:, :, i]
save_name = os.path.join(fig_save_path, "use_rate", f"use_rate_e{seq_index}_{i}.png")
plot_mutil_lines_chart(
data,
save_name=save_name,
xlabel="time",
ylabel="utilization",
title="Container Resource Utilization",
)
save_name = os.path.join(fig_save_path, "finish_time", f"finish_time_e{seq_index}.png")
plot_mutil_lines_chart(
machines_finish_time_record,
save_name=save_name,
xlabel="time",
ylabel="remaining time",
title="Container Remaining Time",
)
return std_fitness, runtime_fitness, env.job_num
if __name__ == "__main__":
args = parse_args()
args.method = "lg"
save_dir = os.path.join(
args.save_path,
args.method,
args.tag,
)
model_save_path = os.path.join(save_dir, "models")
fig_save_path = os.path.join(save_dir, "fig")
data_save_path = os.path.join(save_dir, "data")
os.makedirs(data_save_path, exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "use_rate"), exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "finish_time"), exist_ok=True)
os.makedirs(model_save_path, exist_ok=True)
os.makedirs(fig_save_path, exist_ok=True)
    # evaluate each job sequence in a separate process
mutil_process = []
pool = Pool(cpu_count())
for i in range(args.job_seq_num):
one_process = pool.apply_async(test_one_path, args=(args, i, data_save_path, fig_save_path))
mutil_process.append(one_process)
pool.close()
pool.join()
    # calculate mean performance across job sequences
fitness_record = []
job_num_list = []
for p in mutil_process:
std_fitness, runtime_fitness, job_num = p.get()
job_num_list.append(job_num)
fitness_record.append((std_fitness, runtime_fitness))
fitness_record = np.array(fitness_record)
mean_fitness = np.mean(fitness_record, axis=0)
std_fitness = np.std(fitness_record, axis=0)
print(job_num_list)
np.save(os.path.join(data_save_path, "job_num.npy"), np.array(job_num))
print(
"mean std fitness: {:.4f} mean runtime fitness: {:.4f}".format(
mean_fitness[0], mean_fitness[1]
)
)
print(
"std std fitness: {:.4f} std runtime fitness: {:.4f}".format(std_fitness[0], std_fitness[1])
)
print("done")
| 4,714 | 31.07483 | 100 |
py
|
MERL-LB
|
MERL-LB-main/mp_test_nn_nsga.py
|
import os
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
from itertools import count
from multiprocessing import Pool, cpu_count
from config.test import *
from envs.datacenter_env.env import DatacenterEnv
from utils import *
class Actor(nn.Module):
def __init__(self, dim_list=[126, 32, 1]):
super().__init__()
self.dim_list = dim_list
fc = []
self.param_num = 0
for i in range(len(dim_list) - 1):
fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
self.param_num += dim_list[i] * dim_list[i + 1] + dim_list[i + 1]
self.fc = nn.ModuleList(fc)
def forward(self, x):
for i in range(len(self.fc) - 1):
x = F.relu(self.fc[i](x))
x = self.fc[-1](x)
x = torch.squeeze(x, dim=-1)
return x
def update(self, weights):
weights = torch.FloatTensor(weights)
with torch.no_grad():
start = 0
for fc in self.fc:
end = start + fc.in_features * fc.out_features
fc.weight.data = weights[start:end].reshape(fc.out_features, fc.in_features)
start = end
end = start + fc.out_features
fc.bias.data = weights[start:end]
start = end
def predict(self, input, action_mask=None):
predict = self(input)
if action_mask is not None:
predict[action_mask == False] += -1e8
return torch.argmax(predict, dim=1).cpu().item()
def show(self):
with torch.no_grad():
for parameters in self.parameters():
print(parameters.numpy().flatten())
class Agent(nn.Module):
def __init__(self):
super(Agent, self).__init__()
self.job_actor = Actor()
def update(self, job_weights):
self.job_actor.update(job_weights)
def select_action(self, obs):
(
job_res_req_rate,
job_run_time,
machines_all_occupancy_rate,
machines_run_time,
_,
action_mask,
) = obs
# to tensor
job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
machines_all_occupancy_rate = torch.tensor(
np.array([machines_all_occupancy_rate]), dtype=torch.float
)
machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
# job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
B, n, t, r = machines_all_occupancy_rate.shape
machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1) # B*t*r
machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1) # B*t*r
job_state = job_state.reshape(B, 1, -1)
job_state = job_state.repeat(1, n, 1)
machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
machines_state_mean = torch.cat(
(
machines_occupancy_rate_mean,
machines_occupancy_rate_std,
),
dim=-1,
)
machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
machines_run_time = machines_run_time.reshape(B, n, -1)
machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
job_input = torch.cat(
(
job_state,
machines_occupancy_rate,
machines_run_time,
machines_state_mean_std_run_time,
),
dim=-1,
) # B*n*dim2
action = self.job_actor.predict(job_input, action_mask)
# action = self.job_actor.predict(job_input)
return action
def show(self):
self.job_actor.show()
def test_one_path(args, seq_index, data_save_path, fig_save_path):
print("start test seq_index: ", seq_index)
# checkpoint_path = "output/train/nsga/run02/elite/g3382_0/15_-349.95341_-19.68042.pth"
# checkpoint_path = "output/one_job/ga/reward_sum/run02_m15/final_population/g_9796_f_-310.773_-0.026/24_f_-308.432_-0.024.pth"
agent = Agent()
# state_dict = torch.load("24_f_-342.436_-0.029.pth")
# agent.load_state_dict(state_dict)
state_dict = torch.load(args.checkpoint_path)
agent.job_actor.load_state_dict(state_dict)
# init env
env = DatacenterEnv(args)
env.seq_index = seq_index
# start test
obs = env.reset()
for _ in count():
# select and perform an action
action = agent.select_action(obs)
# execute action
next_obs, _, done, _ = env.step(action)
# move to the next state
obs = next_obs
if done:
break
# save test result
# save not run to end data
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
machines_job_num_record = np.array(env.machines_job_num_record)
np.save(
os.path.join(data_save_path, f"job_num_{seq_index}.npy"),
machines_job_num_record,
)
# print mean std and mean run time
machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
std_fitness = np.mean(machines_occupancy_mean_std)
runtime_fitness = np.mean(machines_finish_time_record)
print(f"std_fitness {std_fitness} runtime_fitness {runtime_fitness}")
# save run to end data
env.run_to_end()
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"end_occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"end_finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
machines_job_num_record = np.array(env.machines_job_num_record)
np.save(
os.path.join(data_save_path, f"end_job_num_{seq_index}.npy"),
machines_job_num_record,
)
for i in range(4):
data = machines_occupancy_rate[:, :, i]
save_name = os.path.join(fig_save_path, "use_rate", f"use_rate_e{seq_index}_{i}.png")
plot_mutil_lines_chart(
data,
save_name=save_name,
xlabel="time",
ylabel="utilization",
title="Container Resource Utilization",
)
save_name = os.path.join(fig_save_path, "finish_time", f"finish_time_e{seq_index}.png")
plot_mutil_lines_chart(
machines_finish_time_record,
save_name=save_name,
xlabel="time",
ylabel="remaining time",
title="Container Remaining Time",
)
return std_fitness, runtime_fitness, env.job_num
if __name__ == "__main__":
args = parse_args()
args.method = "wsga"
args.tag = "t30_wsga_run05_g13440"
args.max_time = 30 * 60
args.job_seq_num = 5
args.actual = True
root_path = "output/train/wsga/run05/elite/g13440_0"
file_names = os.listdir(root_path)
file_names.remove("mean_fitness_record.npy")
scores = [item.split(".pth")[0] for item in file_names]
scores = ["".join(item.split("-")) for item in scores]
scores = [list(map(float, item.split("_")[1:])) for item in scores]
b_score = np.array(scores)[:, 1]
index = np.argsort(b_score)
root_dir = os.path.join(
args.save_path,
args.method,
args.tag,
)
result1 = []
result2 = []
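    # evaluate every elite checkpoint found in root_path, ordered by its training balance score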
for i in index:
file_name = file_names[i]
args.checkpoint_path = os.path.join(root_path, file_name)
score = scores[i]
print(f"Test b{score[1]:.3f} d{score[0]:.3f}")
save_dir = os.path.join(
root_dir,
f"b{score[1]:.3f}_d{score[0]:.3f}",
)
os.makedirs(save_dir, exist_ok=True)
fig_save_path = os.path.join(save_dir, "fig")
data_save_path = os.path.join(save_dir, "data")
os.makedirs(data_save_path, exist_ok=True)
os.makedirs(fig_save_path, exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "use_rate"), exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "finish_time"), exist_ok=True)
# save args
args_dict = args.__dict__
args_path = os.path.join(save_dir, "args.txt")
with open(args_path, "w") as f:
for each_arg, value in args_dict.items():
f.writelines(each_arg + " : " + str(value) + "\n")
        # evaluate each job sequence in a separate process
mutil_process = []
pool = Pool(cpu_count())
for j in range(args.job_seq_num):
one_process = pool.apply_async(
test_one_path, args=(args, j, data_save_path, fig_save_path)
)
mutil_process.append(one_process)
pool.close()
pool.join()
        # calculate mean performance across job sequences
fitness_record = []
job_num_list = []
for p in mutil_process:
std_fitness, runtime_fitness, job_num = p.get()
job_num_list.append(job_num)
fitness_record.append((std_fitness, runtime_fitness))
result2.append((i, score[1], score[0], std_fitness, runtime_fitness))
fitness_record = np.array(fitness_record)
mean_fitness = np.mean(fitness_record, axis=0)
std_fitness = np.std(fitness_record, axis=0)
result1.append((i, score[1], score[0], *mean_fitness, *std_fitness))
print(job_num_list)
np.save(os.path.join(data_save_path, "job_num.npy"), np.array(job_num))
print(
"mean balance fitness: {:.4f} mean duration fitness: {:.4f}".format(
mean_fitness[0], mean_fitness[1]
)
)
print(
"std balance fitness: {:.4f} std duration fitness: {:.4f}".format(
std_fitness[0], std_fitness[1]
)
)
print("done")
df = pd.DataFrame(
result1,
columns=[
"id",
"train_balance",
"train_duration",
"balance_fitness_mean",
"duration_fitness_mean",
"balance_fitness_std",
"duration_fitness_std",
],
)
    df.to_csv(os.path.join(root_dir, f"{args.method}_mean_std.csv"))
df2 = pd.DataFrame(
result2,
columns=[
"id",
"train_balance",
"train_duration",
"balance_fitness",
"duration_fitness",
],
)
df2.to_csv(os.path.join(root_dir, f"all_data.csv"))
| 11,135 | 32.643505 | 131 |
py
|
MERL-LB
|
MERL-LB-main/mp_test_sigma.py
|
import os
import random
import torch
import numpy as np
import pandas as pd
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
from itertools import count
from multiprocessing import Pool, cpu_count
from config.test import *
from envs.datacenter_env.env import DatacenterEnv
from utils import *
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
device = torch.device("cpu")
class RR:
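    # Round-robin baseline: cycle to the next machine allowed by the action mask.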
def __init__(self, machine_num) -> None:
self.machine_num = machine_num
self.action_index = 0
def select_action(self, obs):
_, _, _, _, _, action_mask = obs
action = self.action_index
for i in range(self.machine_num):
action = (action + 1) % self.machine_num
if action_mask[action] == True:
self.action_index = action
break
return action
class RD:
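    # Random baseline: pick a random machine among those allowed by the action mask.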
def __init__(self, machine_num) -> None:
self.machine_num = machine_num
def select_action(self, obs):
_, _, _, _, _, action_mask = obs
action_prob = np.random.random(self.machine_num)
action_prob = (action_prob + action_mask) / 2
action = np.argmax(action_prob)
return action
class LG:
def select_action(self, obs):
_, job_run_time, _, machines_run_time, _, action_mask = obs
gap = np.abs(machines_run_time - job_run_time)
gap[action_mask == False] = 1e9
action = np.argmin(gap)
return action
class LC:
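    # Least-loaded baseline: pick the feasible machine currently running the fewest jobs.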
def select_action(self, obs):
_, _, _, _, jobs_num, action_mask = obs
jobs_num[action_mask == False] = 1e9
action = np.argmin(jobs_num)
return action
class Actor(nn.Module):
def __init__(self, absolute=True, dim_list=[126, 32, 1]):
super().__init__()
self.absolute = absolute
self.dim_list = dim_list
fc = []
self.param_num = 0
for i in range(len(dim_list) - 1):
fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
self.param_num += dim_list[i] * dim_list[i + 1] + dim_list[i + 1]
self.fc = nn.ModuleList(fc)
def forward(self, x):
for i in range(len(self.fc) - 1):
x = F.relu(self.fc[i](x))
x = self.fc[-1](x)
x = torch.squeeze(x, dim=-1)
return x
def update(self, weights):
weights = torch.FloatTensor(weights)
with torch.no_grad():
start = 0
for fc in self.fc:
end = start + fc.in_features * fc.out_features
fc.weight.data = weights[start:end].reshape(fc.out_features, fc.in_features)
start = end
end = start + fc.out_features
fc.bias.data = weights[start:end]
start = end
def predict(self, input, action_mask=None):
predict = self(input)
if action_mask is not None:
predict[action_mask == False] += -1e8
if not self.absolute:
action_prob = torch.softmax(predict, dim=-1)
action_dist = Categorical(action_prob)
action = action_dist.sample()
self.action_logprobs = action_dist.log_prob(action).detach()
action = action.cpu().item()
else:
action = torch.argmax(predict, dim=1).cpu().item()
return action
def show(self):
with torch.no_grad():
for parameters in self.parameters():
print(parameters.numpy().flatten())
class Agent(nn.Module):
def __init__(self, absolute=True):
super(Agent, self).__init__()
self.job_actor = Actor(absolute=absolute)
def update(self, job_weights):
self.job_actor.update(job_weights)
def select_action(self, obs):
(
job_res_req_rate,
job_run_time,
machines_all_occupancy_rate,
machines_run_time,
_,
action_mask,
) = obs
# to tensor
job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
machines_all_occupancy_rate = torch.tensor(
np.array([machines_all_occupancy_rate]), dtype=torch.float
)
machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
# job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
B, n, t, r = machines_all_occupancy_rate.shape
machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1) # B*t*r
machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1) # B*t*r
job_state = job_state.reshape(B, 1, -1)
job_state = job_state.repeat(1, n, 1)
machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
machines_state_mean = torch.cat(
(
machines_occupancy_rate_mean,
machines_occupancy_rate_std,
),
dim=-1,
)
machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
machines_run_time = machines_run_time.reshape(B, n, -1)
machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
job_input = torch.cat(
(
job_state,
machines_occupancy_rate,
machines_run_time,
machines_state_mean_std_run_time,
),
dim=-1,
) # B*n*dim2
action = self.job_actor.predict(job_input, action_mask)
# action = self.job_actor.predict(job_input)
return action
def show(self):
self.job_actor.show()
def get_agent(args):
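    # Build the dispatching agent for the chosen method: a simple heuristic, or a trained Actor
    # whose weights are loaded from args.checkpoint_path.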
method = args.method
if method == "rr":
agent = RR(args.machine_num)
elif method == "rd":
agent = RD(args.machine_num)
elif method == "lg":
agent = LG()
elif method == "lc":
agent = LC()
elif method in ["nsga", "wsga", "deepjs", "igd", "nei_nsga"]:
agent = Agent()
state_dict = torch.load(args.checkpoint_path)
agent.job_actor.load_state_dict(state_dict)
agent.job_actor.eval()
elif method in ["ppo"]:
agent = Agent()
state_dict = torch.load(args.checkpoint_path)
agent.job_actor.load_state_dict(state_dict)
agent.job_actor.eval()
return agent
def set_seed(seed=0):
random.seed(seed)
np.random.seed(seed)
    torch.manual_seed(seed)  # seed the CPU RNG
    torch.cuda.manual_seed(seed)  # seed the current GPU
    torch.cuda.manual_seed_all(seed)  # seed all GPUs
def test_one_path(args, seq_index, data_save_path, fig_save_path):
print("start test seq_index: ", seq_index)
# init agent
agent = get_agent(args)
# init env
env = DatacenterEnv(args)
env.seq_index = seq_index
# start test
obs = env.reset()
for _ in count():
# select and perform an action
with torch.no_grad():
action = agent.select_action(obs)
# execute action
next_obs, _, done, _ = env.step(action)
# move to the next state
obs = next_obs
if done:
break
# save test result
# save not run to end data
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
# print mean std and mean run time
machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
std_fitness = np.mean(machines_occupancy_mean_std)
runtime_fitness = np.mean(machines_finish_time_record)
print(f"std_fitness {std_fitness} runtime_fitness {runtime_fitness}")
# save run to end data
env.run_to_end()
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"end_occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"end_finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
for i in range(4):
data = machines_occupancy_rate[:, :, i]
save_name = os.path.join(fig_save_path, "use_rate", f"use_rate_e{seq_index}_{i}.png")
plot_mutil_lines_chart(
data,
save_name=save_name,
xlabel="time",
ylabel="utilization",
title="Container Resource Utilization",
)
save_name = os.path.join(fig_save_path, "finish_time", f"finish_time_e{seq_index}.png")
plot_mutil_lines_chart(
machines_finish_time_record,
save_name=save_name,
xlabel="time",
ylabel="remaining time",
title="Container Remaining Time",
)
del agent
return std_fitness, runtime_fitness, env.job_num
if __name__ == "__main__":
args = parse_args()
args.method = "igd"
args.tag = "user_sigam_test02"
args.actual = False
# args.checkpoint_path = "output/train/nsga/run03/elite/g1_1/20_-501.30449_-25.49838.pth"
# args.checkpoint_path = "output/train/nsga/run05/elite/g24214_0/10_-351.04309_-20.52227.pth"
# args.checkpoint_path = "output/train/wsga/run05/elite/g13443_3/0_-335.70133_-14.49433.pth"
# args.checkpoint_path = (
# "output/train/ns_deepjs/run02_no_mask/models/e10000_s0_d401.1772_b15.8262"
# )
args.checkpoint_path = (
"output/train/ns_deepjs/run02_no_mask/models/e13919_s9_d380.7892_b22.2165"
)
# args.checkpoint_path = "output/train/nei_nsga/g30000_0/12_-218.78153_-174.13751.pth"
# job_num_list = range(2, 10)
user_sigam_list = np.linspace(0, 7.5 * 60 // 3, 6, dtype=np.int32)
root_dir = os.path.join(
args.save_path,
args.method,
args.tag,
)
result = []
result2 = []
for user_sigma in user_sigam_list:
# user_load_rate = (
# max_job_num
# / 2
# * args.max_res_req
# / 2
# * args.max_job_len
# / 2
# / args.res_capacity
# / args.machine_num
# )
# if user_load_rate > 1.1:
# break
print(f"Test user_sigma {user_sigma}")
save_dir = os.path.join(
root_dir,
f"user_sigma_{user_sigma}",
)
os.makedirs(save_dir, exist_ok=True)
fig_save_path = os.path.join(save_dir, "fig")
data_save_path = os.path.join(save_dir, "data")
os.makedirs(data_save_path, exist_ok=True)
os.makedirs(fig_save_path, exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "use_rate"), exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "finish_time"), exist_ok=True)
# save args
args.user_sigma = user_sigma
args_dict = args.__dict__
args_path = os.path.join(save_dir, "args.txt")
with open(args_path, "w") as f:
for each_arg, value in args_dict.items():
f.writelines(each_arg + " : " + str(value) + "\n")
        # evaluate each job sequence in a separate process
mutil_process = []
# pool = Pool(10)
pool = Pool(cpu_count())
for i in range(args.job_seq_num):
one_process = pool.apply_async(
test_one_path, args=(args, i, data_save_path, fig_save_path)
)
mutil_process.append(one_process)
pool.close()
pool.join()
        # calculate mean performance across job sequences
fitness_record = []
job_num_list = []
for p in mutil_process:
std_fitness, runtime_fitness, job_num = p.get()
job_num_list.append(job_num)
fitness_record.append((std_fitness, runtime_fitness))
result2.append((user_sigma // 5, std_fitness, runtime_fitness))
fitness_record = np.array(fitness_record)
mean_fitness = np.mean(fitness_record, axis=0)
        std_fitness = np.std(fitness_record, axis=0)
        result.append((user_sigma, *mean_fitness, *std_fitness))  # summary row for mean_std.csv
print(job_num_list)
np.save(os.path.join(data_save_path, "job_num.npy"), np.array(job_num))
print(
"mean std fitness: {:.4f} mean runtime fitness: {:.4f}".format(
mean_fitness[0], mean_fitness[1]
)
)
print(
"std std fitness: {:.4f} std runtime fitness: {:.4f}".format(
std_fitness[0], std_fitness[1]
)
)
print("done")
df = pd.DataFrame(
result,
columns=[
"user_sigma",
"balance_fitness_mean",
"duration_fitness_mean",
"balance_fitness_std",
"duration_fitness_std",
],
)
df.to_csv(os.path.join(root_dir, f"mean_std.csv"))
df2 = pd.DataFrame(
result2,
columns=[
"user_sigma",
"balance_fitness",
"duration_fitness",
],
)
df2.to_csv(os.path.join(root_dir, f"all_data.csv"))
| 13,433 | 31.686131 | 97 |
py
|
MERL-LB
|
MERL-LB-main/mp_test_load.py
|
import os
import random
import torch
import numpy as np
import pandas as pd
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
from itertools import count
from multiprocessing import Pool, cpu_count
from config.test import *
from envs.datacenter_env.env import DatacenterEnv
from utils import *
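# Baseline dispatchers used for comparison: RR cycles machines round-robin, RD picks a random
# feasible machine, LG matches remaining runtime to the job, LC picks the machine with fewest jobs.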
class RR:
def __init__(self, machine_num) -> None:
self.machine_num = machine_num
self.action_index = 0
def select_action(self, obs):
_, _, _, _, _, action_mask = obs
action = self.action_index
for i in range(self.machine_num):
action = (action + 1) % self.machine_num
if action_mask[action] == True:
self.action_index = action
break
return action
class RD:
def __init__(self, machine_num) -> None:
self.machine_num = machine_num
def select_action(self, obs):
_, _, _, _, _, action_mask = obs
action_prob = np.random.random(self.machine_num)
action_prob = (action_prob + action_mask) / 2
action = np.argmax(action_prob)
return action
class LG:
def select_action(self, obs):
_, job_run_time, _, machines_run_time, _, action_mask = obs
gap = np.abs(machines_run_time - job_run_time)
gap[action_mask == False] = 1e9
action = np.argmin(gap)
return action
class LC:
def select_action(self, obs):
_, _, _, _, jobs_num, action_mask = obs
jobs_num[action_mask == False] = 1e9
action = np.argmin(jobs_num)
return action
class Actor(nn.Module):
def __init__(self, absolute=True, dim_list=[126, 32, 1]):
super().__init__()
self.absolute = absolute
self.dim_list = dim_list
fc = []
self.param_num = 0
for i in range(len(dim_list) - 1):
fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
self.param_num += dim_list[i] * dim_list[i + 1] + dim_list[i + 1]
self.fc = nn.ModuleList(fc)
def forward(self, x):
for i in range(len(self.fc) - 1):
x = F.relu(self.fc[i](x))
x = self.fc[-1](x)
x = torch.squeeze(x, dim=-1)
return x
def update(self, weights):
weights = torch.FloatTensor(weights)
with torch.no_grad():
start = 0
for fc in self.fc:
end = start + fc.in_features * fc.out_features
fc.weight.data = weights[start:end].reshape(fc.out_features, fc.in_features)
start = end
end = start + fc.out_features
fc.bias.data = weights[start:end]
start = end
def predict(self, input, action_mask=None):
predict = self(input)
if action_mask is not None:
predict[action_mask == False] += -1e8
if not self.absolute:
action_prob = torch.softmax(predict, dim=-1)
action_dist = Categorical(action_prob)
action = action_dist.sample()
self.action_logprobs = action_dist.log_prob(action).detach()
action = action.cpu().item()
else:
action = torch.argmax(predict, dim=1).cpu().item()
return action
def show(self):
with torch.no_grad():
for parameters in self.parameters():
print(parameters.numpy().flatten())
class Agent(nn.Module):
def __init__(self, absolute=True):
super(Agent, self).__init__()
self.job_actor = Actor(absolute=absolute)
def update(self, job_weights):
self.job_actor.update(job_weights)
def select_action(self, obs):
(
job_res_req_rate,
job_run_time,
machines_all_occupancy_rate,
machines_run_time,
_,
action_mask,
) = obs
# to tensor
job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
machines_all_occupancy_rate = torch.tensor(
np.array([machines_all_occupancy_rate]), dtype=torch.float
)
machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
# job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
B, n, t, r = machines_all_occupancy_rate.shape
machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1) # B*t*r
machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1) # B*t*r
job_state = job_state.reshape(B, 1, -1)
job_state = job_state.repeat(1, n, 1)
machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
machines_state_mean = torch.cat(
(
machines_occupancy_rate_mean,
machines_occupancy_rate_std,
),
dim=-1,
)
machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
machines_run_time = machines_run_time.reshape(B, n, -1)
machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
job_input = torch.cat(
(
job_state,
machines_occupancy_rate,
machines_run_time,
machines_state_mean_std_run_time,
),
dim=-1,
) # B*n*dim2
action = self.job_actor.predict(job_input, action_mask)
# action = self.job_actor.predict(job_input)
return action
def show(self):
self.job_actor.show()
def get_agent(args):
method = args.method
if method == "rr":
agent = RR(args.machine_num)
elif method == "rd":
agent = RD(args.machine_num)
elif method == "lg":
agent = LG()
elif method == "lc":
agent = LC()
elif method in ["nsga", "wsga", "deepjs", "igd", "nei_nsga"]:
agent = Agent()
state_dict = torch.load(args.checkpoint_path)
agent.job_actor.load_state_dict(state_dict)
elif method in ["ppo"]:
agent = Agent()
state_dict = torch.load(args.checkpoint_path)
agent.job_actor.load_state_dict(state_dict)
return agent
def set_seed(seed=0):
random.seed(seed)
np.random.seed(seed)
    torch.manual_seed(seed)  # seed the CPU RNG
    torch.cuda.manual_seed(seed)  # seed the current GPU
    torch.cuda.manual_seed_all(seed)  # seed all GPUs
def test_one_path(args, seq_index, data_save_path, fig_save_path):
print("start test seq_index: ", seq_index)
# init agent
agent = get_agent(args)
# init env
env = DatacenterEnv(args)
env.seq_index = seq_index
# start test
obs = env.reset()
for _ in count():
# select and perform an action
action = agent.select_action(obs)
# execute action
next_obs, _, done, _ = env.step(action)
# move to the next state
obs = next_obs
if done:
break
# save test result
# save not run to end data
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
# print mean std and mean run time
machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
std_fitness = np.mean(machines_occupancy_mean_std)
runtime_fitness = np.mean(machines_finish_time_record)
print(f"std_fitness {std_fitness} runtime_fitness {runtime_fitness}")
# save run to end data
env.run_to_end()
machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
np.save(
os.path.join(data_save_path, f"end_occupancy_rate_{seq_index}.npy"),
machines_occupancy_rate,
)
machines_finish_time_record = np.array(env.machines_finish_time_record)
np.save(
os.path.join(data_save_path, f"end_finish_time_{seq_index}.npy"),
machines_finish_time_record,
)
for i in range(4):
data = machines_occupancy_rate[:, :, i]
save_name = os.path.join(fig_save_path, "use_rate", f"use_rate_e{seq_index}_{i}.png")
plot_mutil_lines_chart(
data,
save_name=save_name,
xlabel="time",
ylabel="utilization",
title="Container Resource Utilization",
)
save_name = os.path.join(fig_save_path, "finish_time", f"finish_time_e{seq_index}.png")
plot_mutil_lines_chart(
machines_finish_time_record,
save_name=save_name,
xlabel="time",
ylabel="remaining time",
title="Container Remaining Time",
)
return std_fitness, runtime_fitness, env.job_num
if __name__ == "__main__":
args = parse_args()
args.method = "ppo"
args.tag = "user_load_test02"
args.actual = True
# args.checkpoint_path = "output/train/nsga/run03/elite/g1_1/20_-501.30449_-25.49838.pth"
# args.checkpoint_path = "output/train/nsga/run05/elite/g24214_0/10_-351.04309_-20.52227.pth"
# args.checkpoint_path = "output/train/wsga/run05/elite/g13443_3/0_-335.70133_-14.49433.pth"
# args.checkpoint_path = (
# "output/train/ns_deepjs/run02_no_mask/models/e10000_s0_d401.1772_b15.8262"
# )
args.checkpoint_path = "output/train/ppo/run_0/model/e16679_s9_d376.1445_b18.8828_actor.pth"
job_num_list = range(2, 10)
# user_sigam_list = [0]
root_dir = os.path.join(
args.save_path,
args.method,
args.tag,
)
result = []
result2 = []
for max_job_num in job_num_list:
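        # estimated offered load: average job count x average resource request x average job length
        # (half of each maximum), normalized by total cluster capacity; stop once it exceeds 1.1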
user_load_rate = (
max_job_num
/ 2
* args.max_res_req
/ 2
* args.max_job_len
/ 2
/ args.res_capacity
/ args.machine_num
)
if user_load_rate > 1.1:
break
print(f"Test user_load_rate {user_load_rate:.3f}")
save_dir = os.path.join(
root_dir,
f"user_load_rate_{user_load_rate:.3f}",
)
os.makedirs(save_dir, exist_ok=True)
fig_save_path = os.path.join(save_dir, "fig")
data_save_path = os.path.join(save_dir, "data")
os.makedirs(data_save_path, exist_ok=True)
os.makedirs(fig_save_path, exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "use_rate"), exist_ok=True)
os.makedirs(os.path.join(fig_save_path, "finish_time"), exist_ok=True)
# save args
args.max_job_num = max_job_num
args_dict = args.__dict__
args_path = os.path.join(save_dir, "args.txt")
with open(args_path, "w") as f:
for each_arg, value in args_dict.items():
f.writelines(each_arg + " : " + str(value) + "\n")
        # evaluate each job sequence in a separate process
mutil_process = []
pool = Pool(10)
# pool = Pool(cpu_count())
for i in range(args.job_seq_num):
one_process = pool.apply_async(
test_one_path, args=(args, i, data_save_path, fig_save_path)
)
mutil_process.append(one_process)
pool.close()
pool.join()
        # calculate mean performance across job sequences
fitness_record = []
job_num_list = []
for p in mutil_process:
std_fitness, runtime_fitness, job_num = p.get()
job_num_list.append(job_num)
fitness_record.append((std_fitness, runtime_fitness))
result2.append((user_load_rate, std_fitness, runtime_fitness))
fitness_record = np.array(fitness_record)
mean_fitness = np.mean(fitness_record, axis=0)
        std_fitness = np.std(fitness_record, axis=0)
        result.append((user_load_rate, *mean_fitness, *std_fitness))  # summary row for mean_std.csv
print(job_num_list)
np.save(os.path.join(data_save_path, "job_num.npy"), np.array(job_num))
print(
"mean std fitness: {:.4f} mean runtime fitness: {:.4f}".format(
mean_fitness[0], mean_fitness[1]
)
)
print(
"std std fitness: {:.4f} std runtime fitness: {:.4f}".format(
std_fitness[0], std_fitness[1]
)
)
print("done")
df = pd.DataFrame(
result,
columns=[
"user_load_rate",
"balance_fitness_mean",
"duration_fitness_mean",
"balance_fitness_std",
"duration_fitness_std",
],
)
df.to_csv(os.path.join(root_dir, f"mean_std.csv"))
df2 = pd.DataFrame(
result2,
columns=[
"user_load_rate",
"balance_fitness",
"duration_fitness",
],
)
df2.to_csv(os.path.join(root_dir, f"all_data.csv"))
| 13,101 | 31.59204 | 97 |
py
|