repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class)
---|---|---|---|---|---|---
rancher | rancher-master/tests/integration/suite/test_default_roles.py |
import pytest
import json
from .common import random_str
from .conftest import wait_for_condition, wait_until, wait_for
CREATOR_ANNOTATION = 'authz.management.cattle.io/creator-role-bindings'
systemProjectLabel = "authz.management.cattle.io/system-project"
defaultProjectLabel = "authz.management.cattle.io/default-project"
@pytest.fixture
def cleanup_roles(request, admin_mc):
"""Resets global roles and role remplates back to the server default:
global role == 'user'
cluster create == 'cluster-owner'
project create == 'project-owner'
"""
client = admin_mc.client
def _cleanup():
for role in client.list_role_template():
if role.id == 'cluster-owner':
client.update(role, clusterCreatorDefault=True,
projectCreatorDefault=False, locked=False)
elif role.id == 'project-owner':
client.update(role, clusterCreatorDefault=False,
projectCreatorDefault=True, locked=False)
elif (role.clusterCreatorDefault or role.projectCreatorDefault or
role.locked):
client.update(role, clusterCreatorDefault=False,
projectCreatorDefault=False, locked=False)
for role in client.list_global_role():
if role.id == 'user':
client.update(role, newUserDefault=True)
elif role.newUserDefault:
client.update(role, newUserDefault=False)
request.addfinalizer(_cleanup)
@pytest.mark.nonparallel
def test_cluster_create_default_role(admin_mc, cleanup_roles, remove_resource):
test_roles = ['projects-create', 'storage-manage', 'nodes-view']
client = admin_mc.client
set_role_state(client, test_roles, 'cluster')
cluster = client.create_cluster(name=random_str())
remove_resource(cluster)
wait_for_condition('InitialRolesPopulated', 'True', client, cluster)
cluster = client.reload(cluster)
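    # CREATOR_ANNOTATION stores JSON of the form
    # {"required": [...], "created": [...]}: the role bindings the creator
    # should receive and the ones actually created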
data_dict = json.loads(cluster.annotations[CREATOR_ANNOTATION])
assert len(cluster.clusterRoleTemplateBindings()) == 3
assert set(data_dict['created']) == set(data_dict['required'])
assert set(data_dict['created']) == set(test_roles)
for binding in cluster.clusterRoleTemplateBindings():
def binding_principal_validate():
bind = client.by_id_cluster_role_template_binding(binding.id)
if bind.userPrincipalId is None:
return False
return bind
binding = wait_for(binding_principal_validate)
assert binding.roleTemplateId in test_roles
assert binding.userId is not None
user = client.by_id_user(binding.userId)
assert binding.userPrincipalId in user.principalIds
@pytest.mark.nonparallel
def test_cluster_create_role_locked(admin_mc, cleanup_roles, remove_resource):
test_roles = ['projects-create', 'storage-manage', 'nodes-view']
client = admin_mc.client
set_role_state(client, test_roles, 'cluster')
# Grab a role to lock
locked_role = test_roles.pop()
# Lock the role
client.update(client.by_id_role_template(locked_role), locked=True)
cluster = client.create_cluster(name=random_str())
remove_resource(cluster)
wait_for_condition('InitialRolesPopulated', 'True', client, cluster)
cluster = client.reload(cluster)
data_dict = json.loads(cluster.annotations[CREATOR_ANNOTATION])
assert len(cluster.clusterRoleTemplateBindings()) == 2
assert set(data_dict['created']) == set(data_dict['required'])
assert set(data_dict['created']) == set(test_roles)
for binding in cluster.clusterRoleTemplateBindings():
assert binding.roleTemplateId in test_roles
@pytest.mark.nonparallel
def test_project_create_default_role(admin_mc, cleanup_roles, remove_resource):
test_roles = ['project-member', 'workloads-view', 'secrets-view']
client = admin_mc.client
set_role_state(client, test_roles, 'project')
project = client.create_project(name=random_str(), clusterId='local')
remove_resource(project)
wait_for_condition('InitialRolesPopulated', 'True', client, project)
project = client.reload(project)
data_dict = json.loads(project.annotations[
CREATOR_ANNOTATION])
assert len(project.projectRoleTemplateBindings()) == 3
assert set(data_dict['required']) == set(test_roles)
for binding in project.projectRoleTemplateBindings():
def binding_principal_validate():
bind = client.by_id_project_role_template_binding(binding.id)
if bind.userPrincipalId is None:
return False
return bind
binding = wait_for(binding_principal_validate)
assert binding.roleTemplateId in test_roles
assert binding.userId is not None
user = client.by_id_user(binding.userId)
assert binding.userPrincipalId in user.principalIds
@pytest.mark.nonparallel
def test_project_create_role_locked(admin_mc, cleanup_roles, remove_resource):
"""Test a locked role that is set to default is not applied
"""
test_roles = ['project-member', 'workloads-view', 'secrets-view']
client = admin_mc.client
set_role_state(client, test_roles, 'project')
# Grab a role to lock
locked_role = test_roles.pop()
# Lock the role
client.update(client.by_id_role_template(locked_role), locked=True)
# Wait for role to get updated
wait_for(lambda: client.by_id_role_template(locked_role)['locked'] is True,
fail_handler=lambda: "Failed to lock role"+locked_role)
project = client.create_project(name=random_str(), clusterId='local')
remove_resource(project)
wait_for_condition('InitialRolesPopulated', 'True', client, project)
project = client.reload(project)
data_dict = json.loads(project.annotations[
CREATOR_ANNOTATION])
assert len(project.projectRoleTemplateBindings()) == 2
assert set(data_dict['required']) == set(test_roles)
for binding in project.projectRoleTemplateBindings():
assert binding.roleTemplateId in test_roles
@pytest.mark.nonparallel
def test_user_create_default_role(admin_mc, cleanup_roles, remove_resource):
test_roles = ['user-base', 'settings-manage', 'catalogs-use']
principal = "local://fakeuser"
client = admin_mc.client
set_role_state(client, test_roles, 'global')
# Creating a crtb with a fake principal causes the user to be created
# through usermanager.EnsureUser. This triggers the creation of default
# globalRoleBinding
crtb = client.create_cluster_role_template_binding(
clusterId="local",
roleTemplateId="cluster-owner",
userPrincipalId=principal)
remove_resource(crtb)
wait_until(crtb_cb(client, crtb))
crtb = client.reload(crtb)
user = client.by_id_user(crtb.userId)
remove_resource(user)
wait_for_condition('InitialRolesPopulated',
'True', client, user, timeout=5)
user = client.reload(user)
assert len(user.globalRoleBindings()) == 3
for binding in user.globalRoleBindings():
assert binding.globalRoleId in test_roles
@pytest.mark.nonparallel
def test_default_system_project_role(admin_mc):
test_roles = ['project-owner']
client = admin_mc.client
projects = client.list_project(clusterId="local")
required_projects = {}
required_projects["Default"] = defaultProjectLabel
required_projects["System"] = systemProjectLabel
created_projects = []
for project in projects:
name = project['name']
if name == "Default" or name == "System":
project = client.reload(project)
projectLabel = required_projects[name]
assert project['labels'].\
data_dict()[projectLabel] == 'true'
created_projects.append(project)
assert len(required_projects) == len(created_projects)
for project in created_projects:
for binding in project.projectRoleTemplateBindings():
assert binding.roleTemplateId in test_roles
def set_role_state(client, roles, context):
"""Set the default templates for globalRole or roleTemplates"""
if context == 'cluster' or context == 'project':
existing_roles = client.list_role_template()
for role in existing_roles:
client.update(role, clusterCreatorDefault=False,
projectCreatorDefault=False)
for role in roles:
if context == 'cluster':
client.update(client.by_id_role_template(
role), clusterCreatorDefault=True)
elif context == 'project':
client.update(client.by_id_role_template(
role), projectCreatorDefault=True)
elif context == 'global':
existing_roles = client.list_global_role()
for role in existing_roles:
client.update(role, newUserDefault=False)
for role in roles:
client.update(client.by_id_global_role(role), newUserDefault=True)
def crtb_cb(client, crtb):
"""Wait for the crtb to have the userId populated"""
def cb():
c = client.reload(crtb)
return c.userId is not None
return cb
| 9,284 | 33.516729 | 79 | py |
rancher | rancher-master/tests/integration/suite/test_workloads.py |
from .common import random_str
from rancher import ApiError
from .conftest import wait_for, wait_until_available, user_project_client
import time
import pytest
def test_workload_image_change_private_registry(admin_pc):
client = admin_pc.client
registry1_name = random_str()
registries = {'index.docker.io': {
'username': 'testuser',
'password': 'foobarbaz',
}}
registry1 = client.create_dockerCredential(name=registry1_name,
registries=registries)
assert registry1.name == registry1_name
registry2_name = random_str()
registries = {'quay.io': {
'username': 'testuser',
'password': 'foobarbaz',
}}
registry2 = client.create_dockerCredential(name=registry2_name,
registries=registries)
ns = admin_pc.cluster.client.create_namespace(
name=random_str(),
projectId=admin_pc.project.id)
name = random_str()
workload = client.create_workload(
name=name,
namespaceId=ns.id,
scale=1,
containers=[{
'name': 'one',
'image': 'testuser/testimage',
}])
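    # the image has no registry prefix, so it resolves to index.docker.io and
    # the matching dockerCredential (registry1) should be attached as the
    # image pull secret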
assert workload.name == name
assert len(workload.imagePullSecrets) == 1
for secret in workload.imagePullSecrets:
assert secret['name'] == registry1_name
containers = [{
'name': 'one',
'image': 'quay.io/testuser/testimage',
}]
workload = client.update(workload, containers=containers)
for container in workload.containers:
assert container['image'] == 'quay.io/testuser/testimage'
assert len(workload.imagePullSecrets) == 1
assert workload.imagePullSecrets[0]['name'] == registry2_name
client.delete(registry1)
client.delete(registry2)
client.delete(ns)
def test_workload_ports_change(admin_pc):
client = admin_pc.client
ns = admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=admin_pc.
project.id)
# create workload with no ports assigned
# and verify headless service is created
name = random_str()
workload = client.create_workload(
name=name,
namespaceId=ns.id,
scale=1,
containers=[{
'name': 'one',
'image': 'nginx',
}])
svc = wait_for_service_create(client, name)
assert svc.clusterIp is None
assert svc.name == workload.name
assert svc.kind == "ClusterIP"
# update workload with port, and validate cluster ip is set
ports = [{
'sourcePort': '0',
'containerPort': '80',
'kind': 'ClusterIP',
'protocol': 'TCP', }]
client.update(workload,
namespaceId=ns.id,
scale=1,
containers=[{
'name': 'one',
'image': 'nginx',
'ports': ports,
                  }])
svc = wait_for_service_cluserip_set(client, name)
assert svc.clusterIp is not None
# update workload with no ports, and validate cluster ip is reset
client.update(workload,
namespaceId=ns.id,
scale=1,
containers=[{
'name': 'one',
'image': 'nginx',
'ports': [],
                  }])
svc = wait_for_service_cluserip_reset(client, name)
assert svc.clusterIp is None
client.delete(ns)
def test_workload_probes(admin_pc):
client = admin_pc.client
ns = admin_pc.cluster.client.create_namespace(
name=random_str(),
projectId=admin_pc.project.id)
# create workload with probes
name = random_str()
container = {
'name': 'one',
'image': 'nginx',
'livenessProbe': {
'failureThreshold': 3,
'initialDelaySeconds': 10,
'periodSeconds': 2,
'successThreshold': 1,
'tcp': False,
'timeoutSeconds': 2,
'host': 'localhost',
'path': '/healthcheck',
'port': 80,
'scheme': 'HTTP',
},
'readinessProbe': {
'failureThreshold': 3,
'initialDelaySeconds': 10,
'periodSeconds': 2,
'successThreshold': 1,
'timeoutSeconds': 2,
'tcp': True,
'host': 'localhost',
'port': 80,
},
}
workload = client.create_workload(name=name,
namespaceId=ns.id,
scale=1,
containers=[container])
assert workload.containers[0].livenessProbe.host == 'localhost'
assert workload.containers[0].readinessProbe.host == 'localhost'
container['livenessProbe']['host'] = 'updatedhost'
container['readinessProbe']['host'] = 'updatedhost'
workload = client.update(workload,
namespaceId=ns.id,
scale=1,
containers=[container])
assert workload.containers[0].livenessProbe.host == 'updatedhost'
assert workload.containers[0].readinessProbe.host == 'updatedhost'
client.delete(ns)
def test_workload_scheduling(admin_pc):
client = admin_pc.client
ns = admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=admin_pc.
project.id)
name = random_str()
workload = client.create_workload(
name=name,
namespaceId=ns.id,
scale=1,
scheduling={
"scheduler": "some-scheduler",
},
containers=[{
'name': 'one',
'image': 'nginx',
}])
assert workload.scheduling.scheduler == "some-scheduler"
workload = client.update(workload,
namespaceId=ns.id,
scale=1,
scheduling={
"scheduler": "test-scheduler",
},
containers=[{
'name': 'one',
'image': 'nginx',
                             }])
    assert workload.scheduling.scheduler == "test-scheduler"
client.delete(ns)
def test_statefulset_workload_volumemount_subpath(admin_pc):
client = admin_pc.client
# setup
name = random_str()
# valid volumeMounts
volumeMounts = [{
'name': 'vol1',
'mountPath': 'var/lib/mysql',
'subPath': 'mysql',
}]
containers = [{
'name': 'mystatefulset',
'image': 'ubuntu:xenial',
'volumeMounts': volumeMounts,
}]
# invalid volumeMounts
volumeMounts_one = [{
'name': 'vol1',
'mountPath': 'var/lib/mysql',
'subPath': '/mysql',
}]
containers_one = [{
'name': 'mystatefulset',
'image': 'ubuntu:xenial',
'volumeMounts': volumeMounts_one,
}]
volumeMounts_two = [{
'name': 'vol1',
'mountPath': 'var/lib/mysql',
'subPath': '../mysql',
}]
containers_two = [{
'name': 'mystatefulset',
'image': 'ubuntu:xenial',
'volumeMounts': volumeMounts_two,
}]
statefulSetConfig = {
'podManagementPolicy': 'OrderedReady',
'revisionHistoryLimit': 10,
'strategy': 'RollingUpdate',
'type': 'statefulSetConfig',
}
volumes = [{
'name': 'vol1',
'persistentVolumeClaim': {
'persistentVolumeClaimId': "default: myvolume",
'readOnly': False,
'type': 'persistentVolumeClaimVolumeSource',
},
'type': 'volume',
}]
# 1. validate volumeMounts.subPath when workload creating
# invalid volumeMounts.subPath: absolute path
with pytest.raises(ApiError) as e:
client.create_workload(name=name,
namespaceId='default',
scale=1,
containers=containers_one,
statefulSetConfig=statefulSetConfig,
volumes=volumes)
assert e.value.error.status == 422
# invalid volumeMounts.subPath: contains '..'
with pytest.raises(ApiError) as e:
client.create_workload(name=name,
namespaceId='default',
scale=1,
containers=containers_two,
statefulSetConfig=statefulSetConfig,
volumes=volumes)
assert e.value.error.status == 422
# 2. validate volumeMounts.subPath when workload update
# create a validate workload then update
workload = client.create_workload(name=name,
namespaceId='default',
scale=1,
containers=containers,
statefulSetConfig=statefulSetConfig,
volumes=volumes)
with pytest.raises(ApiError) as e:
client.update(workload,
namespaceId='default',
scale=1,
containers=containers_one,
statefulSetConfig=statefulSetConfig,
volumes=volumes)
assert e.value.error.status == 422
with pytest.raises(ApiError) as e:
client.update(workload,
namespaceId='default',
scale=1,
containers=containers_two,
statefulSetConfig=statefulSetConfig,
volumes=volumes)
assert e.value.error.status == 422
def test_workload_redeploy(admin_pc, remove_resource):
client = admin_pc.client
ns = admin_pc.cluster.client.create_namespace(
name=random_str(),
projectId=admin_pc.project.id)
remove_resource(ns)
name = random_str()
workload = client.create_workload(
name=name,
namespaceId=ns.id,
scale=1,
containers=[{
'name': 'one',
'image': 'nginx',
}])
remove_resource(workload)
client.action(workload, 'redeploy')
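    # the redeploy action stamps the workload with a cattle.io/timestamp
    # annotation; wait for it to show up on the workload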
def _timestamp_reset():
workloads = client.list_workload(uuid=workload.uuid).data
return len(workloads) > 0 and workloads[0].annotations[
'cattle.io/timestamp'] is not None
wait_for(_timestamp_reset,
fail_handler=lambda: 'Timed out waiting for timestamp reset')
def test_perform_workload_action_read_only(admin_mc, admin_pc, remove_resource,
user_mc, user_factory):
"""Tests workload actions with a read-only user and a member user.
Note: this test exists only in 2.5+ due to pod scheduling on the
local cluster. Manually ensure that any changes in 2.4 and below
work."""
client = admin_pc.client
project = admin_pc.project
user = user_mc
user_member = user_factory()
ns = admin_pc.cluster.client.create_namespace(
name=random_str(),
projectId=project.id)
remove_resource(ns)
# Create a read-only user binding.
prtb1 = admin_mc.client.create_project_role_template_binding(
name="prtb-" + random_str(),
userId=user.user.id,
projectId=project.id,
roleTemplateId="read-only")
remove_resource(prtb1)
wait_until_available(user.client, project)
# Then, create a member user binding.
prtb2 = admin_mc.client.create_project_role_template_binding(
name="prtb-" + random_str(),
userId=user_member.user.id,
projectId=project.id,
roleTemplateId="project-member")
remove_resource(prtb2)
wait_until_available(user_member.client, project)
user_pc = user_project_client(user, project)
user_member_pc = user_project_client(user_member, project)
# Admin user creates the workload.
workload_name = random_str()
workload = client.create_workload(
name=workload_name,
namespaceId=ns.id,
scale=1,
containers=[{
'name': 'foo',
'image': 'rancher/mirrored-library-nginx:1.21.1-alpine',
'env': [{
'name': 'FOO_KEY',
'value': 'FOO_VALUE',
}]
}])
remove_resource(workload)
wait_for_workload(client, workload.id, ns.id)
# Admin user updates the workload to yield a rollback option.
workload.containers = [{
'name': 'foo',
'image': 'rancher/mirrored-library-nginx:1.21.1-alpine',
'env': [{
'name': 'BAR_KEY',
'value': 'BAR_VALUE',
}]
}]
workload = client.reload(workload)
workload = client.update_by_id_workload(workload.id, workload)
workload = client.reload(workload)
wait_for_workload(client, workload.id, ns.id)
original_rev_id = workload.revisions().data[0].id
# Read-only users should receive a 404 error.
with pytest.raises(ApiError) as e:
workload = client.reload(workload)
user_pc.action(obj=workload, action_name="rollback",
replicaSetId=original_rev_id)
assert e.value.error.status == 404
# Member users will be able to perform the rollback.
workload = client.reload(workload)
user_member_pc.action(obj=workload, action_name="rollback",
replicaSetId=original_rev_id)
def wait_for_service_create(client, name, timeout=30):
start = time.time()
services = client.list_service(name=name, kind="ClusterIP")
while len(services) == 0:
time.sleep(.5)
services = client.list_service(name=name, kind="ClusterIP")
if time.time() - start > timeout:
raise Exception('Timeout waiting for workload service')
return services.data[0]
def wait_for_service_cluserip_set(client, name, timeout=30):
start = time.time()
services = client.list_service(name=name, kind="ClusterIP")
while len(services) == 0 or services.data[0].clusterIp is None:
time.sleep(.5)
services = client.list_service(name=name, kind="ClusterIP")
if time.time() - start > timeout:
raise Exception('Timeout waiting for workload service')
return services.data[0]
def wait_for_service_cluserip_reset(client, name, timeout=30):
start = time.time()
services = client.list_service(name=name, kind="ClusterIP")
while len(services) == 0 or services.data[0].clusterIp is not None:
time.sleep(.5)
services = client.list_service(name=name, kind="ClusterIP")
if time.time() - start > timeout:
raise Exception('Timeout waiting for workload service')
return services.data[0]
def wait_for_workload(client, workload_id, workload_ns, timeout=30):
def _is_found():
workloads = client.list_workload(namespaceId=workload_ns)
for workload in workloads.data:
if workload.id == workload_id:
return True
return False
start = time.time()
while not _is_found():
time.sleep(.5)
if time.time() - start > timeout:
raise Exception("Timeout waiting for workload")
| 15,629 | 32.184713 | 79 | py |
rancher | rancher-master/tests/integration/suite/pipeline_common.py |
# flake8: noqa
import requests
from flask import jsonify
from threading import Thread
class MockServer(Thread):
def __init__(self, port=5000):
super().__init__()
from flask import Flask
self.port = port
self.app = Flask(__name__)
self.url = "http://localhost:%s" % self.port
self.app.add_url_rule("/shutdown", view_func=self._shutdown_server)
def _shutdown_server(self):
from flask import request
if 'werkzeug.server.shutdown' not in request.environ:
raise RuntimeError('Not running the development server')
request.environ['werkzeug.server.shutdown']()
return 'Server shutting down...'
def shutdown_server(self):
requests.get("http://localhost:%s/shutdown" % self.port,
headers={'Connection': 'close'})
self.join()
def run(self):
self.app.run(host='0.0.0.0', port=self.port, threaded=True)
class MockGithub(MockServer):
def api_user(self):
return jsonify(GITHUB_USER_PAYLOAD)
def api_repos(self):
return jsonify(GITHUB_REPOS_PAYLOAD)
def api_file_content(self):
return jsonify(GITHUB_FILE_CONTENT_PAYLOAD)
def api_commit(self):
return jsonify(GITHUB_COMMIT_PAYLOAD)
def api_branch(self):
return jsonify(GITHUB_BRANCH_PAYLOAD)
def api_access_token(self):
return jsonify({'access_token': 'test_token', 'token_type': 'bearer'})
def add_endpoints(self):
self.app.add_url_rule("/login/oauth/access_token",
view_func=self.api_access_token,
methods=('POST',))
self.app.add_url_rule("/api/v3/user", view_func=self.api_user)
self.app.add_url_rule("/api/v3/user/repos", view_func=self.api_repos)
self.app.add_url_rule(
"/api/v3/repos/octocat/Hello-World/contents/.rancher-pipeline.yml",
view_func=self.api_file_content)
self.app.add_url_rule(
"/api/v3/repos/octocat/Hello-World/commits/master",
view_func=self.api_commit)
self.app.add_url_rule("/api/v3/repos/octocat/Hello-World/branches",
view_func=self.api_branch)
pass
def __init__(self, port):
super().__init__(port)
self.add_endpoints()
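# Illustrative usage sketch (not exercised in this module): a test would start
# the mock on a free port, point the pipeline's GitHub endpoint at mock.url,
# and shut it down afterwards. The port number below is arbitrary.
#   mock = MockGithub(port=4016)
#   mock.start()
#   ...
#   mock.shutdown_server()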
GITHUB_USER_PAYLOAD = {
"login": "octocat",
"id": 1,
"node_id": "MDQ6VXNlcjE=",
"avatar_url": "https://github.com/images/error/octocat_happy.gif",
"gravatar_id": "",
"url": "https://github.com/api/v3/users/octocat",
"html_url": "https://github.com/octocat",
"followers_url": "https://github.com/api/v3/users/octocat/followers",
"following_url": "https://github.com/api/v3/users/octocat/following{/other_user}",
"gists_url": "https://github.com/api/v3/users/octocat/gists{/gist_id}",
"starred_url": "https://github.com/api/v3/users/octocat/starred{/owner}{/repo}",
"subscriptions_url": "https://github.com/api/v3/users/octocat/subscriptions",
"organizations_url": "https://github.com/api/v3/users/octocat/orgs",
"repos_url": "https://github.com/api/v3/users/octocat/repos",
"events_url": "https://github.com/api/v3/users/octocat/events{/privacy}",
"received_events_url": "https://github.com/api/v3/users/octocat/received_events",
"type": "User",
"site_admin": False,
"name": "monalisa octocat",
"company": "GitHub",
"blog": "https://github.com/blog",
"location": "San Francisco",
"email": "[email protected]",
"hireable": False,
"bio": "There once was...",
"public_repos": 2,
"public_gists": 1,
"followers": 20,
"following": 0,
"created_at": "2008-01-14T04:33:35Z",
"updated_at": "2008-01-14T04:33:35Z"
}
GITHUB_REPOS_PAYLOAD = [
{
"id": 1296269,
"node_id": "MDEwOlJlcG9zaXRvcnkxMjk2MjY5",
"name": "Hello-World",
"full_name": "octocat/Hello-World",
"owner": {
"login": "octocat",
"id": 1,
"node_id": "MDQ6VXNlcjE=",
"avatar_url": "https://github.com/images/error/octocat_happy.gif",
"gravatar_id": "",
"url": "https://github.com/api/v3/users/octocat",
"html_url": "https://github.com/octocat",
"followers_url": "https://github.com/api/v3/users/octocat/followers",
"following_url": "https://github.com/api/v3/users/octocat/following{/other_user}",
"gists_url": "https://github.com/api/v3/users/octocat/gists{/gist_id}",
"starred_url": "https://github.com/api/v3/users/octocat/starred{/owner}{/repo}",
"subscriptions_url": "https://github.com/api/v3/users/octocat/subscriptions",
"organizations_url": "https://github.com/api/v3/users/octocat/orgs",
"repos_url": "https://github.com/api/v3/users/octocat/repos",
"events_url": "https://github.com/api/v3/users/octocat/events{/privacy}",
"received_events_url": "https://github.com/api/v3/users/octocat/received_events",
"type": "User",
"site_admin": False
},
"private": False,
"html_url": "https://github.com/octocat/Hello-World",
"description": "This your first repo!",
"fork": False,
"url": "https://github.com/api/v3/repos/octocat/Hello-World",
"archive_url": "https://github.com/api/v3/repos/octocat/Hello-World/{archive_format}{/ref}",
"assignees_url": "https://github.com/api/v3/repos/octocat/Hello-World/assignees{/user}",
"blobs_url": "https://github.com/api/v3/repos/octocat/Hello-World/git/blobs{/sha}",
"branches_url": "https://github.com/api/v3/repos/octocat/Hello-World/branches{/branch}",
"collaborators_url": "https://github.com/api/v3/repos/octocat/Hello-World/collaborators{/collaborator}",
"comments_url": "https://github.com/api/v3/repos/octocat/Hello-World/comments{/number}",
"commits_url": "https://github.com/api/v3/repos/octocat/Hello-World/commits{/sha}",
"compare_url": "https://github.com/api/v3/repos/octocat/Hello-World/compare/{base}...{head}",
"contents_url": "https://github.com/api/v3/repos/octocat/Hello-World/contents/{+path}",
"contributors_url": "https://github.com/api/v3/repos/octocat/Hello-World/contributors",
"deployments_url": "https://github.com/api/v3/repos/octocat/Hello-World/deployments",
"downloads_url": "https://github.com/api/v3/repos/octocat/Hello-World/downloads",
"events_url": "https://github.com/api/v3/repos/octocat/Hello-World/events",
"forks_url": "https://github.com/api/v3/repos/octocat/Hello-World/forks",
"git_commits_url": "https://github.com/api/v3/repos/octocat/Hello-World/git/commits{/sha}",
"git_refs_url": "https://github.com/api/v3/repos/octocat/Hello-World/git/refs{/sha}",
"git_tags_url": "https://github.com/api/v3/repos/octocat/Hello-World/git/tags{/sha}",
"git_url": "git:github.com/octocat/Hello-World.git",
"issue_comment_url": "https://github.com/api/v3/repos/octocat/Hello-World/issues/comments{/number}",
"issue_events_url": "https://github.com/api/v3/repos/octocat/Hello-World/issues/events{/number}",
"issues_url": "https://github.com/api/v3/repos/octocat/Hello-World/issues{/number}",
"keys_url": "https://github.com/api/v3/repos/octocat/Hello-World/keys{/key_id}",
"labels_url": "https://github.com/api/v3/repos/octocat/Hello-World/labels{/name}",
"languages_url": "https://github.com/api/v3/repos/octocat/Hello-World/languages",
"merges_url": "https://github.com/api/v3/repos/octocat/Hello-World/merges",
"milestones_url": "https://github.com/api/v3/repos/octocat/Hello-World/milestones{/number}",
"notifications_url": "https://github.com/api/v3/repos/octocat/Hello-World/notifications{?since,all,participating}",
"pulls_url": "https://github.com/api/v3/repos/octocat/Hello-World/pulls{/number}",
"releases_url": "https://github.com/api/v3/repos/octocat/Hello-World/releases{/id}",
"ssh_url": "[email protected]:octocat/Hello-World.git",
"stargazers_url": "https://github.com/api/v3/repos/octocat/Hello-World/stargazers",
"statuses_url": "https://github.com/api/v3/repos/octocat/Hello-World/statuses/{sha}",
"subscribers_url": "https://github.com/api/v3/repos/octocat/Hello-World/subscribers",
"subscription_url": "https://github.com/api/v3/repos/octocat/Hello-World/subscription",
"tags_url": "https://github.com/api/v3/repos/octocat/Hello-World/tags",
"teams_url": "https://github.com/api/v3/repos/octocat/Hello-World/teams",
"trees_url": "https://github.com/api/v3/repos/octocat/Hello-World/git/trees{/sha}",
"clone_url": "https://github.com/octocat/Hello-World.git",
"mirror_url": "git:git.example.com/octocat/Hello-World",
"hooks_url": "https://github.com/api/v3/repos/octocat/Hello-World/hooks",
"svn_url": "https://svn.github.com/octocat/Hello-World",
"homepage": "https://github.com",
"language": None,
"forks_count": 9,
"stargazers_count": 80,
"watchers_count": 80,
"size": 108,
"default_branch": "master",
"open_issues_count": 0,
"topics": [
"octocat",
"atom",
"electron",
"API"
],
"has_issues": True,
"has_projects": True,
"has_wiki": True,
"has_pages": False,
"has_downloads": True,
"archived": False,
"pushed_at": "2011-01-26T19:06:43Z",
"created_at": "2011-01-26T19:01:12Z",
"updated_at": "2011-01-26T19:14:43Z",
"permissions": {
"admin": True,
"push": True,
"pull": True
},
"allow_rebase_merge": True,
"allow_squash_merge": True,
"allow_merge_commit": True,
"subscribers_count": 42,
"network_count": 0,
"license": {
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://github.com/api/v3/licenses/mit",
"node_id": "MDc6TGljZW5zZW1pdA=="
}
}
]
GITHUB_FILE_CONTENT_PAYLOAD = {
"name": ".rancher-pipeline.yml",
"path": ".rancher-pipeline.yml",
"sha": "e849c8954bad15cdccd309d3d434b7580e3246ce",
"size": 881,
"url": "https://github.com/api/v3/repos/octocat/Hello-World/contents/.rancher-pipeline.yml?ref=master",
"html_url": "https://github.com/octocat/Hello-World/blob/master/.rancher-pipeline.yml",
"git_url": "https://github.com/api/v3/repos/octocat/Hello-World/git/blobs/e849c8954bad15cdccd309d3d434b7580e3246ce",
"type": "file",
"content": "c3RhZ2VzOgotIG5hbWU6IENvZGVjZXB0aW9uIHRlc3QKICBzdGVwczoKICAt\nIHJ1blNjcmlwdENvbmZpZzoKICAgICAgaW1hZ2U6IHBocDo3LjIKICAgICAg\nc2hlbGxTY3JpcHQ6IHwtCiAgICAgICAgYXB0LWdldCB1cGRhdGUKICAgICAg\nICBhcHQtZ2V0IGluc3RhbGwgLXkgLS1uby1pbnN0YWxsLXJlY29tbWVuZHMg\nZ2l0IHppcCBsaWJzcWxpdGUzLWRldiB6bGliMWctZGV2CiAgICAgICAgZG9j\na2VyLXBocC1leHQtaW5zdGFsbCB6aXAKICAgICAgICBjdXJsIC0tc2lsZW50\nIC0tc2hvdy1lcnJvciBodHRwczovL2dldGNvbXBvc2VyLm9yZy9pbnN0YWxs\nZXIgfCBwaHAKICAgICAgICAuL2NvbXBvc2VyLnBoYXIgaW5zdGFsbCAtbiAt\nLXByZWZlci1kaXN0CiAgICAgICAgdG91Y2ggc3RvcmFnZS90ZXN0aW5nLnNx\nbGl0ZSBzdG9yYWdlL2RhdGFiYXNlLnNxbGl0ZQogICAgICAgIGNwIC5lbnYu\ndGVzdGluZyAuZW52CiAgICAgICAgcGhwIGFydGlzYW4gbWlncmF0ZQogICAg\nICAgIHBocCBhcnRpc2FuIG1pZ3JhdGUgLS1lbnY9dGVzdGluZyAtLWRhdGFi\nYXNlPXNxbGl0ZV90ZXN0aW5nIC0tZm9yY2UKICAgICAgICAuL3ZlbmRvci9i\naW4vY29kZWNlcHQgYnVpbGQKICAgICAgICAuL3ZlbmRvci9iaW4vY29kZWNl\ncHQgcnVuCi0gbmFtZTogUHVibGlzaCBpbWFnZQogIHN0ZXBzOgogIC0gcHVi\nbGlzaEltYWdlQ29uZmlnOgogICAgICBkb2NrZXJmaWxlUGF0aDogLi9Eb2Nr\nZXJmaWxlCiAgICAgIGJ1aWxkQ29udGV4dDogLgogICAgICB0YWc6IHBocC1l\neGFtcGxlOiR7Q0lDRF9FWEVDVVRJT05fU0VRVUVOQ0V9Ci0gbmFtZTogRGVw\nbG95CiAgc3RlcHM6CiAgLSBhcHBseVlhbWxDb25maWc6CiAgICAgIHBhdGg6\nIC4vZGVwbG95L2RlcGxveW1lbnQueWFtbAo=\n",
"encoding": "base64",
"_links": {
"self": "https://github.com/api/v3/repos/octocat/Hello-World/contents/.rancher-pipeline.yml?ref=master",
"git": "https://github.com/api/v3/repos/octocat/Hello-World/git/blobs/e849c8954bad15cdccd309d3d434b7580e3246ce",
"html": "https://github.com/octocat/Hello-World/blob/master/.rancher-pipeline.yml"
}
}
GITHUB_COMMIT_PAYLOAD = {
"sha": "7fd1a60b01f91b314f59955a4e4d4e80d8edf11d",
"node_id": "MDY6Q29tbWl0MTI5NjI2OTo3ZmQxYTYwYjAxZjkxYjMxNGY1OTk1NWE0ZTRkNGU4MGQ4ZWRmMTFk",
"commit": {
"author": {
"name": "The Octocat",
"email": "[email protected]",
"date": "2012-03-06T23:06:50Z"
},
"committer": {
"name": "The Octocat",
"email": "[email protected]",
"date": "2012-03-06T23:06:50Z"
},
"message": "Merge pull request #6 from Spaceghost/patch-1\n\nNew line at end of file.",
"tree": {
"sha": "b4eecafa9be2f2006ce1b709d6857b07069b4608",
"url": "https://github.com/api/v3/repos/octocat/Hello-World/git/trees/b4eecafa9be2f2006ce1b709d6857b07069b4608"
},
"url": "https://github.com/api/v3/repos/octocat/Hello-World/git/commits/7fd1a60b01f91b314f59955a4e4d4e80d8edf11d",
"comment_count": 55,
"verification": {
"verified": False,
"reason": "unsigned",
"signature": None,
"payload": None
}
},
"url": "https://github.com/api/v3/repos/octocat/Hello-World/commits/7fd1a60b01f91b314f59955a4e4d4e80d8edf11d",
"html_url": "https://github.com/octocat/Hello-World/commit/7fd1a60b01f91b314f59955a4e4d4e80d8edf11d",
"comments_url": "https://github.com/api/v3/repos/octocat/Hello-World/commits/7fd1a60b01f91b314f59955a4e4d4e80d8edf11d/comments",
"author": {
"login": "octocat",
"id": 583231,
"node_id": "MDQ6VXNlcjU4MzIzMQ==",
"avatar_url": "https://avatars3.githubusercontent.com/u/583231?v=4",
"gravatar_id": "",
"url": "https://github.com/api/v3/users/octocat",
"html_url": "https://github.com/octocat",
"followers_url": "https://github.com/api/v3/users/octocat/followers",
"following_url": "https://github.com/api/v3/users/octocat/following{/other_user}",
"gists_url": "https://github.com/api/v3/users/octocat/gists{/gist_id}",
"starred_url": "https://github.com/api/v3/users/octocat/starred{/owner}{/repo}",
"subscriptions_url": "https://github.com/api/v3/users/octocat/subscriptions",
"organizations_url": "https://github.com/api/v3/users/octocat/orgs",
"repos_url": "https://github.com/api/v3/users/octocat/repos",
"events_url": "https://github.com/api/v3/users/octocat/events{/privacy}",
"received_events_url": "https://github.com/api/v3/users/octocat/received_events",
"type": "User",
"site_admin": False
},
"committer": {
"login": "octocat",
"id": 583231,
"node_id": "MDQ6VXNlcjU4MzIzMQ==",
"avatar_url": "https://avatars3.githubusercontent.com/u/583231?v=4",
"gravatar_id": "",
"url": "https://github.com/api/v3/users/octocat",
"html_url": "https://github.com/octocat",
"followers_url": "https://github.com/api/v3/users/octocat/followers",
"following_url": "https://github.com/api/v3/users/octocat/following{/other_user}",
"gists_url": "https://github.com/api/v3/users/octocat/gists{/gist_id}",
"starred_url": "https://github.com/api/v3/users/octocat/starred{/owner}{/repo}",
"subscriptions_url": "https://github.com/api/v3/users/octocat/subscriptions",
"organizations_url": "https://github.com/api/v3/users/octocat/orgs",
"repos_url": "https://github.com/api/v3/users/octocat/repos",
"events_url": "https://github.com/api/v3/users/octocat/events{/privacy}",
"received_events_url": "https://github.com/api/v3/users/octocat/received_events",
"type": "User",
"site_admin": False
},
"parents": [
{
"sha": "553c2077f0edc3d5dc5d17262f6aa498e69d6f8e",
"url": "https://github.com/api/v3/repos/octocat/Hello-World/commits/553c2077f0edc3d5dc5d17262f6aa498e69d6f8e",
"html_url": "https://github.com/octocat/Hello-World/commit/553c2077f0edc3d5dc5d17262f6aa498e69d6f8e"
},
{
"sha": "762941318ee16e59dabbacb1b4049eec22f0d303",
"url": "https://github.com/api/v3/repos/octocat/Hello-World/commits/762941318ee16e59dabbacb1b4049eec22f0d303",
"html_url": "https://github.com/octocat/Hello-World/commit/762941318ee16e59dabbacb1b4049eec22f0d303"
}
],
"stats": {
"total": 2,
"additions": 1,
"deletions": 1
},
"files": [
{
"sha": "980a0d5f19a64b4b30a87d4206aade58726b60e3",
"filename": "README",
"status": "modified",
"additions": 1,
"deletions": 1,
"changes": 2,
"blob_url": "https://github.com/octocat/Hello-World/blob/7fd1a60b01f91b314f59955a4e4d4e80d8edf11d/README",
"raw_url": "https://github.com/octocat/Hello-World/raw/7fd1a60b01f91b314f59955a4e4d4e80d8edf11d/README",
"contents_url": "https://github.com/api/v3/repos/octocat/Hello-World/contents/README?ref=7fd1a60b01f91b314f59955a4e4d4e80d8edf11d",
"patch": "@@ -1 +1 @@\n-Hello World!\n\\ No newline at end of file\n+Hello World!"
}
]
}
GITHUB_BRANCH_PAYLOAD = [
{
"name": "master",
"commit": {
"sha": "7fd1a60b01f91b314f59955a4e4d4e80d8edf11d",
"url": "https://github.com/api/v3/repos/octocat/Hello-World/commits/7fd1a60b01f91b314f59955a4e4d4e80d8edf11d"
}
}
]
| 17,913 | 49.461972 | 1,234 | py |
rancher | rancher-master/tests/integration/suite/test_hpa.py |
from .common import random_str
def test_hpa(admin_pc):
client = admin_pc.client
ns = admin_pc.cluster.client.create_namespace(
name=random_str(),
projectId=admin_pc.project.id)
name = random_str()
workload = client.create_workload(
name=name,
namespaceId=ns.id,
scale=1,
containers=[{
'name': 'one',
'image': 'nginx',
'resources': {
'requests': '100m',
},
}])
assert workload.id != ''
name = random_str()
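    # exercise all four HPA metric target types: Resource, Pods, External
    # and Object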
client.create_horizontalPodAutoscaler(
name=name,
namespaceId=ns.id,
maxReplicas=10,
workloadId=workload.id,
metrics=[{
'name': 'cpu',
'type': 'Resource',
'target': {
'type': 'Utilization',
'utilization': '50',
},
}, {
'name': 'pods-test',
'type': 'Pods',
'target': {
'type': 'AverageValue',
'averageValue': '50',
},
}, {
'name': 'pods-external',
'type': 'External',
'target': {
'type': 'Value',
'value': '50',
},
}, {
"describedObject": {
"apiVersion": "extensions/v1beta1",
"kind": "Ingress",
"name": "test",
},
'name': 'object-test',
'type': 'Object',
'target': {
'type': 'Value',
'value': '50',
},
}],
)
hpas = client.list_horizontalPodAutoscaler(
namespaceId=ns.id
)
assert len(hpas) == 1
hpa = hpas.data[0]
assert hpa.state == "initializing"
client.delete(hpa)
client.delete(workload)
client.delete(ns)
| 1,872 | 25.013889 | 51 | py |
rancher | rancher-master/tests/integration/suite/test_role_template.py |
from .common import random_str
from .conftest import wait_until_available, wait_until, wait_for
from rancher import ApiError
import time
import pytest
import kubernetes
def test_role_template_creation(admin_mc, remove_resource):
rt_name = random_str()
rt = admin_mc.client.create_role_template(name=rt_name)
remove_resource(rt)
assert rt is not None
assert rt.name == rt_name
def test_administrative_role_template_creation(admin_mc, remove_resource):
client = admin_mc.client
crt_name = random_str()
crt = client.create_role_template(name=crt_name,
context="cluster",
administrative=True)
remove_resource(crt)
assert crt is not None
assert crt.name == crt_name
prt_name = random_str()
try:
client.create_role_template(name=prt_name,
context="project",
administrative=True)
except ApiError as e:
assert e.error.status == 500
assert e.error.message == "Only cluster roles can be administrative"
def test_edit_builtin_role_template(admin_mc, remove_resource):
client = admin_mc.client
    # edit a non-builtin role; any field is updatable
org_rt_name = random_str()
rt = client.create_role_template(name=org_rt_name)
remove_resource(rt)
wait_for_role_template_creation(admin_mc, org_rt_name)
new_rt_name = random_str()
rt = client.update(rt, name=new_rt_name)
assert rt.name == new_rt_name
    # edit builtin role; only locked, clusterCreatorDefault and
    # projectCreatorDefault are updatable
new_rt_name = "Cluster Member-Updated"
cm_rt = client.by_id_role_template("cluster-member")
rt = client.update(cm_rt, name=new_rt_name)
assert rt.name == "Cluster Member"
def test_context_prtb(admin_mc, admin_pc, remove_resource,
user_mc):
"""Asserts that a projectroletemplatebinding cannot reference a cluster
roletemplate
"""
admin_client = admin_mc.client
project = admin_pc.project
with pytest.raises(ApiError) as e:
prtb = admin_client.create_project_role_template_binding(
name="prtb-" + random_str(),
userId=user_mc.user.id,
projectId=project.id,
roleTemplateId="cluster-owner"
)
remove_resource(prtb)
assert e.value.error.status == 422
assert "Cannot reference context [cluster] from [project] context" in \
e.value.error.message
def test_context_crtb(admin_mc, admin_cc, remove_resource,
user_mc):
"""Asserts that a clusterroletemplatebinding cannot reference a project
roletemplate
"""
admin_client = admin_mc.client
with pytest.raises(ApiError) as e:
crtb = admin_client.create_cluster_role_template_binding(
userId=user_mc.user.id,
roleTemplateId="project-owner",
clusterId=admin_cc.cluster.id,
)
remove_resource(crtb)
assert e.value.error.status == 422
assert "Cannot reference context [project] from [cluster] context" in \
e.value.error.message
def test_cloned_role_permissions(admin_mc, remove_resource, user_factory,
admin_pc):
client = admin_mc.client
rt_name = random_str()
rt = client.create_role_template(name=rt_name, context="project",
roleTemplateIds=["project-owner"])
remove_resource(rt)
wait_for_role_template_creation(admin_mc, rt_name)
# user with cloned project owner role should be able to enable monitoring
cloned_user = user_factory()
remove_resource(cloned_user)
prtb = admin_mc.client.create_project_role_template_binding(
name="prtb-" + random_str(),
userId=cloned_user.user.id,
projectId=admin_pc.project.id,
roleTemplateId=rt.id
)
remove_resource(prtb)
wait_until_available(cloned_user.client, admin_pc.project)
project = cloned_user.client.by_id_project(admin_pc.project.id)
assert project.actions.enableMonitoring
def test_update_role_template_permissions(admin_mc, remove_resource,
user_factory, admin_cc):
client = admin_mc.client
cc_rt_name = random_str()
view_cc_rule = [{'apiGroups': ['management.cattle.io'],
'resources': ['clustercatalogs'],
'type': '/v3/schemas/policyRule',
'verbs': ['get', 'list', 'watch']},
{'apiGroups': ['management.cattle.io'],
'resources': ['clusterevents'],
'type': '/v3/schemas/policyRule',
'verbs': ['get', 'list', 'watch']}]
rt = client.create_role_template(name=cc_rt_name, context="cluster",
rules=view_cc_rule)
# remove_resource(rt)
role_template_id = rt['id']
wait_for_role_template_creation(admin_mc, cc_rt_name)
user_view_cc = user_factory()
user_client = user_view_cc.client
crtb = client.create_cluster_role_template_binding(
userId=user_view_cc.user.id,
roleTemplateId=role_template_id,
clusterId=admin_cc.cluster.id,
)
remove_resource(crtb)
wait_until_available(user_client, admin_cc.cluster)
# add clustercatalog as admin
url = "https://github.com/rancher/integration-test-charts.git"
name = random_str()
cluster_catalog = \
client.create_cluster_catalog(name=name,
branch="master",
url=url,
clusterId="local",
)
remove_resource(cluster_catalog)
wait_until_available(client, cluster_catalog)
# list clustercatalog as the cluster-member
cc = user_client.list_cluster_catalog(name=name)
assert len(cc) == 1
# update role to remove view clustercatalogs permission
view_cc_role_template = client.by_id_role_template(role_template_id)
new_rules = [{'apiGroups': ['management.cattle.io'],
'resources': ['clusterevents'],
'type': '/v3/schemas/policyRule',
'verbs': ['get', 'list', 'watch']}]
client.update(view_cc_role_template, rules=new_rules)
    wait_until(lambda: len(client.reload(view_cc_role_template)['rules']) == 1)
rbac = kubernetes.client.RbacAuthorizationV1Api(admin_mc.k8s_client)
def check_role_rules(rbac, namespace, role_name, rules):
role = rbac.read_namespaced_role(role_name, namespace)
if len(role.rules) == len(rules) and \
role.rules[0].resources == ["clusterevents"]:
return True
wait_for(lambda: check_role_rules(rbac, 'local', role_template_id,
new_rules),
timeout=60, fail_handler=lambda:
'failed to check updated role')
# user should not be able to list cluster catalog now
cc = user_client.list_cluster_catalog(name=name)
assert len(cc) == 0
def test_role_template_update_inherited_role(admin_mc, remove_resource,
user_factory, admin_pc):
client = admin_mc.client
name = random_str()
# clone project-member role
pm = client.by_id_role_template("project-member")
cloned_pm = client.create_role_template(name=name, context="project",
rules=pm.rules,
roleTemplateIds=["edit"])
remove_resource(cloned_pm)
role_template_id = cloned_pm['id']
wait_for_role_template_creation(admin_mc, name)
# create a namespace in this project
ns_name = random_str()
ns = admin_pc.cluster.client.create_namespace(name=ns_name,
projectId=admin_pc.
project.id)
remove_resource(ns)
# add user to a project with this role
user_cloned_pm = user_factory()
prtb = client.create_project_role_template_binding(
name="prtb-" + random_str(),
userId=user_cloned_pm.user.id,
projectId=admin_pc.project.id,
roleTemplateId=role_template_id
)
remove_resource(prtb)
wait_until_available(user_cloned_pm.client, admin_pc.project)
# As the user, assert that the two expected role bindings exist in the
# namespace for the user. There should be one for the rancher role
# 'cloned_pm' and one for the k8s built-in role 'edit'
rbac = kubernetes.client.RbacAuthorizationV1Api(admin_mc.k8s_client)
def _refresh_user_template():
rbs = rbac.list_namespaced_role_binding(ns_name)
rb_dict = {}
for rb in rbs.items:
if rb.subjects[0].name == user_cloned_pm.user.id:
rb_dict[rb.role_ref.name] = rb
return role_template_id in rb_dict and 'edit' in rb_dict
wait_for(_refresh_user_template,
fail_handler=lambda: 'role bindings do not exist')
# now edit the roleTemplate to remove "edit" from inherited roles,
# and add "view" to inherited roles
client.update(cloned_pm, roleTemplateIds=["view"])
    wait_until(lambda: client.reload(cloned_pm)['roleTemplateIds'] == ["view"])
def check_rb(rbac):
rbs = rbac.list_namespaced_role_binding(ns_name)
for rb in rbs.items:
if rb.subjects[0].name == user_cloned_pm.user.id \
and rb.role_ref.name == "view":
return True
wait_for(lambda: check_rb(rbac), timeout=60,
fail_handler=lambda: 'failed to check updated rolebinding')
# Now there should be one rolebinding for the rancher role
# 'cloned_pm' and one for the k8s built-in role 'view'
rbac = kubernetes.client.RbacAuthorizationV1Api(admin_mc.k8s_client)
rbs = rbac.list_namespaced_role_binding(ns_name)
rb_dict = {}
for rb in rbs.items:
if rb.subjects[0].name == user_cloned_pm.user.id:
rb_dict[rb.role_ref.name] = rb
assert role_template_id in rb_dict
assert 'view' in rb_dict
assert 'edit' not in rb_dict
def test_kubernetes_admin_permissions(admin_mc, remove_resource, user_factory,
admin_pc):
client = admin_mc.client
name = random_str()
# clone Kubernetes-admin role
cloned_admin = client.create_role_template(name=name, context="project",
roleTemplateIds=["admin"])
remove_resource(cloned_admin)
wait_for_role_template_creation(admin_mc, name)
# add user with cloned kubernetes-admin role to a project
cloned_user = user_factory()
remove_resource(cloned_user)
prtb = admin_mc.client.create_project_role_template_binding(
name="prtb-" + random_str(),
userId=cloned_user.user.id,
projectId=admin_pc.project.id,
roleTemplateId=cloned_admin.id
)
remove_resource(prtb)
wait_until_available(cloned_user.client, admin_pc.project)
# cloned kubernetes-admin role should not give user project-owner
# privileges, for instance, user should not be able to create enable
# monitoring
project = cloned_user.client.by_id_project(admin_pc.project.id)
assert 'enableMonitoring' not in project.actions
def test_role_template_changes_revoke_permissions(admin_mc, remove_resource,
user_factory, admin_pc):
client = admin_mc.client
name = random_str()
# clone project-owner role
po = client.by_id_role_template("project-owner")
cloned_po = client.create_role_template(name=name, context="project",
rules=po.rules,
roleTemplateIds=["admin"])
remove_resource(cloned_po)
wait_for_role_template_creation(admin_mc, name)
role_template_id = cloned_po['id']
user = user_factory()
# add a user with this cloned project-owner role to a project
prtb = admin_mc.client.create_project_role_template_binding(
name="prtb-" + random_str(),
userId=user.user.id,
projectId=admin_pc.project.id,
roleTemplateId=role_template_id
)
remove_resource(prtb)
wait_until_available(user.client, admin_pc.project)
# this user should be able to list PRTBs
def _list_prtbs():
prtbs = user.client.list_project_role_template_binding()
return len(prtbs.data) > 0
wait_for(_list_prtbs, fail_handler=lambda: "user was unable to list PRTBs")
# now edit the cloned roletemplate to remove permission
# to list projectroletemplatebindings
rules = cloned_po['rules']
for ind, rule in enumerate(rules):
if 'projectroletemplatebindings' in rule['resources']:
del rules[ind]
client.update(cloned_po, rules=rules)
def role_template_update_check():
rt = client.by_id_role_template(role_template_id)
for rule in rt['rules']:
if 'projectroletemplatebindings' in rule['resources']:
return False
return True
def fail_handler():
return "failed waiting for cloned roletemplate to be updated"
# Validate the rule was dropped
    wait_for(role_template_update_check, fail_handler=fail_handler,
             timeout=120)
# this user should NOT be able to list PRTBs
def _list_prtbs_empty():
prtbs = user.client.list_project_role_template_binding()
return len(prtbs.data) == 0
wait_for(_list_prtbs_empty,
fail_handler=lambda: "user was able to list PRTBs")
def wait_for_role_template_creation(admin_mc, rt_name, timeout=60):
start = time.time()
interval = 0.5
client = admin_mc.client
found = False
while not found:
if time.time() - start > timeout:
raise Exception('Timeout waiting for roletemplate creation')
rt = client.list_role_template(name=rt_name)
if len(rt) > 0:
found = True
time.sleep(interval)
interval *= 2
| 14,180 | 36.715426 | 79 | py |
rancher | rancher-master/tests/integration/suite/test_ingress.py |
from .common import random_str, auth_check
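# auth_check (from .common) asserts the access string expected for each schema
# field; the letters denote the allowed verbs (c=create, r=read, u=update,
# d=delete)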
def test_ingress_fields(admin_pc_client):
auth_check(admin_pc_client.schema, 'ingress', 'crud', {
'namespaceId': 'cr',
'projectId': 'cr',
'rules': 'cru',
'tls': 'cru',
'ingressClassName': 'cru',
'defaultBackend': 'cru',
'publicEndpoints': 'r',
'status': 'r',
})
auth_check(admin_pc_client.schema, 'ingressBackend', '', {
'serviceId': 'cru',
'targetPort': 'cru',
'resource': 'cru',
'workloadIds': 'cru',
})
auth_check(admin_pc_client.schema, 'ingressRule', '', {
'host': 'cru',
'paths': 'cru',
})
auth_check(admin_pc_client.schema, 'httpIngressPath', '', {
'resource': 'cru',
'pathType': 'cru',
'path': 'cru',
'serviceId': 'cru',
'targetPort': 'cru',
'workloadIds': 'cru',
})
def test_ingress(admin_pc, admin_cc_client):
client = admin_pc.client
ns = admin_cc_client.create_namespace(name=random_str(),
projectId=admin_pc.project.id)
name = random_str()
workload = client.create_workload(
name=name,
namespaceId=ns.id,
scale=1,
containers=[{
'name': 'one',
'image': 'nginx',
}])
name = random_str() + "." + random_str()
ingress = client.create_ingress(name=name,
namespaceId=ns.id,
rules=[{
'host': "foo.com",
'paths': [
{
'path': '/',
'targetPort': 80,
'workloadIds':
[workload.id],
},
]},
])
assert len(ingress.rules) == 1
assert ingress.rules[0].host == "foo.com"
path = ingress.rules[0].paths[0]
assert path.path == '/'
assert path.targetPort == 80
assert path.workloadIds == [workload.id]
assert path.serviceId is None
client.delete(ns)
def test_ingress_rules_same_hostPortPath(admin_pc, admin_cc_client):
client = admin_pc.client
ns = admin_cc_client.create_namespace(name=random_str(),
projectId=admin_pc.project.id)
name = random_str()
workload1 = client.create_workload(
name=name,
namespaceId=ns.id,
scale=1,
containers=[{
'name': 'one',
'image': 'nginx',
}])
name = random_str()
workload2 = client.create_workload(
name=name,
namespaceId=ns.id,
scale=1,
containers=[{
'name': 'one',
'image': 'nginx',
}])
name = random_str()
ingress = client.create_ingress(name=name,
namespaceId=ns.id,
rules=[{
'host': "foo.com",
'paths': [
{
'path': '/',
'targetPort': 80,
'workloadIds':
[workload1.id],
},
]},
{
'host': "foo.com",
'paths': [
{
'path': '/',
'targetPort': 80,
'workloadIds':
[workload2.id],
}
]},
])
assert len(ingress.rules) == 1
assert ingress.rules[0].host == "foo.com"
path = ingress.rules[0].paths[0]
assert path.path == '/'
assert path.targetPort == 80
assert len(path.workloadIds) == 2
assert set(path.workloadIds) == set([workload1.id, workload2.id])
assert path.serviceId is None
client.delete(ns)
| 4,630 | 31.843972 | 72 | py |
rancher | rancher-master/tests/integration/suite/test_rbac.py |
import kubernetes
import pytest
from rancher import ApiError
import time
from .common import random_str
from .test_catalog import wait_for_template_to_be_created
from .conftest import wait_until_available, wait_until, \
cluster_and_client, user_project_client, \
kubernetes_api_client, wait_for, ClusterContext, \
user_cluster_client
def test_multi_user(admin_mc, user_mc):
"""Tests a bug in the python client where multiple clients would not
work properly. All clients would get the auth header of the last client"""
# Original admin client should be able to get auth configs
ac = admin_mc.client.list_auth_config()
assert len(ac) > 0
    # User client should not. We currently don't 404 on this, which would be
    # more correct. Instead, the list gets filtered to zero
ac = user_mc.client.list_auth_config()
assert len(ac) == 0
def test_project_owner(admin_cc, admin_mc, user_mc, remove_resource):
"""Tests that a non-admin member can create a project, create and
add a namespace to it, and can do workload related things in the namespace.
This is the first test written incorporating a non-admin user and the
kubernetes python client. It does a lot partially as an experiment and
partially as an example for other yet-to-be-written tests
"""
admin_client = admin_mc.client
admin_client.create_cluster_role_template_binding(
userId=user_mc.user.id,
roleTemplateId="cluster-member",
clusterId=admin_cc.cluster.id,
)
user_client = user_mc.client
# When this returns, the user can successfully access the cluster and thus
# can create a project in it. We generally need this wait_until_available
# call when we are creating cluster, project, and namespaces as non-admins
# because until the rbac controllers have had a chance to run and the
# creator is bootstrapped into the resource, they will not be able to
# access it
wait_until_available(user_client, admin_cc.cluster)
proj_name = 'test-' + random_str()
def can_create_project():
try:
p = user_client.create_project(name=proj_name,
clusterId=admin_cc.cluster.id)
# In case something goes badly as the user, add a finalizer to
# delete the project as the admin
remove_resource(p)
return p
except ApiError as e:
assert e.error.status == 403
return False
proj = wait_for(can_create_project)
# When this returns, the user can successfully access the project and thus
# can create a namespace in it
proj = wait_until_available(user_client, proj)
proj = user_client.wait_success(proj)
assert proj.state == 'active'
k8s_client = kubernetes_api_client(user_client, 'local')
auth = kubernetes.client.AuthorizationV1Api(k8s_client)
# Rancher API doesn't have a surefire way of knowing exactly when the user
# has the ability to create namespaces yet. So we have to rely on an actual
# kubernetes auth check.
def can_create_ns():
access_review = kubernetes.client.V1SelfSubjectAccessReview(spec={
"resourceAttributes": {
'verb': 'create',
'resource': 'namespaces',
'group': '',
},
})
response = auth.create_self_subject_access_review(access_review)
return response.status.allowed
wait_for(can_create_ns)
c_client = cluster_and_client('local', user_mc.client)[1]
ns = c_client.create_namespace(name='test-' + random_str(),
projectId=proj.id)
ns = wait_until_available(c_client, ns)
ns = c_client.wait_success(ns)
assert ns.state == 'active'
# Simple proof that user can get pods in the created namespace.
# We just care that the list call does not error out
core = kubernetes.client.CoreV1Api(api_client=k8s_client)
core.list_namespaced_pod(ns.name)
# As the user, assert that the two expected role bindings exist in the
# namespace for the user. There should be one for the rancher role
# 'project-owner' and one for the k8s built-in role 'admin'
rbac = kubernetes.client.RbacAuthorizationV1Api(api_client=k8s_client)
rbs = rbac.list_namespaced_role_binding(ns.name)
rb_dict = {}
for rb in rbs.items:
if rb.subjects[0].name == user_mc.user.id:
rb_dict[rb.role_ref.name] = rb
assert 'project-owner' in rb_dict
assert 'admin' in rb_dict
# As an additional measure of proof and partially just as an exercise in
# using this particular k8s api, check that the user can create
# deployments using the subject access review api
access_review = kubernetes.client.V1LocalSubjectAccessReview(spec={
"resourceAttributes": {
'namespace': ns.name,
'verb': 'create',
'resource': 'deployments',
'group': 'extensions',
},
})
response = auth.create_self_subject_access_review(access_review)
assert response.status.allowed is True
    # list_namespaced_pod only lists pods from the core API group.
    # Listing pod metrics additionally requires permissions on the
    # metrics.k8s.io group, so as further proof use the subject access
    # review API to check that the user can list pods.metrics.k8s.io
access_review = kubernetes.client.V1LocalSubjectAccessReview(spec={
"resourceAttributes": {
'namespace': ns.name,
'verb': 'list',
'resource': 'pods',
'group': 'metrics.k8s.io',
},
})
response = auth.create_self_subject_access_review(access_review)
assert response.status.allowed is True
def test_api_group_in_role_template(admin_mc, admin_pc, user_mc,
remove_resource):
"""Test that a role moved into a cluster namespace is translated as
intended and respects apiGroups
"""
# If the admin can't see any nodes this test will fail
if len(admin_mc.client.list_node().data) == 0:
pytest.skip("no nodes in the cluster")
# Validate the standard user can not see any nodes
assert len(user_mc.client.list_node().data) == 0
rt_dict = {
"administrative": False,
"clusterCreatorDefault": False,
"context": "cluster",
"external": False,
"hidden": False,
"locked": False,
"name": random_str(),
"projectCreatorDefault": False,
"rules": [{
"apiGroups": [
"management.cattle.io"
],
"resources": ["nodes",
"nodepools"
],
"type": "/v3/schemas/policyRule",
"verbs": ["get",
"list",
"watch"
]
},
{
"apiGroups": [
"scheduling.k8s.io"
],
"resources": [
"*"
],
"type": "/v3/schemas/policyRule",
"verbs": [
"*"
]
}
],
}
rt = admin_mc.client.create_role_template(rt_dict)
remove_resource(rt)
def _wait_role_template():
return admin_mc.client.by_id_role_template(rt.id) is not None
wait_for(_wait_role_template,
fail_handler=lambda: "role template is missing")
crtb_client = admin_mc.client.create_cluster_role_template_binding
crtb = crtb_client(userPrincipalId=user_mc.user.principalIds[0],
roleTemplateId=rt.id,
clusterId='local')
remove_resource(crtb)
def _wait_on_user():
return len(user_mc.client.list_node().data) > 0
wait_for(_wait_on_user, fail_handler=lambda: "User could never see nodes")
# With the new binding user should be able to see nodes
assert len(user_mc.client.list_node().data) > 0
# The binding does not allow delete permissions
with pytest.raises(ApiError) as e:
user_mc.client.delete(user_mc.client.list_node().data[0])
assert e.value.error.status == 403
assert 'cannot delete resource "nodes"' in e.value.error.message
def test_removing_user_from_cluster(admin_pc, admin_mc, user_mc, admin_cc,
remove_resource):
"""Test that a user added to a project in a cluster is able to see that
cluster and after being removed from the project they are no longer able
to see the cluster.
"""
mbo = 'membership-binding-owner'
admin_client = admin_mc.client
prtb = admin_client.create_project_role_template_binding(
userId=user_mc.user.id,
roleTemplateId="project-member",
projectId=admin_pc.project.id,
)
remove_resource(prtb)
# Verify the user can see the cluster
wait_until_available(user_mc.client, admin_cc.cluster)
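    # The backing clusterRoleBinding is labeled with
    # "<project ns>_<prtb name>=membership-binding-owner", so build that
    # label key from the prtb id ("<project ns>:<prtb name>")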
split = str.split(prtb.id, ":")
prtb_key = split[0] + "_" + split[1]
api_instance = kubernetes.client.RbacAuthorizationV1Api(
admin_mc.k8s_client)
def crb_created():
crbs = api_instance.list_cluster_role_binding(
label_selector=prtb_key + "=" + mbo)
return len(crbs.items) == 1
# Find the expected k8s clusterRoleBinding
wait_for(crb_created,
fail_handler=lambda: "failed waiting for clusterRoleBinding"
" to get created",
timeout=120)
# Delete the projectRoleTemplateBinding, this should cause the user to no
# longer be able to see the cluster
admin_mc.client.delete(prtb)
def crb_deleted():
crbs = api_instance.list_cluster_role_binding(
label_selector=prtb_key + "=" + mbo)
return len(crbs.items) == 0
wait_for(crb_deleted,
fail_handler=lambda: "failed waiting for clusterRoleBinding"
" to get deleted",
timeout=120)
# user should now have no access to any clusters
def list_clusters():
clusters = user_mc.client.list_cluster()
return len(clusters.data) == 0
wait_for(list_clusters,
fail_handler=lambda: "failed revoking access to cluster",
timeout=120)
with pytest.raises(ApiError) as e:
user_mc.client.by_id_cluster(admin_cc.cluster.id)
assert e.value.error.status == 403
def test_upgraded_setup_removing_user_from_cluster(admin_pc, admin_mc,
user_mc, admin_cc,
remove_resource):
"""Test that a user added to a project in a cluster prior to 2.5, upon
upgrade is able to see that cluster, and after being removed from the
project they are no longer able to see the cluster.
Upgrade will be simulated by editing the CRB to include the older label
format, containing the PRTB UID
"""
mbo = 'membership-binding-owner'
# Yes, this is misspelled, it's how the actual label was spelled
# prior to 2.5.
mbo_legacy = 'memberhsip-binding-owner'
admin_client = admin_mc.client
prtb = admin_client.create_project_role_template_binding(
userId=user_mc.user.id,
roleTemplateId="project-member",
projectId=admin_pc.project.id,
)
remove_resource(prtb)
# Verify the user can see the cluster
wait_until_available(user_mc.client, admin_cc.cluster)
api_instance = kubernetes.client.RbacAuthorizationV1Api(
admin_mc.k8s_client)
split = str.split(prtb.id, ":")
prtb_key = split[0]+"_"+split[1]
def crb_created():
crbs = api_instance.list_cluster_role_binding(
label_selector=prtb_key + "=" + mbo)
return len(crbs.items) == 1
# Find the expected k8s clusterRoleBinding
wait_for(crb_created,
fail_handler=lambda: "failed waiting for clusterRoleBinding to"
"get created", timeout=120)
crbs = api_instance.list_cluster_role_binding(
label_selector=prtb_key + "=" + mbo)
assert len(crbs.items) == 1
# edit this CRB to add in the legacy label to simulate an upgraded setup
crb = crbs.items[0]
crb.metadata.labels[prtb.uuid] = mbo_legacy
api_instance.patch_cluster_role_binding(crb.metadata.name, crb)
def crb_label_updated():
crbs = api_instance.list_cluster_role_binding(
label_selector=prtb.uuid + "=" + mbo_legacy)
return len(crbs.items) == 1
wait_for(crb_label_updated,
fail_handler=lambda: "failed waiting for cluster role binding to"
"be updated", timeout=120)
# Delete the projectRoleTemplateBinding, this should cause the user to no
# longer be able to see the cluster
admin_mc.client.delete(prtb)
def crb_callback():
crbs_listed_with_new_label = api_instance.list_cluster_role_binding(
label_selector=prtb_key + "=" + mbo)
crbs_listed_with_old_label = api_instance.list_cluster_role_binding(
label_selector=prtb.uuid + "=" + mbo_legacy)
return len(crbs_listed_with_new_label.items) == 0 and\
len(crbs_listed_with_old_label.items) == 0
def fail_handler():
return "failed waiting for cluster role binding to be deleted"
wait_for(crb_callback, fail_handler=fail_handler, timeout=120)
# user should now have no access to any clusters
def list_clusters():
clusters = user_mc.client.list_cluster()
return len(clusters.data) == 0
wait_for(list_clusters,
fail_handler=lambda: "failed revoking access to cluster",
timeout=120)
with pytest.raises(ApiError) as e:
user_mc.client.by_id_cluster(admin_cc.cluster.id)
assert e.value.error.status == 403
def test_user_role_permissions(admin_mc, user_factory, remove_resource):
"""Test that a standard user can only see themselves """
admin_client = admin_mc.client
# Create 4 new users, one with user-base
user1 = user_factory()
user2 = user_factory(globalRoleId='user-base')
user_factory()
user_factory()
users = admin_client.list_user()
# Admin should see at least 5 users
assert len(users.data) >= 5
# user1 should only see themselves in the user list
users1 = user1.client.list_user()
assert len(users1.data) == 1, "user should only see themselves"
# user1 can see all roleTemplates
role_templates = user1.client.list_role_template()
assert len(role_templates.data) > 0, ("user should be able to see all " +
"roleTemplates")
# user2 should only see themselves in the user list
users2 = user2.client.list_user()
assert len(users2.data) == 1, "user should only see themselves"
# user2 should not see any role templates
role_templates = user2.client.list_role_template()
assert len(role_templates.data) == 0, ("user2 does not have permission " +
"to view roleTemplates")
def test_impersonation_passthrough(admin_mc, admin_cc, user_mc, user_factory,
remove_resource, request):
"""Test users abalility to impersonate other users"""
admin_client = admin_mc.client
user1 = user_factory()
user2 = user_factory()
admin_client.create_cluster_role_template_binding(
userId=user1.user.id,
roleTemplateId="cluster-member",
clusterId=admin_cc.cluster.id,
)
admin_client.create_cluster_role_template_binding(
userId=user2.user.id,
roleTemplateId="cluster-owner",
clusterId=admin_cc.cluster.id,
)
wait_until_available(user1.client, admin_cc.cluster)
wait_until_available(user2.client, admin_cc.cluster)
admin_k8s_client = kubernetes_api_client(admin_client, 'local')
user1_k8s_client = kubernetes_api_client(user1.client, 'local')
user2_k8s_client = kubernetes_api_client(user2.client, 'local')
admin_auth = kubernetes.client.AuthorizationV1Api(admin_k8s_client)
user1_auth = kubernetes.client.AuthorizationV1Api(user1_k8s_client)
user2_auth = kubernetes.client.AuthorizationV1Api(user2_k8s_client)
access_review = kubernetes.client.V1SelfSubjectAccessReview(spec={
"resourceAttributes": {
'verb': 'impersonate',
'resource': 'users',
'group': '',
},
})
# Admin can always impersonate
response = admin_auth.create_self_subject_access_review(access_review)
assert response.status.allowed is True
# User1 is a member of the cluster which does not grant impersonate
response = user1_auth.create_self_subject_access_review(access_review)
assert response.status.allowed is False
# User2 is an owner/admin which allows them to impersonate
def _access_check():
response = user2_auth.create_self_subject_access_review(access_review)
return response.status.allowed is True
wait_for(_access_check, fail_handler=lambda: "user2 does not have access")
    # Add a cluster role and binding that allow user1 to impersonate user2
admin_rbac = kubernetes.client.RbacAuthorizationV1Api(admin_k8s_client)
body = kubernetes.client.V1ClusterRole(
metadata={'name': 'limited-impersonator'},
rules=[{
'resources': ['users'],
'apiGroups': [''],
'verbs': ['impersonate'],
'resourceNames': [user2.user.id]
}]
)
impersonate_role = admin_rbac.create_cluster_role(body)
request.addfinalizer(lambda: admin_rbac.delete_cluster_role(
impersonate_role.metadata.name))
binding = kubernetes.client.V1ClusterRoleBinding(
metadata={'name': 'limited-impersonator-binding'},
role_ref={
'apiGroups': [''],
'kind': 'ClusterRole',
'name': 'limited-impersonator'
},
subjects=[{'kind': 'User', 'name': user1.user.id}]
)
impersonate_role_binding = admin_rbac.create_cluster_role_binding(binding)
request.addfinalizer(lambda: admin_rbac.delete_cluster_role_binding(
impersonate_role_binding.metadata.name))
access_review2 = kubernetes.client.V1SelfSubjectAccessReview(spec={
"resourceAttributes": {
'verb': 'impersonate',
'resource': 'users',
'group': '',
'name': user2.user.id
},
})
    # User1 should now be able to impersonate user2
def _access_check2():
response = user1_auth.create_self_subject_access_review(access_review2)
return response.status.allowed is True
wait_for(_access_check2, fail_handler=lambda: "user1 does not have access")
def test_permissions_can_be_removed(admin_cc, admin_mc, user_mc, request,
remove_resource, admin_pc_factory):
def create_project_and_add_user():
admin_pc_instance = admin_pc_factory()
prtb = admin_mc.client.create_project_role_template_binding(
userId=user_mc.user.id,
roleTemplateId="project-member",
projectId=admin_pc_instance.project.id,
)
remove_resource(prtb)
wait_until_available(user_mc.client, admin_pc_instance.project)
return admin_pc_instance, prtb
admin_pc1, _ = create_project_and_add_user()
admin_pc2, prtb2 = create_project_and_add_user()
def add_namespace_to_project(admin_pc):
def safe_remove(client, resource):
try:
client.delete(resource)
except ApiError:
pass
ns = admin_cc.client.create_namespace(name=random_str(),
projectId=admin_pc.project.id)
request.addfinalizer(lambda: safe_remove(admin_cc.client, ns))
def ns_active():
new_ns = admin_cc.client.reload(ns)
return new_ns.state == 'active'
wait_for(ns_active)
add_namespace_to_project(admin_pc1)
def new_user_cc(user_mc):
cluster, client = cluster_and_client('local', user_mc.client)
return ClusterContext(user_mc, cluster, client)
user_cc = new_user_cc(user_mc)
wait_for(lambda: ns_count(user_cc.client, 1), timeout=60)
add_namespace_to_project(admin_pc2)
user_cc = new_user_cc(user_mc)
wait_for(lambda: ns_count(user_cc.client, 2), timeout=60)
admin_mc.client.delete(prtb2)
user_cc = new_user_cc(user_mc)
wait_for(lambda: ns_count(user_cc.client, 1), timeout=60)
def ns_count(client, count):
return len(client.list_namespace()) == count
def test_appropriate_users_can_see_kontainer_drivers(user_factory):
kds = user_factory().client.list_kontainer_driver()
assert len(kds) == 11
kds = user_factory('clusters-create').client.list_kontainer_driver()
assert len(kds) == 11
kds = user_factory('kontainerdrivers-manage').client. \
list_kontainer_driver()
assert len(kds) == 11
kds = user_factory('settings-manage').client.list_kontainer_driver()
assert len(kds) == 0
def test_readonly_cannot_perform_app_action(admin_mc, admin_pc, user_mc,
remove_resource):
"""Tests that a user with readonly access is not able to upgrade an app
"""
client = admin_pc.client
project = admin_pc.project
user = user_mc
remove_resource(user)
ns = admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=project.id)
remove_resource(ns)
wait_for_template_to_be_created(admin_mc.client, "library")
prtb = admin_mc.client.create_project_role_template_binding(
name="prtb-" + random_str(),
userId=user.user.id,
projectId=project.id,
roleTemplateId="read-only")
remove_resource(prtb)
wait_until_available(user.client, project)
app = client.create_app(
name="app-" + random_str(),
externalId="catalog://?catalog=library&template=mysql&version=0.3.7&"
"namespace=cattle-global-data",
targetNamespace=ns.name,
projectId=project.id
)
with pytest.raises(ApiError) as e:
user.client.action(obj=app, action_name="upgrade",
answers={"abc": "123"})
assert e.value.error.status == 403
with pytest.raises(ApiError) as e:
user.client.action(obj=app, action_name="rollback",
revisionId="test")
assert e.value.error.status == 403
def test_member_can_perform_app_action(admin_mc, admin_pc, remove_resource,
user_mc):
"""Tests that a user with member access is able to upgrade an app
"""
client = admin_pc.client
project = admin_pc.project
user = user_mc
remove_resource(user)
ns = admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=project.id)
remove_resource(ns)
wait_for_template_to_be_created(admin_mc.client, "library")
prtb = admin_mc.client.create_project_role_template_binding(
name="test-" + random_str(),
userId=user.user.id,
projectId=project.id,
roleTemplateId="project-owner")
remove_resource(prtb)
wait_until_available(user.client, project)
app = client.create_app(
name="test-" + random_str(),
externalId="catalog://?catalog=library&template"
"=mysql&version=1.3.1&"
"namespace=cattle-global-data",
targetNamespace=ns.name,
projectId=project.id
)
    # if the upgrade is performed before the app reaches the installing
    # state, the API may return a conflict (modified) error
    def is_installing():
        current_state = client.reload(app)
        return current_state.state == "installing"
try:
wait_for(is_installing)
except Exception as e:
        # a timeout here is okay; the intention of the wait_for is to reach a
        # steady state, and this test is not concerned with whether the app
        # reaches the installing state or not
assert "Timeout waiting for condition" in str(e)
user.client.action(
obj=app,
action_name="upgrade",
answers={"asdf": "asdf"})
def _app_revisions_exist():
a = admin_pc.client.reload(app)
return len(a.revision().data) > 0
wait_for(_app_revisions_exist, timeout=60,
fail_handler=lambda: 'no revisions exist')
proj_user_client = user_project_client(user_mc, project)
app = proj_user_client.reload(app)
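    # revision ids may come back prefixed with the project id
    # ("<project>:<revision>"); strip the prefix before passing the bare
    # revision name to the rollback action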
revID = app.revision().data[0]['id']
revID = revID.split(":")[1] if ":" in revID else revID
user.client.action(
obj=app,
action_name="rollback",
revisionId=revID
)
def test_readonly_cannot_edit_secret(admin_mc, user_mc, admin_pc,
remove_resource):
"""Tests that a user with readonly access is not able to create/update
a secret or ns secret
"""
project = admin_pc.project
user_client = user_mc.client
prtb = admin_mc.client.create_project_role_template_binding(
name="prtb-" + random_str(),
userId=user_mc.user.id,
projectId=project.id,
roleTemplateId="read-only"
)
remove_resource(prtb)
wait_until_available(user_client, project)
proj_user_client = user_project_client(user_mc, project)
    # readonly should fail to create a regular secret
with pytest.raises(ApiError) as e:
proj_user_client.create_secret(
name="test-" + random_str(),
stringData={
'abc': '123'
}
)
assert e.value.error.status == 403
secret = admin_pc.client.create_secret(
name="test-" + random_str(),
stringData={
'abc': '123'
}
)
remove_resource(secret)
wait_until_available(admin_pc.client, secret)
    # readonly should fail to update a regular secret
with pytest.raises(ApiError) as e:
proj_user_client.update_by_id_secret(
id=secret.id,
stringData={
'asd': 'fgh'
}
)
assert e.value.error.status == 404
ns = admin_pc.cluster.client.create_namespace(
name='test-' + random_str(),
projectId=project.id
)
remove_resource(ns)
# readonly should fail to create ns secret
with pytest.raises(ApiError) as e:
proj_user_client.create_namespaced_secret(
namespaceId=ns.id,
name="test-" + random_str(),
stringData={
'abc': '123'
}
)
assert e.value.error.status == 403
ns_secret = admin_pc.client.create_namespaced_secret(
namespaceId=ns.id,
name="test-" + random_str(),
stringData={
'abc': '123'
}
)
remove_resource(ns_secret)
wait_until_available(admin_pc.client, ns_secret)
# readonly should fail to update ns secret
with pytest.raises(ApiError) as e:
proj_user_client.update_by_id_namespaced_secret(
namespaceId=ns.id,
id=ns_secret.id,
stringData={
'asd': 'fgh'
}
)
assert e.value.error.status == 404
def test_member_can_edit_secret(admin_mc, admin_pc, remove_resource,
user_mc):
"""Tests that a user with project-member role is able to create/update
secrets and namespaced secrets
"""
project = admin_pc.project
user_client = user_mc.client
ns = admin_pc.cluster.client.create_namespace(
name='test-' + random_str(),
projectId=project.id
)
remove_resource(ns)
prtb = admin_mc.client.create_project_role_template_binding(
name="prtb-" + random_str(),
userId=user_mc.user.id,
projectId=project.id,
roleTemplateId="project-member"
)
remove_resource(prtb)
wait_until_available(user_client, project)
proj_user_client = user_project_client(user_mc, project)
def try_create_secret():
try:
return proj_user_client.create_secret(
name="secret-" + random_str(),
stringData={
'abc': '123'
}
)
except ApiError as e:
assert e.error.status == 403
return False
# Permission to create secret may not have been granted yet,
# so it will be retried for 45 seconds
secret = wait_for(try_create_secret, fail_handler=lambda:
"do not have permission to create secret")
remove_resource(secret)
wait_until_available(proj_user_client, secret)
proj_user_client.update_by_id_secret(id=secret.id, stringData={
'asd': 'fgh'
})
def try_create_ns_secret():
try:
return proj_user_client.create_namespaced_secret(
name="secret-" + random_str(),
namespaceId=ns.id,
stringData={
"abc": "123"
}
)
except ApiError as e:
assert e.error.status == 403
return False
ns_secret = wait_for(try_create_ns_secret, fail_handler=lambda:
"do not have permission to create ns secret")
remove_resource(ns_secret)
wait_until_available(proj_user_client, ns_secret)
proj_user_client.update_by_id_namespaced_secret(
namespaceId=ns.id,
id=ns_secret.id,
stringData={
"asd": "fgh"
}
)
def test_readonly_cannot_move_namespace(
admin_cc, admin_mc, user_mc, remove_resource):
"""Tests that a user with readonly access is not able to
move namespace across projects. Makes 2 projects and one
namespace and then moves NS across.
"""
p1 = admin_mc.client.create_project(
name='test-' + random_str(),
clusterId=admin_cc.cluster.id
)
remove_resource(p1)
p1 = admin_cc.management.client.wait_success(p1)
p2 = admin_mc.client.create_project(
name='test-' + random_str(),
clusterId=admin_cc.cluster.id
)
remove_resource(p2)
p2 = admin_mc.client.wait_success(p2)
# Use k8s client to see if project namespace exists
k8s_client = kubernetes.client.CoreV1Api(admin_mc.k8s_client)
wait_until(cluster_has_namespace(k8s_client, p1.id.split(":")[1]))
wait_until(cluster_has_namespace(k8s_client, p2.id.split(":")[1]))
prtb = admin_mc.client.create_project_role_template_binding(
name="prtb-" + random_str(),
userId=user_mc.user.id,
projectId=p1.id,
roleTemplateId="read-only")
remove_resource(prtb)
prtb2 = admin_mc.client.create_project_role_template_binding(
name="prtb-" + random_str(),
userId=user_mc.user.id,
projectId=p2.id,
roleTemplateId="read-only")
remove_resource(prtb2)
wait_until_available(user_mc.client, p1)
wait_until_available(user_mc.client, p2)
ns = admin_cc.client.create_namespace(
name=random_str(),
projectId=p1.id
)
wait_until_available(admin_cc.client, ns)
remove_resource(ns)
cluster_user_client = user_cluster_client(user_mc, admin_cc.cluster)
wait_until_available(cluster_user_client, ns)
with pytest.raises(ApiError) as e:
user_mc.client.action(obj=ns, action_name="move", projectId=p2.id)
assert e.value.error.status == 404
def wait_for_workload(client, ns, timeout=60, count=0):
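    """Poll, with exponential backoff, until the namespace has exactly
    `count` workloads, or raise once the timeout is reached."""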
start = time.time()
interval = 0.5
workloads = client.list_workload(namespaceId=ns)
while len(workloads.data) != count:
if time.time() - start > timeout:
print(workloads)
raise Exception('Timeout waiting for workload service')
time.sleep(interval)
interval *= 2
workloads = client.list_workload(namespaceId=ns)
return workloads
def cluster_has_namespace(client, ns_name):
"""Wait for the give namespace to exist, useful for project namespaces"""
def cb():
return ns_name in \
[ns.metadata.name for ns in client.list_namespace().items]
return cb
| 32,471 | 33.037736 | 79 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_kontainer_engine_annotations.py
|
from .common import random_str
from .conftest import wait_until, wait_for
annotation = "clusterstatus.management.cattle.io/" \
"temporary-security-credentials"
access_key = "accessKey"
secret_key = "secretKey"
session_token = "sessionToken"
region = "region"
"""
There are effectively two ways that an EKS cluster gets a temporary \
security credentials annotation. The first is when the cluster is created \
with a session token, in which case the annotation is added in \
cluster_store.go. The second is when an existing cluster is edited to add \
a session token, in which case a controller watches for the change and \
applies the annotation. We test for both of those scenarios here.
"""
def has_cluster_annotation(client, cluster, expected=None):
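    """Return a poll callback that checks whether the cluster has the
    temporary-security-credentials annotation and, if `expected` is given,
    that the annotation has that exact value."""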
def poll():
cluster2 = client.reload(cluster)
has_attribute = hasattr(cluster2.annotations, annotation)
if expected is not None:
return has_attribute and cluster2.annotations[annotation] == \
expected
else:
return has_attribute
return poll
def assert_cluster_annotation(expected, admin_mc, remove_resource, config):
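    """Create an EKS cluster with the given config and assert that the
    temporary-security-credentials annotation equals `expected`, both on
    the create response and after a reload."""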
cluster = admin_mc.client.create_cluster(
name=random_str(), amazonElasticContainerServiceConfig=config)
remove_resource(cluster)
assert cluster.annotations[annotation] == expected
wait_until(has_cluster_annotation(admin_mc.client, cluster))
cluster = admin_mc.client.reload(cluster)
assert cluster.annotations[annotation] == expected
return cluster
def test_eks_cluster_gets_temp_security_credentials_annotation(
admin_mc, remove_resource):
eks = {
access_key: "not a real access key",
secret_key: "not a real secret key",
session_token: "not a real session token",
region: "us-west-2",
}
assert_cluster_annotation("true", admin_mc, remove_resource, eks)
def test_eks_does_not_get_temp_security_creds_annotation_no_field(
admin_mc, remove_resource):
eks = {
access_key: "not a real access key",
secret_key: "not a real secret key",
region: "us-west-2",
}
assert_cluster_annotation("false", admin_mc, remove_resource, eks)
def test_eks_does_not_get_temp_security_creds_annotation_empty_field(
admin_mc, remove_resource):
eks = {
access_key: "not a real access key",
secret_key: "not a real secret key",
session_token: "",
region: "us-west-2",
}
assert_cluster_annotation("false", admin_mc, remove_resource, eks)
def test_editing_eks_cluster_gives_temp_creds_annotation(
admin_mc, remove_resource):
eks = {
access_key: "not a real access key",
secret_key: "not a real secret key",
region: "us-west-2",
}
cluster = assert_cluster_annotation("false", admin_mc, remove_resource,
eks)
eks = cluster.amazonElasticContainerServiceConfig
setattr(eks, session_token, "not a real session token")
cluster = admin_mc.client.update_by_id_cluster(
id=cluster.id,
name=cluster.name,
amazonElasticContainerServiceConfig=eks
)
wait_for(has_cluster_annotation(admin_mc.client, cluster,
expected="true"), timeout=120)
cluster = admin_mc.client.reload(cluster)
assert cluster.annotations[annotation] == "true"
| 3,429 | 29.900901 | 75 |
py
|
rancher
|
rancher-master/tests/integration/suite/__init__.py
| 0 | 0 | 0 |
py
|
|
rancher
|
rancher-master/tests/integration/suite/test_notifier.py
|
from kubernetes.client import CustomObjectsApi
from .common import random_str
def test_notifier_smtp_password(admin_mc, remove_resource):
client = admin_mc.client
name = random_str()
password = random_str()
notifier = client.create_notifier(clusterId="local",
name=name,
smtpConfig={
"defaultRecipient": "test",
"host": "test",
"port": "587",
"sender": "test",
"tls": "true",
"username": "test",
"password": password
})
remove_resource(notifier)
assert notifier is not None
# Test password not present in api
assert notifier['smtpConfig'].get('password') is None
crd_client = get_crd_client(admin_mc)
ns, name = notifier["id"].split(":")
# Test password is in k8s after creation
verify_smtp_password(crd_client, ns, name, password)
# Test noop, password field should be as it is
notifier = client.update(notifier, smtpConfig=notifier['smtpConfig'])
verify_smtp_password(crd_client, ns, name, password)
# Test updating password
new_password = random_str()
notifier = client.update(notifier, smtpConfig={
"password": new_password})
verify_smtp_password(crd_client, ns, name, new_password)
# Test updating field non-password related
notifier = client.update(notifier, smtpConfig={"username": "test2"})
notifier = client.reload(notifier)
assert notifier["smtpConfig"]["username"] == "test2"
# Test the password in crd remains the same value after updating username
verify_smtp_password(crd_client, ns, name, new_password)
def verify_smtp_password(crd_client, ns, name, password):
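    """Read the notifier custom resource directly from k8s and assert that
    the stored smtpConfig password matches the expected value."""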
crd_dict = {
'group': 'management.cattle.io',
'version': 'v3',
'namespace': 'local',
'plural': 'notifiers',
'name': name,
}
k8s_notifier = crd_client.get_namespaced_custom_object(**crd_dict)
smtp_password = k8s_notifier['spec']['smtpConfig']['password']
assert smtp_password == password
def get_crd_client(admin_mc):
return CustomObjectsApi(admin_mc.k8s_client)
| 2,429 | 37.571429 | 77 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_project_catalog.py
|
from .conftest import wait_until, wait_until_available, wait_for_condition,\
user_project_client
from rancher import ApiError
from .common import random_str
import time
def test_project_catalog_creation(admin_mc, remove_resource,
user_mc, user_factory, admin_pc,
admin_cc):
client = admin_mc.client
# When project-owner tries to create project catalog, it should succeed
prtb_owner = client.create_project_role_template_binding(
projectId=admin_pc.project.id,
roleTemplateId="project-owner",
userId=admin_mc.user.id,)
remove_resource(prtb_owner)
wait_until(prtb_cb(client, prtb_owner))
project_owner_client = client
name = random_str()
project_name = str.lstrip(admin_pc.project.id, "local:")
catalog_name = project_name + ":" + name
url = "https://github.com/mrajashree/charts.git"
project = admin_pc.project
project_catalog = \
project_owner_client.create_project_catalog(name=name,
branch="onlyOne",
url=url,
projectId=project.id,
)
wait_for_projectcatalog_template_to_be_created(project_owner_client,
catalog_name)
# The project-owner should now be able to access the project level
# catalog and its templates
cc = project_owner_client.list_project_catalog(name=name)
assert len(cc) == 1
templates = \
project_owner_client.list_template(projectCatalogId=catalog_name)
assert len(templates) == 1
templateversions = \
project_owner_client.list_template(projectCatalogId=catalog_name)
assert len(templateversions) == 1
# Now add a user as project-member to this project
prtb_member = client.create_project_role_template_binding(
projectId=project.id,
roleTemplateId="project-member",
userId=user_mc.user.id,)
remove_resource(prtb_member)
wait_until_available(user_mc.client, admin_cc.cluster)
wait_until(prtb_cb(client, prtb_member))
project_member_client = user_mc.client
# The project-member should now be able to access the project level
# catalog and its templates
cc = project_member_client.list_project_catalog()
assert len(cc) == 1
templates = \
project_member_client.list_template(projectCatalogId=catalog_name)
assert len(templates) == 1
templateversions = \
project_member_client.list_template(projectCatalogId=catalog_name)
assert len(templateversions) == 1
# But project-member should not be able to create a project catalog
try:
project_member_client.create_project_catalog(name=random_str(),
branch="onlyOne",
url=url,
projectId=project.id,
)
except ApiError as e:
assert e.error.status == 403
# Create another user and don't add to project, this user should not
# be able to access this cluster catalog or its templates
user2 = user_factory()
templates = \
user2.client.list_template(projectCatalogId=catalog_name)
assert len(templates) == 0
cc = user2.client.list_cluster_catalog(name=name)
assert len(cc) == 0
client.delete(project_catalog)
wait_for_projectcatalog_template_to_be_deleted(client, catalog_name)
def test_create_project_catalog_after_user_addition(admin_mc,
user_factory,
remove_resource,
admin_pc):
# Create a new user
user1 = user_factory()
remove_resource(user1)
client = admin_mc.client
project = admin_pc.project
# Add this user as project-member
prtb_member = client.create_project_role_template_binding(
projectId=project.id,
roleTemplateId="project-member",
userId=user1.user.id)
remove_resource(prtb_member)
wait_until(prtb_cb(client, prtb_member))
# Create project-level catalog for this project as admin
name = random_str()
project_name = str.lstrip(admin_pc.project.id, "local:")
catalog_name = project_name + ":" + name
url = "https://github.com/mrajashree/charts.git"
project = admin_pc.project
project_owner_client = client
project_catalog = \
project_owner_client.create_project_catalog(name=name,
branch="onlyOne",
url=url,
projectId=project.id,
)
wait_for_projectcatalog_template_to_be_created(project_owner_client,
catalog_name)
# The project-owner should now be able to access the project level
# catalog and its templates
cc = project_owner_client.list_project_catalog(name=name)
assert len(cc) == 1
templates = \
project_owner_client.list_template(projectCatalogId=catalog_name)
assert len(templates) == 1
templateversions = \
project_owner_client.list_template(projectCatalogId=catalog_name)
assert len(templateversions) == 1
project_member_client = user1.client
# The project-member should now be able to access the project level
# catalog and its templates
cc = project_member_client.list_project_catalog()
assert len(cc) == 1
templates = \
project_member_client.list_template(projectCatalogId=catalog_name)
assert len(templates) == 1
client.delete(project_catalog)
wait_for_projectcatalog_template_to_be_deleted(client, catalog_name)
def test_user_addition_after_creating_project_catalog(admin_mc,
user_factory,
remove_resource,
admin_pc):
# Create project-level catalog for this project as admin
client = admin_mc.client
name = random_str()
project_name = str.lstrip(admin_pc.project.id, "local:")
catalog_name = project_name + ":" + name
url = "https://github.com/mrajashree/charts.git"
project = admin_pc.project
project_owner_client = client
project_catalog = \
project_owner_client.create_project_catalog(name=name,
branch="onlyOne",
url=url,
projectId=project.id,
)
wait_for_projectcatalog_template_to_be_created(project_owner_client,
catalog_name)
# The project-owner should now be able to access the project level
# catalog and its templates
cc = project_owner_client.list_project_catalog(name=name)
assert len(cc) == 1
templates = \
project_owner_client.list_template(projectCatalogId=catalog_name)
assert len(templates) == 1
    templateversions = \
        project_owner_client.list_template(projectCatalogId=catalog_name)
    assert len(templateversions) == 1
# Create a new user
user1 = user_factory()
remove_resource(user1)
project = admin_pc.project
# Add this user as project-member
prtb_member = client.create_project_role_template_binding(
projectId=project.id,
roleTemplateId="project-member",
userId=user1.user.id)
remove_resource(prtb_member)
wait_until(prtb_cb(client, prtb_member))
project_member_client = user1.client
# The project-member should now be able to access the project level
# catalog and its templates
cc = project_member_client.list_project_catalog()
assert len(cc) == 1
templates = \
project_member_client.list_template(projectCatalogId=catalog_name)
assert len(templates) == 1
templateversions = \
project_member_client.list_template(projectCatalogId=catalog_name)
assert len(templateversions) == 1
client.delete(project_catalog)
wait_for_projectcatalog_template_to_be_deleted(client, catalog_name)
def test_project_catalog_access_before_app_creation(admin_mc, admin_pc,
remove_resource,
user_factory):
ns = admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=admin_pc.
project.id)
remove_resource(ns)
client = admin_mc.client
name = random_str()
new_project = client.create_project(name=random_str(), clusterId='local')
remove_resource(new_project)
wait_for_condition('InitialRolesPopulated', 'True', client, new_project)
new_project = client.reload(new_project)
project_name = str.lstrip(new_project.id, "local:")
catalog_name = project_name + ":" + name
url = "https://github.com/rancher/integration-test-charts.git"
client.create_project_catalog(name=name,
branch="master",
url=url,
projectId=new_project.id,
)
wait_for_projectcatalog_template_to_be_created(client,
catalog_name)
external_id = "catalog://?catalog="+project_name+"/"+name + \
"&type=projectCatalog&template=chartmuseum" \
"&version=1.6.0"
user = user_factory()
# Add this user as project-owner
prtb_owner = client.create_project_role_template_binding(
projectId=admin_pc.project.id,
roleTemplateId="project-owner",
userId=user.user.id)
remove_resource(prtb_owner)
wait_until(prtb_cb(client, prtb_owner))
u_p_client = user_project_client(user, admin_pc.project)
try:
# creating app in user's project, using template version from
# new_project should not be allowed
u_p_client.create_app(
name=random_str(),
externalId=external_id,
targetNamespace=ns.name,
projectId=admin_pc.project.id,
)
except ApiError as e:
assert e.error.status == 404
assert "Cannot find template version" in e.error.message
def wait_for_projectcatalog_template_to_be_created(client, name, timeout=45):
found = False
start = time.time()
interval = 0.5
while not found:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for templates")
templates = client.list_template(projectCatalogId=name)
if len(templates) > 0:
found = True
time.sleep(interval)
interval *= 2
def wait_for_projectcatalog_template_to_be_deleted(client, name, timeout=45):
found = False
start = time.time()
interval = 0.5
while not found:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for templates")
templates = client.list_template(projectCatalogId=name)
if len(templates) == 0:
found = True
time.sleep(interval)
interval *= 2
def prtb_cb(client, prtb):
"""Wait for the crtb to have the userId populated"""
def cb():
p = client.reload(prtb)
return p.userPrincipalId is not None
return cb
def cr_rule_template(api_instance, cr_name, cr, resource):
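    """Return a callback that checks whether the named cluster role has a
    rule for `resource` with resourceNames populated."""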
def cb():
c = api_instance.read_cluster_role(cr_name)
cr_rules = c.rules
for i in range(0, len(cr_rules)):
if cr_rules[i].resources[0] == resource:
return cr_rules[i].resource_names is not None
return cb
| 12,258 | 37.91746 | 77 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_users.py
|
import pytest
from kubernetes.client import CustomObjectsApi
from rancher import ApiError
from .conftest import random_str, wait_for
grbAnno = "cleanup.cattle.io/grbUpgradeCluster"
rtAnno = "cleanup.cattle.io/rtUpgradeCluster"
def test_user_cant_delete_self(admin_mc):
client = admin_mc.client
with pytest.raises(ApiError) as e:
client.delete(admin_mc.user)
assert e.value.error.status == 422
def test_user_cant_deactivate_self(admin_mc):
client = admin_mc.client
with pytest.raises(ApiError) as e:
client.update(admin_mc.user, enabled=False)
assert e.value.error.status == 422
def test_globalrolebinding_finalizer_cleanup(admin_mc, remove_resource):
"""This ensures that globalrolebinding cleanup of clusters < v2.2.8
is performed correctly"""
client = admin_mc.client
grb = client.create_globalRoleBinding(
globalRoleId="admin", userId="u-" + random_str()
)
remove_resource(grb)
assert grb.annotations[grbAnno] == "true"
# create a grb without the rancher api with a bad finalizer
api = CustomObjectsApi(admin_mc.k8s_client)
json = {
"apiVersion": "management.cattle.io/v3",
"globalRoleName": "admin",
"kind": "GlobalRoleBinding",
"metadata": {
"finalizers": ["clusterscoped.controller.cattle.io/grb-sync_fake"],
"generation": 1,
"name": "grb-" + random_str(),
},
"userName": "u-" + random_str(),
}
grb_k8s = api.create_cluster_custom_object(
group="management.cattle.io",
version="v3",
plural="globalrolebindings",
body=json,
)
grb_name = grb_k8s["metadata"]["name"]
grb_k8s = client.by_id_globalRoleBinding(id=grb_name)
remove_resource(grb_k8s)
def check_annotation():
grb1 = client.by_id_globalRoleBinding(grb_k8s.id)
try:
if grb1.annotations[grbAnno] == "true":
return True
else:
return False
except (AttributeError, KeyError):
return False
wait_for(check_annotation, fail_handler=lambda: "annotation was not added")
grb1 = api.get_cluster_custom_object(
group="management.cattle.io",
version="v3",
plural="globalrolebindings",
name=grb_k8s.id,
)
assert (
"clusterscoped.controller.cattle.io/grb-sync_fake"
not in grb1["metadata"]["finalizers"]
)
def test_roletemplate_finalizer_cleanup(admin_mc, remove_resource):
""" This ensures that roletemplates cleanup for clusters < v2.2.8
is performed correctly"""
client = admin_mc.client
rt = client.create_roleTemplate(name="rt-" + random_str())
remove_resource(rt)
assert rt.annotations[rtAnno] == "true"
# create rt without rancher api with a bad finalizer
api = CustomObjectsApi(admin_mc.k8s_client)
json = {
"apiVersion": "management.cattle.io/v3",
"kind": "RoleTemplate",
"metadata": {
"finalizers": [
"clusterscoped.controller.cattle.io/" +
"cluster-roletemplate-sync_fake",
"fake-finalizer"
],
"name": "test-" + random_str(),
}
}
rt_k8s = api.create_cluster_custom_object(
group="management.cattle.io",
version="v3",
plural="roletemplates",
body=json,
)
rt_name = rt_k8s["metadata"]["name"]
rt_k8s = client.by_id_roleTemplate(id=rt_name)
remove_resource(rt_k8s)
def check_annotation():
rt1 = client.by_id_roleTemplate(rt_k8s.id)
try:
if rt1.annotations[rtAnno] == "true":
return True
else:
return False
except (AttributeError, KeyError):
return False
wait_for(check_annotation, fail_handler=lambda: "annotation was not added")
rt1 = api.get_cluster_custom_object(
group="management.cattle.io",
version="v3",
plural="roletemplates",
name=rt_k8s.id,
)
if "finalizers" in rt1["metadata"]:
assert "clusterscoped.controller.cattle.io/grb-sync_fake" \
not in rt1["metadata"]["finalizers"]
| 4,230 | 30.81203 | 79 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_kontainer_engine_config.py
|
from .common import random_str
def test_gke_config_appears_correctly(admin_mc, remove_resource):
cluster = admin_mc.client.create_cluster(
name=random_str(), googleKubernetesEngineConfig={
"credentials": "bad credentials",
"nodeCount": 3
})
remove_resource(cluster)
# test that a cluster returned from a POST has the correct config
assert cluster.googleKubernetesEngineConfig.nodeCount == 3
clusters = admin_mc.client.list_cluster(name=cluster.name)
# test that a cluster returned from a list has the correct config
assert len(clusters) == 1
assert clusters.data[0].googleKubernetesEngineConfig.nodeCount == 3
cluster = admin_mc.client.by_id_cluster(id=cluster.id)
# test that a cluster returned from a GET has the correct config
assert cluster.googleKubernetesEngineConfig.nodeCount == 3
cluster.googleKubernetesEngineConfig.nodeCount = 4
cluster = admin_mc.client.update_by_id_cluster(cluster.id, cluster)
# test that a cluster returned from a PUT has the correct config
assert cluster.googleKubernetesEngineConfig.nodeCount == 4
def test_eks_config_appears_correctly(admin_mc, remove_resource):
""" Simple test to ensure that cluster returned from POST is correct"""
cluster = admin_mc.client.create_cluster(
name=random_str(), amazonElasticContainerServiceConfig={
"accessKey": "MyAccessKey",
"ami": "",
"associateWorkerNodePublicIp": True,
"displayName": "EKS-api-cluster",
"driverName": "amazonelasticcontainerservice",
"instanceType": "t3.small",
"kubernetesVersion": "1.14",
"maximumNodes": 3,
"minimumNodes": 1,
"region": "us-east-2",
"secretKey": "secret-key",
"serviceRole": "",
"sessionToken": "",
"userData": "!#/bin/bash\ntouch /tmp/testfile.txt",
"virtualNetwork": "",
})
remove_resource(cluster)
# test cluster returned from POST has correct config
assert cluster.amazonElasticContainerServiceConfig.maximumNodes == 3
assert (cluster.amazonElasticContainerServiceConfig.userData ==
"!#/bin/bash\ntouch /tmp/testfile.txt")
clusters = admin_mc.client.list_cluster(name=cluster.name)
# test that a cluster returned from a list has the correct config
assert len(clusters) == 1
assert (clusters.data[0].amazonElasticContainerServiceConfig.maximumNodes
== 3)
cluster = admin_mc.client.by_id_cluster(cluster.id)
# test that a cluster returned from a GET has the correct config
assert cluster.amazonElasticContainerServiceConfig.maximumNodes == 3
cluster.amazonElasticContainerServiceConfig.maximumNodes = 5
cluster = admin_mc.client.update_by_id_cluster(cluster.id, cluster)
# test that cluster returned from PUT has correct config
assert cluster.amazonElasticContainerServiceConfig.maximumNodes == 5
def test_rke_config_appears_correctly(admin_mc, remove_resource):
""" Testing a single field from the RKE config to ensure that the
schema is properly populated"""
cluster = admin_mc.client.create_cluster(
name=random_str(), rancherKubernetesEngineConfig={
"kubernetesVersion": "v1.12.9-rancher1-1",
})
remove_resource(cluster)
k8s_version = cluster.rancherKubernetesEngineConfig.kubernetesVersion
assert k8s_version == "v1.12.9-rancher1-1"
def test_rke_config_no_change_k8sversion_addon(admin_mc, remove_resource):
""" Testing if kubernetesVersion stays the same after updating
something else in the cluster, e.g. addonJobTimeout"""
k8s_version = "v1.12.9-rancher1-1"
cluster = admin_mc.client.create_cluster(
name=random_str(), rancherKubernetesEngineConfig={
"kubernetesVersion": k8s_version,
})
remove_resource(cluster)
cluster = admin_mc.client.update_by_id_cluster(
id=cluster.id,
name=cluster.name,
rancherKubernetesEngineConfig={
"addonJobTimeout": 45,
})
k8s_version_post = cluster.rancherKubernetesEngineConfig.kubernetesVersion
assert k8s_version_post == k8s_version
def test_rke_config_no_change_k8sversion_np(admin_mc, remove_resource):
""" Testing if kubernetesVersion stays the same after updating
something else in the cluster, e.g. addonJobTimeout"""
cluster_config_np_false = {
"enableNetworkPolicy": "false",
"rancherKubernetesEngineConfig": {
"addonJobTimeout": 45,
"kubernetesVersion": "v1.12.9-rancher1-1",
"network": {
"plugin": "canal",
}
}
}
cluster = admin_mc.client.create_cluster(
name=random_str(),
cluster=cluster_config_np_false,
)
remove_resource(cluster)
cluster_config_np_true = {
"name": cluster.name,
"enableNetworkPolicy": "true",
"rancherKubernetesEngineConfig": {
"network": {
"plugin": "canal",
}
}
}
cluster = admin_mc.client.update_by_id_cluster(
cluster.id,
cluster_config_np_true,
)
cluster_config_addonjob = {
"name": cluster.name,
"rancherKubernetesEngineConfig": {
"addonJobTimeout": 55,
"network": {
"plugin": "canal",
}
}
}
cluster = admin_mc.client.update_by_id_cluster(
cluster.id,
cluster_config_addonjob,
)
assert cluster.enableNetworkPolicy is True
| 5,639 | 34.25 | 78 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_settings.py
|
import pytest
from rancher import ApiError
from .common import random_str
from .conftest import wait_until
# cacerts is readOnly, and should not be able to be set through the API
def test_create_read_only(admin_mc, remove_resource):
client = admin_mc.client
with pytest.raises(ApiError) as e:
client.create_setting(name="cacerts", value="a")
assert e.value.error.status == 405
assert "readOnly" in e.value.error.message
# cacerts is a readOnly setting and should not be able to be updated via API
def test_update_read_only(admin_mc, remove_resource):
client = admin_mc.client
with pytest.raises(ApiError) as e:
client.update_by_id_setting(id="cacerts", value="b")
assert e.value.error.status == 405
assert "readOnly" in e.value.error.message
# cacerts is readOnly, and should be able to be read
def test_get_read_only(admin_mc, remove_resource):
client = admin_mc.client
client.by_id_setting(id="cacerts")
# cacerts is readOnly, and a user should not be able to delete it
def test_delete_read_only(admin_mc, remove_resource):
client = admin_mc.client
setting = client.by_id_setting(id="cacerts")
with pytest.raises(ApiError) as e:
client.delete(setting)
assert e.value.error.status == 405
assert "readOnly" in e.value.error.message
# user should be able to create a setting that does not exist yet
def test_create(admin_mc, remove_resource):
client = admin_mc.client
setting = client.create_setting(name="samplesetting1", value="a")
remove_resource(setting)
assert setting.value == "a"
# user should not be able to create a setting if it already exists
def test_create_existing(admin_mc, remove_resource):
client = admin_mc.client
setting = client.create_setting(name="samplefsetting2", value="a")
remove_resource(setting)
with pytest.raises(ApiError) as e:
setting2 = client.create_setting(name="samplefsetting2", value="a")
remove_resource(setting2)
assert e.value.error.status == 409
assert e.value.error.code == "AlreadyExists"
# user should be able to update a setting if it exists
def test_update(admin_mc, remove_resource):
client = admin_mc.client
setting = client.create_setting(name="samplesetting3", value="a")
remove_resource(setting)
setting = client.update_by_id_setting(id="samplesetting3", value="b")
assert setting.value == "b"
# user should not be able to update a setting if it does not exists
def test_update_nonexisting(admin_mc, remove_resource):
client = admin_mc.client
with pytest.raises(ApiError) as e:
setting = client.update_by_id_setting(id="samplesetting4", value="a")
remove_resource(setting)
assert e.value.error.status == 404
assert e.value.error.code == "NotFound"
def test_update_link(admin_mc, user_factory, remove_resource):
client = admin_mc.client
setting = client.create_setting(name=random_str(), value="a")
remove_resource(setting)
wait_until(lambda: client.reload(setting) is not None)
# admin should see update link
setting = client.reload(setting)
assert 'update' in setting.links
# create standard user
user = user_factory()
# this user should not be able to see update link
setting = user.client.reload(setting)
assert 'update' not in setting.links
| 3,367 | 30.185185 | 79 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_project_resource_quota.py
|
import pytest
from .common import random_str
import time
from rancher import ApiError
def ns_default_quota():
return {"limit": {"pods": "4"}}
def ns_quota():
return {"limit": {"pods": "4"}}
def ns_small_quota():
return {"limit": {"pods": "1"}}
def ns_large_quota():
return {"limit": {"pods": "200"}}
def default_project_quota():
return {"limit": {"pods": "100"}}
def ns_default_limit():
return {"requestsCpu": "1",
"requestsMemory": "1Gi",
"limitsCpu": "2",
"limitsMemory": "2Gi"}
def wait_for_applied_quota_set(admin_cc_client, ns, timeout=30):
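    """Poll the namespace's cattle.io/status annotation until the resource
    quota has been validated and applied."""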
start = time.time()
ns = admin_cc_client.reload(ns)
a = ns.annotations["cattle.io/status"]
while a is None:
time.sleep(.5)
ns = admin_cc_client.reload(ns)
a = ns.annotations["cattle.io/status"]
if time.time() - start > timeout:
raise Exception('Timeout waiting for'
' resource quota to be validated')
while "Validating resource quota" in a or "exceeds project limit" in a:
time.sleep(.5)
ns = admin_cc_client.reload(ns)
a = ns.annotations["cattle.io/status"]
if time.time() - start > timeout:
raise Exception('Timeout waiting for'
' resource quota to be validated')
def test_namespace_resource_quota(admin_cc, admin_pc):
q = default_project_quota()
client = admin_cc.management.client
p = client.create_project(name='test-' + random_str(),
clusterId=admin_cc.cluster.id,
resourceQuota=q,
namespaceDefaultResourceQuota=q)
p = admin_cc.management.client.wait_success(p)
assert p.resourceQuota is not None
client = admin_pc.cluster.client
ns = client.create_namespace(name=random_str(),
projectId=p.id,
resourceQuota=ns_quota())
assert ns is not None
assert ns.resourceQuota is not None
wait_for_applied_quota_set(admin_pc.cluster.client,
ns)
def test_project_resource_quota_fields(admin_cc):
q = default_project_quota()
client = admin_cc.management.client
p = client.create_project(name='test-' + random_str(),
clusterId=admin_cc.cluster.id,
resourceQuota=q,
namespaceDefaultResourceQuota=q)
p = admin_cc.management.client.wait_success(p)
assert p.resourceQuota is not None
assert p.resourceQuota.limit.pods == '100'
assert p.namespaceDefaultResourceQuota is not None
assert p.namespaceDefaultResourceQuota.limit.pods == '100'
def test_resource_quota_ns_create(admin_cc, admin_pc):
q = default_project_quota()
client = admin_cc.management.client
p = client.create_project(name='test-' + random_str(),
clusterId=admin_cc.cluster.id,
resourceQuota=q,
namespaceDefaultResourceQuota=q)
p = admin_cc.management.client.wait_success(p)
assert p.resourceQuota is not None
assert p.resourceQuota.limit.pods == '100'
ns = admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=p.id,
resourceQuota=ns_quota())
assert ns is not None
assert ns.resourceQuota is not None
wait_for_applied_quota_set(admin_pc.cluster.client, ns)
def test_default_resource_quota_ns_set(admin_cc, admin_pc):
q = ns_default_quota()
pq = default_project_quota()
client = admin_cc.management.client
p = client.create_project(name='test-' + random_str(),
clusterId=admin_cc.cluster.id,
resourceQuota=pq,
namespaceDefaultResourceQuota=q)
assert p.resourceQuota is not None
assert p.namespaceDefaultResourceQuota is not None
ns = admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=p.id)
wait_for_applied_quota_set(admin_pc.cluster.client,
ns)
def test_quota_ns_create_exceed(admin_cc, admin_pc):
q = default_project_quota()
client = admin_cc.management.client
p = client.create_project(name='test-' + random_str(),
clusterId=admin_cc.cluster.id,
resourceQuota=q,
namespaceDefaultResourceQuota=q)
p = admin_cc.management.client.wait_success(p)
assert p.resourceQuota is not None
# namespace quota exceeding project resource quota
cluster_client = admin_pc.cluster.client
with pytest.raises(ApiError) as e:
cluster_client.create_namespace(name=random_str(),
projectId=p.id,
resourceQuota=ns_large_quota())
assert e.value.error.status == 422
def test_default_resource_quota_ns_create_invalid_combined(admin_cc, admin_pc):
pq = default_project_quota()
q = ns_default_quota()
client = admin_cc.management.client
p = client.create_project(name='test-' + random_str(),
clusterId=admin_cc.cluster.id,
resourceQuota=pq,
namespaceDefaultResourceQuota=q)
p = admin_cc.management.client.wait_success(p)
assert p.resourceQuota is not None
assert p.namespaceDefaultResourceQuota is not None
ns = admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=p.id,
resourceQuota=ns_quota())
assert ns is not None
assert ns.resourceQuota is not None
# namespace quota exceeding project resource quota
cluster_client = admin_pc.cluster.client
with pytest.raises(ApiError) as e:
cluster_client.create_namespace(name=random_str(),
projectId=p.id,
resourceQuota=ns_large_quota())
assert e.value.error.status == 422
# quota within limits
ns = cluster_client.create_namespace(name=random_str(),
projectId=p.id,
resourceQuota=ns_small_quota())
assert ns is not None
assert ns.resourceQuota is not None
wait_for_applied_quota_set(admin_pc.cluster.client, ns, 10)
ns = admin_cc.client.reload(ns)
# update namespace with exceeding quota
with pytest.raises(ApiError) as e:
admin_pc.cluster.client.update(ns,
resourceQuota=ns_large_quota())
assert e.value.error.status == 422
def test_project_used_quota(admin_cc, admin_pc):
pq = default_project_quota()
q = ns_default_quota()
client = admin_cc.management.client
p = client.create_project(name='test-' + random_str(),
clusterId=admin_cc.cluster.id,
resourceQuota=pq,
namespaceDefaultResourceQuota=q)
p = admin_cc.management.client.wait_success(p)
assert p.resourceQuota is not None
assert p.namespaceDefaultResourceQuota is not None
ns = admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=p.id)
wait_for_applied_quota_set(admin_pc.cluster.client,
ns)
used = wait_for_used_pods_limit_set(admin_cc.management.client, p)
assert used.pods == "4"
def wait_for_used_pods_limit_set(admin_cc_client, project, timeout=30,
value="0"):
start = time.time()
project = admin_cc_client.reload(project)
while "usedLimit" not in project.resourceQuota \
or "pods" not in project.resourceQuota.usedLimit:
time.sleep(.5)
project = admin_cc_client.reload(project)
if time.time() - start > timeout:
raise Exception('Timeout waiting for'
' project.usedLimit.pods to be set')
if value == "0":
return project.resourceQuota.usedLimit
while project.resourceQuota.usedLimit.pods != value:
time.sleep(.5)
project = admin_cc_client.reload(project)
if time.time() - start > timeout:
raise Exception('Timeout waiting for'
' project.usedLimit.pods to be set to ' + value)
def test_default_resource_quota_project_update(admin_cc, admin_pc):
client = admin_cc.management.client
p = client.create_project(name='test-' + random_str(),
clusterId=admin_cc.cluster.id)
ns = admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=p.id)
wait_for_applied_quota_set(admin_pc.cluster.client, ns, 10)
pq = default_project_quota()
q = ns_default_quota()
p = admin_cc.management.client.update(p,
resourceQuota=pq,
namespaceDefaultResourceQuota=q)
assert p.resourceQuota is not None
assert p.namespaceDefaultResourceQuota is not None
wait_for_applied_quota_set(admin_pc.cluster.client,
ns)
def test_api_validation_project(admin_cc):
client = admin_cc.management.client
q = default_project_quota()
# default namespace quota missing
with pytest.raises(ApiError) as e:
client.create_project(name='test-' + random_str(),
clusterId=admin_cc.cluster.id,
resourceQuota=q)
assert e.value.error.status == 422
# default namespace quota as None
with pytest.raises(ApiError) as e:
client.create_project(name='test-' + random_str(),
clusterId=admin_cc.cluster.id,
resourceQuota=q,
namespaceDefaultResourceQuota=None)
assert e.value.error.status == 422
with pytest.raises(ApiError) as e:
client.create_project(name='test-' + random_str(),
clusterId=admin_cc.cluster.id,
namespaceDefaultResourceQuota=q)
assert e.value.error.status == 422
lq = ns_large_quota()
with pytest.raises(ApiError) as e:
client.create_project(name='test-' + random_str(),
clusterId=admin_cc.cluster.id,
resourceQuota=q,
namespaceDefaultResourceQuota=lq)
assert e.value.error.status == 422
pq = {"limit": {"pods": "100",
"services": "100"}}
iq = {"limit": {"pods": "100"}}
client = admin_cc.management.client
p = client.create_project(name='test-' + random_str(),
clusterId=admin_cc.cluster.id)
with pytest.raises(ApiError) as e:
admin_cc.management.client.update(p,
resourceQuota=pq,
namespaceDefaultResourceQuota=iq)
def test_api_validation_namespace(admin_cc, admin_pc):
pq = {"limit": {"pods": "100",
"services": "100"}}
q = {"limit": {"pods": "10",
"services": "10"}}
client = admin_cc.management.client
p = client.create_project(name='test-' + random_str(),
clusterId=admin_cc.cluster.id,
resourceQuota=pq,
namespaceDefaultResourceQuota=q)
p = admin_cc.management.client.wait_success(p)
assert p.resourceQuota is not None
assert p.namespaceDefaultResourceQuota is not None
nsq = {"limit": {"pods": "10"}}
with pytest.raises(ApiError) as e:
admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=p.id,
resourceQuota=nsq)
assert e.value.error.status == 422
def test_used_quota_exact_match(admin_cc, admin_pc):
pq = {"limit": {"pods": "10"}}
q = {"limit": {"pods": "2"}}
client = admin_cc.management.client
p = client.create_project(name='test-' + random_str(),
clusterId=admin_cc.cluster.id,
resourceQuota=pq,
namespaceDefaultResourceQuota=q)
p = admin_cc.management.client.wait_success(p)
assert p.resourceQuota is not None
assert p.namespaceDefaultResourceQuota is not None
nsq = {"limit": {"pods": "2"}}
admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=p.id,
resourceQuota=nsq)
nsq = {"limit": {"pods": "8"}}
admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=p.id,
resourceQuota=nsq)
wait_for_used_pods_limit_set(admin_cc.management.client, p, 10, "10")
# try reducing the project quota
pq = {"limit": {"pods": "8"}}
dq = {"limit": {"pods": "1"}}
with pytest.raises(ApiError) as e:
admin_cc.management.client.update(p,
resourceQuota=pq,
namespaceDefaultResourceQuota=dq)
assert e.value.error.status == 422
def test_add_remove_fields(admin_cc, admin_pc):
pq = {"limit": {"pods": "10"}}
q = {"limit": {"pods": "2"}}
client = admin_cc.management.client
p = client.create_project(name='test-' + random_str(),
clusterId=admin_cc.cluster.id,
resourceQuota=pq,
namespaceDefaultResourceQuota=q)
p = admin_cc.management.client.wait_success(p)
assert p.resourceQuota is not None
assert p.namespaceDefaultResourceQuota is not None
nsq = {"limit": {"pods": "2"}}
admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=p.id,
resourceQuota=nsq)
wait_for_used_pods_limit_set(admin_cc.management.client, p,
10, "2")
admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=p.id,
resourceQuota=nsq)
wait_for_used_pods_limit_set(admin_cc.management.client, p,
10, "4")
# update project with services quota
with pytest.raises(ApiError) as e:
pq = {"limit": {"pods": "10", "services": "10"}}
dq = {"limit": {"pods": "2", "services": "7"}}
admin_cc.management.client.update(p,
resourceQuota=pq,
namespaceDefaultResourceQuota=dq)
assert e.value.error.status == 422
pq = {"limit": {"pods": "10", "services": "10"}}
dq = {"limit": {"pods": "2", "services": "2"}}
p = admin_cc.management.client.update(p,
resourceQuota=pq,
namespaceDefaultResourceQuota=dq)
wait_for_used_svcs_limit_set(admin_cc.management.client, p,
30, "4")
# remove services quota
pq = {"limit": {"pods": "10"}}
dq = {"limit": {"pods": "2"}}
p = admin_cc.management.client.update(p,
resourceQuota=pq,
namespaceDefaultResourceQuota=dq)
wait_for_used_svcs_limit_set(admin_cc.management.client, p,
30, "0")
def wait_for_used_svcs_limit_set(admin_cc_client, project, timeout=30,
value="0"):
start = time.time()
project = admin_cc_client.reload(project)
while "usedLimit" not in project.resourceQuota \
or "services" not in project.resourceQuota.usedLimit:
time.sleep(.5)
project = admin_cc_client.reload(project)
if time.time() - start > timeout:
raise Exception('Timeout waiting for'
' project.usedLimit.services to be set')
if value == "0":
return project.resourceQuota.usedLimit
while project.resourceQuota.usedLimit.services != value:
time.sleep(.5)
project = admin_cc_client.reload(project)
if time.time() - start > timeout:
raise Exception('Timeout waiting for'
' usedLimit.services to be set to ' + value)
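# Illustrative sketch (not part of the original suite): the wait helpers used
# in this file follow the same reload-and-poll pattern, so a generic version
# for any resource key in usedLimit could look like this. The name and the
# dict-style indexing of usedLimit are assumptions.
def _wait_for_used_limit(admin_cc_client, project, resource, value,
                         timeout=30):
    start = time.time()
    project = admin_cc_client.reload(project)
    # poll until the resource shows up in usedLimit with the expected value
    while "usedLimit" not in project.resourceQuota \
            or resource not in project.resourceQuota.usedLimit \
            or project.resourceQuota.usedLimit[resource] != value:
        time.sleep(.5)
        project = admin_cc_client.reload(project)
        if time.time() - start > timeout:
            raise Exception('Timeout waiting for usedLimit.' + resource +
                            ' to reach ' + value)
    return project.resourceQuota.usedLimit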
def test_update_quota(admin_cc, admin_pc):
client = admin_cc.management.client
p = client.create_project(name='test-' + random_str(),
clusterId=admin_cc.cluster.id)
p = admin_cc.management.client.wait_success(p)
# create 4 namespaces
for x in range(4):
admin_pc.cluster.client.create_namespace(name=random_str(),
projectId=p.id)
    # update project with a default namespace quota that
    # won't fit within the project limit across the existing namespaces
with pytest.raises(ApiError) as e:
pq = {"limit": {"pods": "5"}}
dq = {"limit": {"pods": "2"}}
admin_cc.management.client.update(p,
resourceQuota=pq,
namespaceDefaultResourceQuota=dq)
assert e.value.error.status == 422
def test_container_resource_limit(admin_cc, admin_pc):
q = default_project_quota()
lmt = ns_default_limit()
client = admin_cc.management.client
p = client.create_project(name='test-' + random_str(),
clusterId=admin_cc.cluster.id,
resourceQuota=q,
namespaceDefaultResourceQuota=q,
containerDefaultResourceLimit=lmt)
p = admin_cc.management.client.wait_success(p)
assert p.resourceQuota is not None
assert p.containerDefaultResourceLimit is not None
client = admin_pc.cluster.client
ns = client.create_namespace(name=random_str(),
projectId=p.id,
resourceQuota=ns_quota())
assert ns is not None
assert ns.resourceQuota is not None
assert ns.containerDefaultResourceLimit is not None
wait_for_applied_quota_set(admin_pc.cluster.client,
ns)
# reset the container limit
p = admin_cc.management.client.update(p,
containerDefaultResourceLimit=None)
assert p.containerDefaultResourceLimit is None
ns = admin_cc.management.client.update(ns,
containerDefaultResourceLimit=None)
assert len(ns.containerDefaultResourceLimit) == 0
| 19,291 | 38.859504 | 79 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_cluster.py
|
import kubernetes
from .common import random_str
from .conftest import wait_for, kubernetes_api_client
from kubernetes.client import CustomObjectsApi
from kubernetes.client.rest import ApiException
def test_cluster_node_count(admin_mc, remove_resource,
raw_remove_custom_resource):
"""Test that the cluster node count gets updated as nodes are added"""
client = admin_mc.client
cluster = client.create_cluster(
name=random_str(),
rancherKubernetesEngineConfig={
"accessKey": "junk"
}
)
remove_resource(cluster)
def _check_node_count(cluster, nodes):
c = client.reload(cluster)
return c.nodeCount == nodes
def _node_count_fail(cluster, nodes):
c = client.reload(cluster)
s = "cluster {} failed to have proper node count, expected: {} has: {}"
return s.format(c.id, nodes, c.nodeCount)
node_count = 0
wait_for(lambda: _check_node_count(cluster, node_count),
fail_handler=lambda: _node_count_fail(cluster, node_count))
# Wait for cluster ns to be created
k8s_client = kubernetes_api_client(admin_mc.client, 'local')
ns_api = kubernetes.client.CoreV1Api(k8s_client)
def _check_cluster_ns(cluster):
try:
ns = ns_api.read_namespace(cluster.id)
except ApiException as e:
if e.status != 404:
                raise e
return False
else:
return ns is not None
def _check_cluster_ns_fail(cluster):
s = "cluster {} namespace isn't created"
return s.format(cluster.id)
wait_for(lambda: _check_cluster_ns(cluster),
fail_handler=lambda: _check_cluster_ns_fail(cluster))
# Nodes have to be created manually through k8s client to attach to a
# pending cluster
k8s_dynamic_client = CustomObjectsApi(admin_mc.k8s_client)
body = {
"metadata": {
"name": random_str(),
"namespace": cluster.id,
},
"kind": "Node",
"apiVersion": "management.cattle.io/v3",
}
dynamic_nt = k8s_dynamic_client.create_namespaced_custom_object(
"management.cattle.io", "v3", cluster.id, 'nodes', body)
raw_remove_custom_resource(dynamic_nt)
node_count = 1
wait_for(lambda: _check_node_count(cluster, node_count),
fail_handler=lambda: _node_count_fail(cluster, node_count))
# Create node number 2
body['metadata']['name'] = random_str()
dynamic_nt1 = k8s_dynamic_client.create_namespaced_custom_object(
"management.cattle.io", "v3", cluster.id, 'nodes', body)
raw_remove_custom_resource(dynamic_nt1)
node_count = 2
wait_for(lambda: _check_node_count(cluster, node_count),
fail_handler=lambda: _node_count_fail(cluster, node_count))
# Delete a node
k8s_dynamic_client.delete_namespaced_custom_object(
"management.cattle.io", "v3", cluster.id, 'nodes',
dynamic_nt1['metadata']['name'])
node_count = 1
wait_for(lambda: _check_node_count(cluster, node_count),
fail_handler=lambda: _node_count_fail(cluster, node_count))
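# Illustrative sketch (not part of the original test): the Node custom objects
# registered above are plain management.cattle.io/v3 resources, so the body
# construction can be wrapped in a small helper. The helper name is
# hypothetical; the call mirrors the one used in the test.
def _register_node(k8s_dynamic_client, cluster_id, name):
    body = {
        "metadata": {"name": name, "namespace": cluster_id},
        "kind": "Node",
        "apiVersion": "management.cattle.io/v3",
    }
    return k8s_dynamic_client.create_namespaced_custom_object(
        "management.cattle.io", "v3", cluster_id, 'nodes', body)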
| 3,167 | 33.064516 | 79 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_dynamic_schemas.py
|
import copy
import pytest
from .conftest import wait_for
@pytest.mark.nonparallel
def test_dynamic_schemas_update(request, admin_mc):
assert not schema_has_field(admin_mc)
eks_schema = admin_mc.client.by_id_dynamicSchema(
'amazonelasticcontainerserviceconfig')
new_field = copy.deepcopy(eks_schema.resourceFields['displayName'])
new_field.description = 'My special field.'
setattr(eks_schema.resourceFields, 'mySpecialField', new_field)
admin_mc.client.update_by_id_dynamicSchema(eks_schema.id, eks_schema)
request.addfinalizer(lambda: cleanup_extra_field(admin_mc))
wait_for(lambda: schema_has_field(admin_mc),
fail_handler=lambda: "could not add extra field",
timeout=120)
def cleanup_extra_field(admin_mc):
eks_schema = admin_mc.client.by_id_dynamicSchema(
'amazonelasticcontainerserviceconfig')
delattr(eks_schema.resourceFields, 'mySpecialField')
admin_mc.client.delete(eks_schema)
admin_mc.client.create_dynamicSchema(eks_schema)
wait_for(lambda: not schema_has_field(admin_mc),
fail_handler=lambda: "could not clean up extra field",
timeout=120)
def schema_has_field(admin_mc):
admin_mc.client.reload_schema()
schemas = admin_mc.client.schema.types
eks_schema = None
for name, schema in schemas.items():
if name == "amazonElasticContainerServiceConfig":
eks_schema = schema
return hasattr(eks_schema.resourceFields,
'mySpecialField') and eks_schema.resourceFields[
'mySpecialField'] is not None
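# After the update above propagates, the reloaded user-facing schema for
# amazonElasticContainerServiceConfig is expected to expose the copied field,
# e.g. (illustrative, since the field is a copy of displayName):
#   schema.resourceFields['mySpecialField'].description == 'My special field.'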
| 1,610 | 31.22 | 73 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_catalog.py
|
import pytest
import time
from rancher import ApiError
from .common import wait_for_template_to_be_created, \
wait_for_template_to_be_deleted, random_str, wait_for_atleast_workload
from .conftest import set_server_version, wait_for, DEFAULT_CATALOG
def test_catalog(admin_mc, remove_resource):
client = admin_mc.client
name1 = random_str()
name2 = random_str()
url1 = "https://github.com/StrongMonkey/charts-1.git"
url2 = "HTTP://github.com/StrongMonkey/charts-1.git"
catalog1 = client.create_catalog(name=name1,
branch="test",
url=url1,
)
remove_resource(catalog1)
catalog2 = client.create_catalog(name=name2,
branch="test",
url=url2,
)
remove_resource(catalog2)
wait_for_template_to_be_created(client, name1)
wait_for_template_to_be_created(client, name2)
client.delete(catalog1)
client.delete(catalog2)
wait_for_template_to_be_deleted(client, name1)
wait_for_template_to_be_deleted(client, name2)
def test_invalid_catalog_chars(admin_mc, remove_resource):
client = admin_mc.client
name = random_str()
url = "https://github.com/%0dStrongMonkey%0A/charts-1.git"
with pytest.raises(ApiError) as e:
catalog = client.create_catalog(name=name,
branch="test",
url=url,
)
remove_resource(catalog)
assert e.value.error.status == 422
assert e.value.error.message == "Invalid characters in catalog URL"
url = "https://github.com/StrongMonkey\t/charts-1.git"
with pytest.raises(ApiError) as e:
catalog = client.create_catalog(name=name,
branch="test",
url=url,
)
remove_resource(catalog)
assert e.value.error.status == 422
assert e.value.error.message == "Invalid characters in catalog URL"
def test_global_catalog_template_access(admin_mc, user_factory,
remove_resource):
client = admin_mc.client
user1 = user_factory()
remove_resource(user1)
name = random_str()
# Get all templates from library catalog that is enabled by default
updated = False
start = time.time()
interval = 0.5
while not updated:
time.sleep(interval)
interval *= 2
c = client.list_catalog(name="library").data[0]
if c.transitioning == "no":
updated = True
continue
if time.time() - start > 90:
raise AssertionError(
"Timed out waiting for catalog to stop transitioning")
existing = client.list_template(catalogId="library").data
templates = []
for t in existing:
templates.append("library-" + t.name)
url = "https://github.com/mrajashree/charts.git"
catalog = client.create_catalog(name=name,
branch="onlyOne",
url=url,
)
wait_for_template_to_be_created(client, name)
updated = False
start = time.time()
interval = 0.5
while not updated:
time.sleep(interval)
interval *= 2
c = client.list_catalog(name=name).data[0]
if c.transitioning == "no":
updated = True
continue
if time.time() - start > 90:
raise AssertionError(
"Timed out waiting for catalog to stop transitioning")
# Now list all templates of this catalog
new_templates = client.list_template(catalogId=name).data
for t in new_templates:
templates.append(name + "-" + t.name)
all_templates = existing + new_templates
# User should be able to list all these templates
user_client = user1.client
user_lib_templates = user_client.list_template(catalogId="library").data
user_new_templates = user_client.list_template(catalogId=name).data
user_templates = user_lib_templates + user_new_templates
assert len(user_templates) == len(all_templates)
client.delete(catalog)
wait_for_template_to_be_deleted(client, name)
def test_user_can_list_global_catalog(user_factory, remove_resource):
user1 = user_factory()
remove_resource(user1)
user_client = user1.client
c = user_client.list_catalog(name="library")
assert len(c) == 1
@pytest.mark.nonparallel
def test_template_version_links(admin_mc, admin_pc, custom_catalog,
remove_resource, restore_rancher_version):
"""Test that template versionLinks are being updated based off the rancher
    version set on the server and the query parameter 'rancherVersion' being
set.
"""
# 1.6.0 uses 2.0.0-2.2.0
# 1.6.2 uses 2.1.0-2.3.0
client = admin_mc.client
c_name = random_str()
custom_catalog(name=c_name)
# Set the server expecting both versions
set_server_version(client, "2.1.0")
templates = client.list_template(
rancherVersion='2.1.0', catalogId=c_name)
assert len(templates.data[0]['versionLinks']) == 2
assert '1.6.0' in templates.data[0]['versionLinks']
assert '1.6.2' in templates.data[0]['versionLinks']
# Set the server expecting only the older version
set_server_version(client, "2.0.0")
templates = client.list_template(
rancherVersion='2.0.0', catalogId=c_name)
assert len(templates.data[0]['versionLinks']) == 1
assert '1.6.0' in templates.data[0]['versionLinks']
# Set the server expecting only the newer version
set_server_version(client, "2.3.0")
templates = client.list_template(
rancherVersion='2.3.0', catalogId=c_name)
assert len(templates.data[0]['versionLinks']) == 1
assert '1.6.2' in templates.data[0]['versionLinks']
# Set the server expecting no versions, this should be outside both
# versions acceptable ranges
set_server_version(client, "2.4.0")
templates = client.list_template(
rancherVersion='2.4.0', catalogId=c_name)
assert len(templates.data[0]['versionLinks']) == 0
def test_relative_paths(admin_mc, admin_pc, remove_resource):
""" This test adds a catalog's index.yaml with a relative chart url
and ensures that rancher can resolve the relative url"""
client = admin_mc.client
catalogname = "cat-" + random_str()
url = "https://raw.githubusercontent.com/rancher/integration-test-charts"\
"/relative-path"
catalog = client.create_catalog(catalogName=catalogname, branch="master",
url=url)
remove_resource(catalog)
catalog = client.reload(catalog)
assert catalog['url'] == url
# now deploy the app in the catalog to ensure we can resolve the tarball
ns = admin_pc.cluster.client.create_namespace(
catalogName="ns-" + random_str(),
projectId=admin_pc.project.id)
remove_resource(ns)
wait_for_template_to_be_created(client, catalog.id)
mysqlha = admin_pc.client.create_app(name="app-" + random_str(),
externalId="catalog://?catalog=" +
catalog.id +
"&template=mysql"
"&version=1.6.2",
targetNamespace=ns.name,
projectId=admin_pc.project.id)
remove_resource(mysqlha)
wait_for_atleast_workload(pclient=admin_pc.client, nsid=ns.id, timeout=60,
count=1)
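# As assembled above, the externalId that points an app at a catalog chart has
# the form "catalog://?catalog=<catalogId>&template=<chartName>"
# "&version=<chartVersion>", where <catalogId> is the id of the catalog
# resource created in this test rather than its git URL.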
def test_cannot_delete_system_catalog(admin_mc):
"""This test asserts that the system catalog cannot be delete"""
client = admin_mc.client
system_catalog = client.by_id_catalog("system-library")
with pytest.raises(ApiError) as e:
client.delete(system_catalog)
assert e.value.error.status == 422
assert e.value.error.message == 'not allowed to delete system-library' \
' catalog'
def test_system_catalog_missing_remove_link(admin_mc):
"""This test asserts that the remove link is missing from system-catalog's
links"""
client = admin_mc.client
system_catalog = client.by_id_catalog("system-library")
assert "remove" not in system_catalog.links
def test_cannot_update_system_if_embedded(admin_mc):
"""This test asserts that the system catalog cannot be updated if
system-catalog setting is set to 'bundled'"""
client = admin_mc.client
system_catalog_setting = client.by_id_setting("system-catalog")
# this could potentially interfere with other tests if they were to rely
# on system-catalog setting
client.update_by_id_setting(id=system_catalog_setting.id, value="bundled")
system_catalog = client.by_id_catalog("system-library")
with pytest.raises(ApiError) as e:
client.update_by_id_catalog(id=system_catalog.id, branch="asd")
assert e.value.error.status == 422
assert e.value.error.message == 'not allowed to edit system-library' \
' catalog'
def test_embedded_system_catalog_missing_edit_link(admin_mc):
"""This test asserts that the system catalog is missing the 'update' link
if system-catalog setting is set to 'bundled'"""
client = admin_mc.client
system_catalog_setting = client.by_id_setting("system-catalog")
# this could potentially interfere with other tests if they were to rely
# on system-catalog setting
client.update_by_id_setting(id=system_catalog_setting.id, value="bundled")
system_catalog = client.by_id_catalog("system-library")
assert "update" not in system_catalog.links
@pytest.mark.nonparallel
def test_catalog_refresh(admin_mc):
"""Test that on refresh the response includes the names of the catalogs
that are being refreshed"""
client = admin_mc.client
catalog = client.by_id_catalog("library")
out = client.action(obj=catalog, action_name="refresh")
assert out['catalogs'][0] == "library"
catalogs = client.list_catalog()
out = client.action(obj=catalogs, action_name="refresh")
# It just needs to be more than none, other test can add/remove catalogs
# so a hard count will break
assert len(out['catalogs']) > 0, 'no catalogs in response'
def test_invalid_catalog_chart_names(admin_mc, remove_resource):
"""Test chart with invalid name in catalog error properly
and test that a chart names are truncated and processed without
error"""
client = admin_mc.client
name = random_str()
catalog = client.create_catalog(name=name,
branch="broke-charts",
url=DEFAULT_CATALOG,
)
remove_resource(catalog)
wait_for_template_to_be_created(client, catalog.id)
def get_errored_catalog(catalog):
catalog = client.reload(catalog)
if catalog.transitioning == "error":
return catalog
return None
catalog = wait_for(lambda: get_errored_catalog(catalog),
fail_handler=lambda:
"catalog was not found in error state")
templates = client.list_template(catalogId=catalog.id).data
templatesString = ','.join([str(i) for i in templates])
assert "areallylongname" not in templatesString
assert "bad-chart_name" not in templatesString
assert catalog.state == "processed"
assert catalog.transitioning == "error"
assert "Error in chart(s):" in catalog.transitioningMessage
assert "bad-chart_name" in catalog.transitioningMessage
assert "areallylongname" in catalog.transitioningMessage
# this will break if github repo changes
assert len(templates) == 6
# checking that the errored catalog can be deleted successfully
client.delete(catalog)
wait_for_template_to_be_deleted(client, name)
assert not client.list_catalog(name=name).data
def test_invalid_catalog_chart_urls(admin_mc, remove_resource):
"""Test chart with file:// and local:// url paths"""
client = admin_mc.client
name = random_str()
catalog = client.create_catalog(name=name,
branch="invalid-urls",
url=DEFAULT_CATALOG,
)
remove_resource(catalog)
wait_for_template_to_be_created(client, catalog.id)
def get_errored_catalog(catalog):
catalog = client.reload(catalog)
if catalog.transitioning == "error":
return catalog
return None
catalog = wait_for(lambda: get_errored_catalog(catalog),
fail_handler=lambda:
"catalog was not found in error state")
templates = client.list_template(catalogId=catalog.id).data
templatesString = ','.join([str(i) for i in templates])
# url in index.yaml:
# local://azure-samples.github.io/helm-charts/aks-helloworld-0.1.0.tgz
assert "aks-goodbyeworld" not in templatesString
# url in index.yaml:
# file://azure-samples.github.io/helm-charts/aks-helloworld-0.1.0.tgz
assert "aks-helloworld" not in templatesString
assert catalog.state == "processed"
assert catalog.transitioning == "error"
assert "Error in chart(s):" in catalog.transitioningMessage
assert "aks-goodbyeworld" in catalog.transitioningMessage
assert "aks-helloworld" in catalog.transitioningMessage
# this will break if github repo changes
# valid url in index.yaml:
# https://azure-samples.github.io/helm-charts/azure-vote-0.1.0.tgz
assert len(templates) == 1
# checking that the errored catalog can be deleted successfully
client.delete(catalog)
wait_for_template_to_be_deleted(client, name)
assert not client.list_catalog(name=name).data
def test_catalog_has_helmversion(admin_mc, remove_resource):
"""Test to see that the helm version can be added to a catalog
on create and that the value is passed to the template"""
client = admin_mc.client
name1 = random_str()
name2 = random_str()
catalog1 = client.create_catalog(name=name1,
branch="master",
url=DEFAULT_CATALOG,
)
remove_resource(catalog1)
catalog2 = client.create_catalog(name=name2,
branch="master",
url=DEFAULT_CATALOG,
helmVersion="helm_v3"
)
remove_resource(catalog2)
wait_for_template_to_be_created(client, name1)
wait_for_template_to_be_created(client, name2)
assert "helm_v3" not in catalog1
assert catalog2.helmVersion == "helm_v3"
templates1 = client.list_template(catalogId=catalog1.id).data
for template in templates1:
assert "helmVersion" not in template.status
templates2 = client.list_template(catalogId=catalog2.id).data
for template in templates2:
assert "helmVersion" in template.status
assert template.status.helmVersion == "helm_v3"
def test_refresh_catalog_access(admin_mc, user_mc):
"""Tests that a user with standard access is not
able to refresh a catalog.
"""
catalog = admin_mc.client.by_id_catalog("library")
out = admin_mc.client.action(obj=catalog, action_name="refresh")
assert out['catalogs'][0] == "library"
# use catalog obj from admin client to get action not available to user
with pytest.raises(ApiError) as e:
user_mc.client.action(obj=catalog, action_name="refresh")
assert e.value.error.status == 404
| 16,016 | 37.970803 | 78 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_tokens.py
|
import pytest
import rancher
import requests
import time
from .conftest import BASE_URL, AUTH_URL, protect_response
def test_certificates(admin_mc):
client = admin_mc.client
tokens = client.list_token()
currentCount = 0
for t in tokens:
if t.current:
assert t.userId == admin_mc.user.id
currentCount += 1
assert currentCount == 1
def test_websocket(admin_mc):
client = rancher.Client(url=BASE_URL, token=admin_mc.client.token,
verify=False)
# make a request that looks like a websocket
client._session.headers["Connection"] = "upgrade"
client._session.headers["Upgrade"] = "websocket"
client._session.headers["Origin"] = "badStuff"
client._session.headers["User-Agent"] = "Mozilla"
# do something with client now that we have a "websocket"
with pytest.raises(rancher.ApiError) as e:
client.list_cluster()
assert e.value.error.Code.Status == 403
def test_api_token_ttl(admin_mc, remove_resource):
client = admin_mc.client
max_ttl = client.by_id_setting("auth-token-max-ttl-minutes")
max_ttl_mins = int(max_ttl["value"])
# api tokens must be created with min(input_ttl, max_ttl)
token = client.create_token(ttl=0)
remove_resource(token)
token_ttl_mins = mins(token["ttl"])
assert token_ttl_mins == max_ttl_mins
@pytest.mark.nonparallel
def test_kubeconfig_token_ttl(admin_mc, user_mc):
client = admin_mc.client
# delete existing kubeconfig token
kubeconfig_token_name = "kubeconfig-" + admin_mc.user.id
token = client.by_id_token(kubeconfig_token_name)
if token is not None:
client.delete(token)
# disable kubeconfig generation setting
client.update_by_id_setting(id="kubeconfig-generate-token", value="false")
# update kubeconfig ttl setting for test
kubeconfig_ttl_mins = 0.01
client.update_by_id_setting(
id="kubeconfig-token-ttl-minutes",
value=kubeconfig_ttl_mins)
# call login action for kubeconfig token
kubeconfig_token = login()
ttl1, token1 = get_token_and_ttl(kubeconfig_token)
assert ttl1 == kubeconfig_ttl_mins
# wait for token to expire
time.sleep(kubeconfig_ttl_mins*60)
# confirm new kubeconfig token gets generated
kubeconfig_token2 = login()
ttl2, token2 = get_token_and_ttl(kubeconfig_token2)
assert ttl2 == kubeconfig_ttl_mins
assert token1 != token2
# reset kubeconfig ttl setting
client.update_by_id_setting(id="kubeconfig-token-ttl-minutes",
value="960")
# enable kubeconfig generation setting
client.update_by_id_setting(id="kubeconfig-generate-token", value="true")
def login():
r = requests.post(AUTH_URL, json={
'username': 'admin',
'password': 'admin',
'responseType': 'kubeconfig',
}, verify=False)
protect_response(r)
return r.json()
def get_token_and_ttl(token):
token1_ttl_mins = mins(int(token["ttl"]))
token1_token = token["token"]
return token1_ttl_mins, token1_token
def mins(time_millisec):
return time_millisec / 60000
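# Worked example of the conversion above: a ttl reported as 57600000
# milliseconds is mins(57600000) == 960.0, matching the 960-minute value the
# kubeconfig test restores at the end.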
| 3,154 | 27.169643 | 78 |
py
|
rancher
|
rancher-master/tests/integration/suite/test_alert.py
|
import pytest
from rancher import ApiError
from .common import random_str
from .conftest import wait_for
from .alert_common import MockReceiveAlert
dingtalk_config = {
"type": "/v3/schemas/dingtalkConfig",
"url": "http://127.0.0.1:4050/dingtalk/test/",
}
microsoft_teams_config = {
"type": "/v3/schemas/msTeamsConfig",
"url": "http://127.0.0.1:4050/microsoftTeams",
}
MOCK_RECEIVER_ALERT_PORT = 4050
def test_alert_access(admin_mc, admin_pc, admin_cc, user_mc, remove_resource):
"""Tests that a user with read-only access is not
able to deactivate an alert.
"""
prtb = admin_mc.client.create_project_role_template_binding(
name="prtb-" + random_str(),
userId=user_mc.user.id,
projectId=admin_pc.project.id,
roleTemplateId="read-only")
remove_resource(prtb)
# we get some project defaults, wait for them to come up
wait_for(projectAlertRules(user_mc.client),
fail_handler=lambda: "failed waiting for project alerts",
timeout=120)
# list with admin_mc to get action not available to user
alerts = admin_mc.client.list_projectAlertRule(
projectId=admin_pc.project.id
)
with pytest.raises(ApiError) as e:
user_mc.client.action(obj=alerts.data[0], action_name="deactivate")
assert e.value.error.status == 404
def projectAlertRules(client):
"""Wait for the crtb to have the userId populated"""
def cb():
return len(client.list_projectAlertRule().data) > 0
return cb
@pytest.fixture(scope="module")
def mock_receiver_alert():
server = MockReceiveAlert(port=MOCK_RECEIVER_ALERT_PORT)
server.start()
yield server
server.shutdown_server()
def test_add_notifier(admin_mc, remove_resource, mock_receiver_alert):
client = admin_mc.client
# Add the notifier dingtalk and microsoftTeams
notifier_dingtalk = client.create_notifier(name="dingtalk",
clusterId="local",
dingtalkConfig=dingtalk_config)
notifier_microsoft_teams = client.create_notifier(
name="microsoftTeams",
clusterId="local",
msteamsConfig=microsoft_teams_config)
client.action(obj=notifier_microsoft_teams,
action_name="send",
msteamsConfig=microsoft_teams_config)
client.action(obj=notifier_dingtalk,
action_name="send",
dingtalkConfig=dingtalk_config)
# Remove the notifiers
remove_resource(notifier_dingtalk)
remove_resource(notifier_microsoft_teams)
| 2,622 | 30.60241 | 78 |
py
|
rancher
|
rancher-master/tests/validation/tests/common.py
|
import os
import random
import time
CATTLE_TEST_URL = os.environ.get('CATTLE_TEST_URL', "")
ADMIN_TOKEN = os.environ.get('ADMIN_TOKEN', "None")
USER_TOKEN = os.environ.get('USER_TOKEN', "None")
CLUSTER_NAME = os.environ.get("RANCHER_CLUSTER_NAME", "")
DEFAULT_TIMEOUT = 120
def random_int(start, end):
return random.randint(start, end)
def random_test_name(name="test"):
return name + "-" + str(random_int(10000, 99999))
def random_str():
return 'random-{0}-{1}'.format(random_num(), int(time.time()))
def random_num():
return random.randint(0, 1000000)
def random_name():
return "test" + "-" + str(random_int(10000, 99999))
def wait_for(callback, timeout=DEFAULT_TIMEOUT, timeout_message=None):
start = time.time()
ret = callback()
while ret is None or ret is False:
time.sleep(.5)
if time.time() - start > timeout:
if timeout_message:
raise Exception(timeout_message)
else:
raise Exception('Timeout waiting for condition')
ret = callback()
return ret
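# Example usage (the callback below is hypothetical): poll until a resource
# reaches the desired state, raising the custom message on timeout.
#
#   wait_for(lambda: client.reload(cluster).state == "active",
#            timeout_message="cluster never became active")
#
# The callback is retried every half second until it returns a truthy value.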
| 1,082 | 23.613636 | 70 |
py
|
rancher
|
rancher-master/tests/validation/tests/__init__.py
| 0 | 0 | 0 |
py
|
|
rancher
|
rancher-master/tests/validation/tests/kubernetes_conformance/conftest.py
|
import os
import pytest
import random
from lib.aws import AmazonWebServices
from lib.rke_client import RKEClient
from lib.kubectl_client import KubectlClient
CLOUD_PROVIDER = os.environ.get("CLOUD_PROVIDER", 'AWS')
TEMPLATE_PATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'resources/rke_templates')
@pytest.fixture(scope='session')
def cloud_provider():
if CLOUD_PROVIDER == 'AWS':
return AmazonWebServices()
@pytest.fixture(scope='function')
def rke_client(cloud_provider):
return RKEClient(
master_ssh_key_path=cloud_provider.master_ssh_key_path,
template_path=TEMPLATE_PATH)
@pytest.fixture(scope='function')
def kubectl():
return KubectlClient()
@pytest.fixture(scope='function')
def test_name(request):
name = request.function.__name__.replace('test_', '').replace('_', '-')
# limit name length
name = name[0:20] if len(name) > 20 else name
return '{0}-{1}'.format(name, random.randint(100000, 1000000))
| 994 | 24.512821 | 75 |
py
|
rancher
|
rancher-master/tests/validation/tests/kubernetes_conformance/test_rke_k8s_conformance.py
|
import os
import time
from .conftest import * # NOQA
from tests.rke.common import create_rke_cluster, delete_nodes
KUBE_CONFIG_PATH = os.environ.get(
'KUBE_CONFIG_PATH', 'kube_config_cluster.yml')
CONFORMANCE_DONE = "no-exit was specified, sonobuoy is now blocking"
def extract_file_results_path(logs):
log_lines = logs.splitlines()
for line in log_lines:
if "Results available at" in line:
path_line = line.split(' ')
abs_file_path = path_line[-1].replace('"', '')
return abs_file_path
else:
raise Exception(
"Unable to find test result file in logs: {0}".format(logs))
def delete_all_jobs(kubectl):
namespaces = kubectl.list_namespaces()
for namespace in namespaces:
result = kubectl.delete_resourse("jobs", namespace=namespace, all=True)
assert result.ok, "{}".format(result)
def run_conformance(kubectl, kube_config):
kubectl.kube_config_path = kube_config
delete_all_jobs(kubectl)
kubectl.apply_conformance_tests()
kubectl.wait_for_pod('sonobuoy', namespace='sonobuoy')
conformance_tests_complete = False
while not conformance_tests_complete:
result = kubectl.logs(
'sonobuoy', namespace='sonobuoy', tail=10)
assert result.ok, (
"Failed to read logs for conformance tests pod:\n{0}".format(
result.stdout + result.stderr))
if CONFORMANCE_DONE in result.stdout:
break
time.sleep(60)
test_results_path = extract_file_results_path(result.stdout)
result = kubectl.cp_from_pod('sonobuoy', 'sonobuoy', test_results_path,
'./conformance_results.tar.gz')
assert result.ok, "{}".format(result)
def test_run_conformance_from_config(kubectl):
"""
Runs conformance tests against an existing cluster
"""
run_conformance(kubectl, KUBE_CONFIG_PATH)
def test_create_cluster_run_conformance(
test_name, cloud_provider, rke_client, kubectl):
"""
Creates an RKE cluster, runs conformance tests against that cluster
"""
rke_template = 'cluster_install_config_1.yml.j2'
nodes = cloud_provider.create_multiple_nodes(3, test_name)
create_rke_cluster(rke_client, kubectl, nodes, rke_template)
run_conformance(kubectl, rke_client.kube_config_path())
delete_nodes(cloud_provider, nodes)
| 2,402 | 32.84507 | 79 |
py
|
rancher
|
rancher-master/tests/validation/tests/kubernetes_conformance/__init__.py
| 0 | 0 | 0 |
py
|
|
rancher
|
rancher-master/tests/validation/tests/rke/test_install_rbac.py
|
from .conftest import * # NOQA
from .common import * # NOQA
@pytest.mark.skip("Use as an example of how to test RBAC")
def test_install_rbac_1(test_name, cloud_provider, rke_client, kubectl):
"""
Create a three node cluster and runs validation to create pods
Removes cluster and validates components are removed
"""
rke_template = 'cluster_install_rbac_1.yml.j2'
nodes = cloud_provider.create_multiple_nodes(3, test_name)
create_rke_cluster(rke_client, kubectl, nodes, rke_template)
result = kubectl.create_resourse_from_yml(
'resources/k8s_ymls/daemonset_pods_per_node.yml', namespace='default')
assert result.ok, result.stderr
kubectl.create_ns('outside-role')
result = kubectl.create_resourse_from_yml(
'resources/k8s_ymls/daemonset_pods_per_node.yml',
namespace='outside-role')
assert result.ok, result.stderr
# Create role and rolebinding to user1 in namespace 'default'
# namespace is coded in role.yml and rolebinding.yml
result = kubectl.create_resourse_from_yml('resources/k8s_ymls/role.yml')
assert result.ok, result.stderr
result = kubectl.create_resourse_from_yml(
'resources/k8s_ymls/rolebinding.yml')
assert result.ok, result.stderr
# verify read in namespace
admin_call_pods = kubectl.get_resource('pods', namespace='default')
user_call_pods = kubectl.get_resource(
'pods', as_user='user1', namespace='default')
    # Make sure the number of pods returned without a user (as admin) is the
    # same as for user1 in this namespace
assert len(admin_call_pods['items']) > 0, "Pods should be greater than 0"
assert (len(admin_call_pods['items']) == len(user_call_pods['items'])), (
"Did not retrieve correct number of pods for 'user1'. Expected {0},"
"Retrieved {1}".format(
len(admin_call_pods['items']), len(user_call_pods['items'])))
# verify restrictions no pods return in get pods in different namespaces
user_call_pods = kubectl.get_resource(
'pods', as_user='user1', namespace='outside-role')
assert len(user_call_pods['items']) == 0, (
"Should not be able to get pods outside of defined user1 namespace")
# verify create fails as user for any namespace
result = kubectl.run(test_name + '-pod2', image='nginx', as_user='user1',
namespace='outside-role')
assert result.ok is False, (
"'user1' should not be able to create pods in other namespaces:\n{0}"
.format(result.stdout + result.stderr))
assert "cannot create" in result.stdout + result.stderr
result = kubectl.run(test_name + '-pod3', image='nginx', as_user='user1',
namespace='default')
assert result.ok is False, (
"'user1' should not be able to create pods in its own namespace:\n{0}"
.format(result.stdout + result.stderr))
assert "cannot create" in result.stdout + result.stderr
for node in nodes:
cloud_provider.delete_node(node)
| 3,017 | 43.382353 | 78 |
py
|
rancher
|
rancher-master/tests/validation/tests/rke/conftest.py
|
import os
import pytest
import random
from lib.aws import AmazonWebServices
from lib.rke_client import RKEClient
from lib.kubectl_client import KubectlClient
CLOUD_PROVIDER = os.environ.get("CLOUD_PROVIDER", 'AWS')
TEMPLATE_PATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'resources/rke_templates')
@pytest.fixture(scope='session')
def cloud_provider():
if CLOUD_PROVIDER == 'AWS':
return AmazonWebServices()
@pytest.fixture(scope='function')
def rke_client(cloud_provider):
return RKEClient(
master_ssh_key_path=cloud_provider.master_ssh_key_path,
template_path=TEMPLATE_PATH)
@pytest.fixture(scope='function')
def kubectl():
return KubectlClient()
@pytest.fixture(scope='function')
def test_name(request):
name = request.function.__name__.replace('test_', '').replace('_', '-')
# limit name length
name = name[0:20] if len(name) > 20 else name
return '{0}-{1}'.format(name, random.randint(100000, 1000000))
| 994 | 24.512821 | 75 |
py
|
rancher
|
rancher-master/tests/validation/tests/rke/test_upgrade.py
|
import os
from .conftest import * # NOQA
from .common import * # NOQA
import pytest
K8S_PREUPGRADE_IMAGE = os.environ.get(
'RANCHER_K8S_PREUPGRADE_IMAGE', 'v1.16.8-rancher1-3')
K8S_UPGRADE_IMAGE = os.environ.get(
'RANCHER_K8S_UPGRADE_IMAGE', 'v1.17.4-rancher1-3')
def test_upgrade_1(test_name, cloud_provider, rke_client, kubectl):
"""
Update cluster k8s service images, cluster config:
node0 - controlplane, etcd
node1 - worker
node2 - worker
"""
print(K8S_UPGRADE_IMAGE)
print(K8S_PREUPGRADE_IMAGE)
rke_template = 'cluster_upgrade_1_1.yml.j2'
all_nodes = cloud_provider.create_multiple_nodes(3, test_name)
rke_config = create_rke_cluster(
rke_client, kubectl, all_nodes, rke_template,
k8_rancher_image=K8S_PREUPGRADE_IMAGE)
network, dns_discovery = validate_rke_cluster(
rke_client, kubectl, all_nodes, 'beforeupgrade')
validate_k8s_service_images(all_nodes, K8S_PREUPGRADE_IMAGE,
rke_client, kubectl)
# New cluster needs to keep controlplane and etcd nodes the same
rke_config = create_rke_cluster(
rke_client, kubectl, all_nodes, rke_template,
k8_rancher_image=K8S_UPGRADE_IMAGE)
# The updated images versions need to be validated
validate_k8s_service_images(all_nodes, K8S_UPGRADE_IMAGE,
rke_client, kubectl)
# Rerun validation on existing and new resources
validate_rke_cluster(
rke_client, kubectl, all_nodes, 'beforeupgrade',
network_validation=network, dns_validation=dns_discovery)
validate_rke_cluster(
rke_client, kubectl, all_nodes, 'afterupgrade')
delete_nodes(cloud_provider, all_nodes)
def test_upgrade_2(test_name, cloud_provider, rke_client, kubectl):
"""
Update cluster k8s service images and add worker node, cluster config:
node0 - controlplane, etcd
node1 - worker
node2 - worker
Upgrade adds a worker node:
node0 - controlplane, etcd
node1 - worker
node2 - worker
node3 - worker
"""
rke_template = 'cluster_upgrade_2_1.yml.j2'
all_nodes = cloud_provider.create_multiple_nodes(4, test_name)
before_upgrade_nodes = all_nodes[0:-1]
rke_config = create_rke_cluster(
rke_client, kubectl, before_upgrade_nodes, rke_template,
k8_rancher_image=K8S_PREUPGRADE_IMAGE)
network, dns_discovery = validate_rke_cluster(
rke_client, kubectl, before_upgrade_nodes, 'beforeupgrade')
validate_k8s_service_images(before_upgrade_nodes, K8S_PREUPGRADE_IMAGE,
rke_client, kubectl)
# New cluster needs to keep controlplane and etcd nodes the same
rke_template = 'cluster_upgrade_2_2.yml.j2'
rke_config = create_rke_cluster(
rke_client, kubectl, all_nodes, rke_template,
k8_rancher_image=K8S_UPGRADE_IMAGE)
validate_k8s_service_images(all_nodes, K8S_UPGRADE_IMAGE,
rke_client, kubectl)
# Rerun validation on existing and new resources
validate_rke_cluster(
rke_client, kubectl, all_nodes, 'beforeupgrade',
network_validation=network, dns_validation=dns_discovery)
validate_rke_cluster(
rke_client, kubectl, all_nodes, 'afterupgrade')
delete_nodes(cloud_provider, all_nodes)
def test_upgrade_3(test_name, cloud_provider, rke_client, kubectl):
"""
Update cluster k8s service images and remove worker node, cluster config:
node0 - controlplane, etcd
node1 - worker
node2 - worker
Upgrade removes a worker node:
node0 - controlplane, etcd
node1 - worker
"""
rke_template = 'cluster_upgrade_3_1.yml.j2'
all_nodes = cloud_provider.create_multiple_nodes(3, test_name)
rke_config = create_rke_cluster(
rke_client, kubectl, all_nodes, rke_template,
k8_rancher_image=K8S_PREUPGRADE_IMAGE)
network, dns_discovery = validate_rke_cluster(
rke_client, kubectl, all_nodes, 'beforeupgrade')
validate_k8s_service_images(all_nodes, K8S_PREUPGRADE_IMAGE,
rke_client, kubectl)
# New cluster needs to keep controlplane and etcd nodes the same
rke_template = 'cluster_upgrade_3_2.yml.j2'
after_upgrade_nodes = all_nodes[0:-1]
rke_config = create_rke_cluster(
rke_client, kubectl, after_upgrade_nodes, rke_template,
k8_rancher_image=K8S_UPGRADE_IMAGE)
validate_k8s_service_images(after_upgrade_nodes, K8S_UPGRADE_IMAGE,
rke_client, kubectl)
# Rerun validation on existing and new resources
validate_rke_cluster(
rke_client, kubectl, after_upgrade_nodes, 'beforeupgrade',
network_validation=network, dns_validation=dns_discovery)
validate_rke_cluster(
rke_client, kubectl, after_upgrade_nodes, 'afterupgrade')
delete_nodes(cloud_provider, all_nodes)
@pytest.mark.skipif(
True, reason="This test is skipped for now")
def test_upgrade_4(test_name, cloud_provider, rke_client, kubectl):
"""
Update only one service in cluster k8s system images, cluster config:
node0 - controlplane, etcd
node1 - worker
node2 - worker
"""
rke_template = 'cluster_upgrade_4_1.yml.j2'
all_nodes = cloud_provider.create_multiple_nodes(3, test_name)
rke_config = create_rke_cluster(
rke_client, kubectl, all_nodes, rke_template,
k8_rancher_image=K8S_PREUPGRADE_IMAGE)
network, dns_discovery = validate_rke_cluster(
rke_client, kubectl, all_nodes, 'beforeupgrade')
validate_k8s_service_images(all_nodes, K8S_PREUPGRADE_IMAGE,
rke_client, kubectl)
# Upgrade only the scheduler, yaml will replace `upgrade_k8_rancher_image`
# for scheduler image only, the rest will keep pre-upgrade image
rke_config = create_rke_cluster(
rke_client, kubectl, all_nodes, rke_template,
k8_rancher_image=K8S_PREUPGRADE_IMAGE,
upgrade_k8_rancher_image=K8S_UPGRADE_IMAGE)
validate_k8s_service_images(all_nodes, K8S_UPGRADE_IMAGE,
rke_client, kubectl)
# Rerun validation on existing and new resources
validate_rke_cluster(
rke_client, kubectl, all_nodes, 'beforeupgrade',
network_validation=network, dns_validation=dns_discovery)
validate_rke_cluster(
rke_client, kubectl, all_nodes, 'afterupgrade')
delete_nodes(cloud_provider, all_nodes)
| 6,472 | 38.230303 | 78 |
py
|
rancher
|
rancher-master/tests/validation/tests/rke/test_install_roles.py
|
from .conftest import * # NOQA
from .common import * # NOQA
def test_install_roles_1(test_name, cloud_provider, rke_client, kubectl):
"""
Create cluster with single node with roles controlplane, worker, etcd
"""
rke_template = 'cluster_install_roles_1.yml.j2'
nodes = cloud_provider.create_multiple_nodes(1, test_name)
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, nodes,
remove_nodes=True)
def test_install_roles_2(test_name, cloud_provider, rke_client, kubectl):
"""
    Create cluster with 3 nodes, each with a single role:
node0 - controlplane, node1 - etcd, node2 - worker
"""
rke_template = 'cluster_install_roles_2.yml.j2'
nodes = cloud_provider.create_multiple_nodes(3, test_name)
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, nodes,
remove_nodes=True)
def test_install_roles_3(test_name, cloud_provider, rke_client, kubectl):
"""
Create cluster with 3 nodes having all three roles
"""
rke_template = 'cluster_install_roles_3.yml.j2'
nodes = cloud_provider.create_multiple_nodes(3, test_name)
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, nodes,
remove_nodes=True)
def test_install_roles_4(test_name, cloud_provider, rke_client, kubectl):
"""
    Create a 4 node cluster with nodes having these roles:
node0 - etcd, controlplane
node1 - etcd, worker
node2 - controlplane, worker
node3 - controlplane, etcd, worker
"""
rke_template = 'cluster_install_roles_4.yml.j2'
nodes = cloud_provider.create_multiple_nodes(4, test_name)
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, nodes,
remove_nodes=True)
| 1,800 | 33.634615 | 73 |
py
|
rancher
|
rancher-master/tests/validation/tests/rke/test_update_roles.py
|
from .conftest import * # NOQA
from .common import * # NOQA
def test_update_roles_1(test_name, cloud_provider, rke_client, kubectl):
"""
Update cluster adding a worker node
Before Update: Create three node cluster, each node with a single role
node0 - controlplane
node1 - etcd
node2 - worker
After Update: Adds a worker
node0 - controlplane
node1 - etcd
node2 - worker
node3 - worker
"""
all_nodes = cloud_provider.create_multiple_nodes(4, test_name)
# Only use three nodes at first
before_update_nodes = all_nodes[0:-1]
rke_template = 'cluster_update_roles_1_1.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, before_update_nodes,
base_namespace='beforeupdate')
# Update adding worker node, rerun on existing validation pods
rke_template = 'cluster_update_roles_1_2.yml.j2'
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, all_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
# Create another validation setup on updated cluster
validate_rke_cluster(rke_client, kubectl, all_nodes, 'afterupdate')
delete_nodes(cloud_provider, all_nodes)
def test_update_roles_2(test_name, cloud_provider, rke_client, kubectl):
"""
Update cluster adding a worker node, then remove original worker node
Before Update: Create three node cluster, each node with a single role
node0 - controlplane
node1 - etcd
node2 - worker
After Update: Adds a worker
node0 - controlplane
node1 - etcd
node2 - worker <- will be deleted on next update
node3 - worker
After 2nd Update: Deletes original worker
node0 - controlplane
node1 - etcd
node3 - worker
"""
all_nodes = cloud_provider.create_multiple_nodes(4, test_name)
before_update_nodes = all_nodes[0:-1]
removed_node_nodes = all_nodes[0:2] + all_nodes[3:]
# all_nodes[0:2] = [node0, node1]
# all_nodes[3:] = [node3]
# Only use three nodes at first
rke_template = 'cluster_update_roles_2_1.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, before_update_nodes,
base_namespace='beforeupdate')
# Update adding worker node, rerun on existing validation pods
rke_template = 'cluster_update_roles_2_2.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, all_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
# Create another validation setup on updated cluster
network_update1, dns_discovery_update1 = validate_rke_cluster(
rke_client, kubectl, all_nodes, 'afterupdate1')
# Update removing original worker node, rerun on existing validation pods
rke_template = 'cluster_update_roles_2_3.yml.j2'
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, removed_node_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
# Create another validation setup on updated cluster
validate_rke_cluster(
rke_client, kubectl, removed_node_nodes, 'afterupdate1',
network_validation=network_update1,
dns_validation=dns_discovery_update1)
validate_rke_cluster(
rke_client, kubectl, removed_node_nodes, 'afterupdate2')
delete_nodes(cloud_provider, all_nodes)
def test_update_roles_3(test_name, cloud_provider, rke_client, kubectl):
"""
Update cluster adding a controlplane node
Before Update: Create three node cluster, each node with a single role
node0 - controlplane
node1 - etcd
node2 - worker
After Update: Adds a controlplane
node0 - controlplane
node1 - etcd
node2 - worker
node3 - controlplane
"""
all_nodes = cloud_provider.create_multiple_nodes(4, test_name)
before_update_nodes = all_nodes[0:-1] # only use three nodes at first
# Only use three nodes at first
rke_template = 'cluster_update_roles_3_1.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, before_update_nodes,
base_namespace='beforeupdate')
# Update adding controlplane node, rerun on existing validation pods
rke_template = 'cluster_update_roles_3_2.yml.j2'
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, all_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
# Create another validation setup on updated cluster
validate_rke_cluster(rke_client, kubectl, all_nodes, 'afterupdate')
delete_nodes(cloud_provider, all_nodes)
def test_update_roles_4(test_name, cloud_provider, rke_client, kubectl):
"""
Update cluster adding a controlplane node, remove original controlplane
Before Update: Create three node cluster, each node with a single role
node0 - controlplane
node1 - etcd
node2 - worker
After Update: Adds a controlplane
node0 - controlplane <- will be deleted on next update
node1 - etcd
node2 - worker
node3 - controlplane
After 2nd Update: Deletes original controlplane
node1 - etcd
node2 - worker
node3 - controlplane
"""
all_nodes = cloud_provider.create_multiple_nodes(4, test_name)
before_update_nodes = all_nodes[0:-1] # only use three nodes at first
removed_node_nodes = all_nodes[1:]
rke_template = 'cluster_update_roles_4_1.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, before_update_nodes,
base_namespace='beforeupdate')
# Update adding controlplane node, rerun on existing validation pods
rke_template = 'cluster_update_roles_4_2.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, all_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
# Create another validation setup on updated cluster
network_update1, dns_discovery_update1 = validate_rke_cluster(
rke_client, kubectl, all_nodes, 'afterupdate1')
# Update removing original controlplane node
# rerun on existing validation pods
# all_nodes[1:] = [node1, node2, node3]
rke_template = 'cluster_update_roles_4_3.yml.j2'
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, removed_node_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
# Create another validation setup on updated cluster
validate_rke_cluster(
rke_client, kubectl, removed_node_nodes, 'afterupdate1',
network_validation=network_update1,
dns_validation=dns_discovery_update1)
validate_rke_cluster(
rke_client, kubectl, removed_node_nodes, 'afterupdate2')
delete_nodes(cloud_provider, all_nodes)
def test_update_roles_5(test_name, cloud_provider, rke_client, kubectl):
"""
    Update cluster adding an etcd node to a single node cluster
Before Update: Create single node cluster with all roles
node0 - controlplane, etcd, worker
    After Update: Adds an etcd
node0 - controlplane, etcd, worker
node1 - etcd
"""
all_nodes = cloud_provider.create_multiple_nodes(2, test_name)
before_update_nodes = all_nodes[0:-1] # only use one node at first
rke_template = 'cluster_update_roles_5_1.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, before_update_nodes,
base_namespace='beforeupdate')
# Update adding etcd node, rerun on existing validation pods
rke_template = 'cluster_update_roles_5_2.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, all_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
# Create another validation setup on updated cluster
validate_rke_cluster(rke_client, kubectl, all_nodes, 'afterupdate')
delete_nodes(cloud_provider, all_nodes)
def test_update_roles_6(test_name, cloud_provider, rke_client, kubectl):
"""
Update cluster adding two etcd nodes to a single node cluster
Before Update: Create single node cluster with all roles
node0 - controlplane, etcd, worker
After Update: Adds two etcd nodes
node0 - controlplane, etcd, worker
node1 - etcd
node2 - etcd
"""
all_nodes = cloud_provider.create_multiple_nodes(3, test_name)
before_update_nodes = all_nodes[0:-2] # only use one node at first
rke_template = 'cluster_update_roles_6_1.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, before_update_nodes,
base_namespace='beforeupdate')
# Update adding 2 etcd nodes, rerun on existing validation pods
rke_template = 'cluster_update_roles_6_2.yml.j2'
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, all_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
# Create another validation setup on updated cluster
validate_rke_cluster(rke_client, kubectl, all_nodes, 'afterupdate')
delete_nodes(cloud_provider, all_nodes)
def test_update_roles_7(test_name, cloud_provider, rke_client, kubectl):
"""
Update cluster deleting one node with all roles in three node cluster
Before Update: Create three node cluster with all roles
node0 - controlplane, etcd, worker
node1 - worker
node2 - etcd
After Update: Remove last node
node0 - controlplane, etcd, worker
node1 - worker
"""
all_nodes = cloud_provider.create_multiple_nodes(3, test_name)
removed_node_nodes = all_nodes[0:-1]
rke_template = 'cluster_update_roles_7_1.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, all_nodes,
base_namespace='beforeupdate')
    # Update removing etcd node, rerun on existing validation pods
rke_template = 'cluster_update_roles_7_2.yml.j2'
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, removed_node_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
# Create another validation setup on updated cluster
validate_rke_cluster(
rke_client, kubectl, removed_node_nodes, 'afterupdate')
delete_nodes(cloud_provider, all_nodes)
def test_update_roles_8(test_name, cloud_provider, rke_client, kubectl):
"""
Create a single node cluster, add second node with all roles, and then
delete the original node
Before Update: Create single node cluster with all roles
node0 - controlplane, etcd, worker
After Update: Add second node with all roles
node0 - controlplane, etcd, worker
node1 - controlplane, etcd, worker
After second Update: Remove original node0
node1 - controlplane, etcd, worker
"""
all_nodes = cloud_provider.create_multiple_nodes(2, test_name)
before_update_nodes = all_nodes[0:-1]
removed_node_nodes = all_nodes[1:]
    # Initial cluster
rke_template = 'cluster_update_roles_8_1.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, before_update_nodes,
base_namespace='beforeupdate')
# Update create a second node will all roles
rke_template = 'cluster_update_roles_8_2.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, all_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
# Create another validation setup on updated cluster
network_update1, dns_discovery_update1 = validate_rke_cluster(
rke_client, kubectl, all_nodes, 'afterupdate1')
    # Update removing original node with all roles
rke_template = 'cluster_update_roles_8_1.yml.j2'
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, removed_node_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
validate_rke_cluster(
rke_client, kubectl, removed_node_nodes, 'afterupdate1',
network_validation=network_update1,
dns_validation=dns_discovery_update1)
# Create another validation setup on updated cluster
validate_rke_cluster(
rke_client, kubectl, removed_node_nodes, 'afterupdate2')
delete_nodes(cloud_provider, all_nodes)
def test_update_roles_9(test_name, cloud_provider, rke_client, kubectl):
"""
Update cluster adding a controlplane,worker node
Before Update: Create three node cluster, each node with a single role
node0 - controlplane
node1 - etcd
node2 - worker
After Update: Adds a controlplane, worker
node0 - controlplane
node1 - etcd
node2 - worker
node3 - controlplane, worker
"""
all_nodes = cloud_provider.create_multiple_nodes(4, test_name)
before_update_nodes = all_nodes[0:-1] # only use three nodes at first
    # Initial cluster
rke_template = 'cluster_update_roles_9_1.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, before_update_nodes,
base_namespace='beforeupdate')
# Update adds node with roles [controlplane,worker]
rke_template = 'cluster_update_roles_9_2.yml.j2'
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, all_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
# Create another validation setup on updated cluster
validate_rke_cluster(rke_client, kubectl, all_nodes, 'afterupdate')
delete_nodes(cloud_provider, all_nodes)
def test_update_roles_10(test_name, cloud_provider, rke_client, kubectl):
"""
    Update cluster adding a controlplane, etcd node
Before Update: Create three node cluster, each node with a single role
node0 - controlplane
node1 - etcd
node2 - worker
After Update: Adds a controlplane, etcd
node0 - controlplane
node1 - etcd
node2 - worker
node3 - controlplane, etcd
"""
all_nodes = cloud_provider.create_multiple_nodes(4, test_name)
before_update_nodes = all_nodes[0:-1] # only use three nodes at first
    # Initial cluster
rke_template = 'cluster_update_roles_10_1.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, before_update_nodes,
base_namespace='beforeupdate')
# Update adds node with roles [controlplane,etcd]
rke_template = 'cluster_update_roles_10_2.yml.j2'
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, all_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
# Create another validation setup on updated cluster
validate_rke_cluster(rke_client, kubectl, all_nodes, 'afterupdate')
delete_nodes(cloud_provider, all_nodes)
def test_update_roles_11(test_name, cloud_provider, rke_client, kubectl):
"""
Update cluster adding a worker, etcd node
Before Update: Create three node cluster, each node with a single role
node0 - controlplane
node1 - etcd
node2 - worker
    After Update: Adds an etcd, worker
node0 - controlplane
node1 - etcd
node2 - worker
node3 - worker, etcd
"""
all_nodes = cloud_provider.create_multiple_nodes(4, test_name)
before_update_nodes = all_nodes[0:-1] # only use three nodes at first
    # Initial cluster
rke_template = 'cluster_update_roles_11_1.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, before_update_nodes,
base_namespace='beforeupdate')
# Update adds node with roles [worker,etcd]
rke_template = 'cluster_update_roles_11_2.yml.j2'
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, all_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
# Create another validation setup on updated cluster
validate_rke_cluster(rke_client, kubectl, all_nodes, 'afterupdate')
delete_nodes(cloud_provider, all_nodes)
def test_update_roles_12(test_name, cloud_provider, rke_client, kubectl):
"""
    Update cluster replacing all controlplane nodes with a new one
Before Update:
node0 - etcd
node1 - worker
node2 - controlplane
node3 - controlplane
node4 - controlplane
    After Update: Removes the existing controlplane nodes and adds a new one
    node0 - etcd
    node1 - worker
    node5 - controlplane <- New controlplane node
"""
all_nodes = cloud_provider.create_multiple_nodes(6, test_name)
before_update_nodes = all_nodes[0:-1] # only use five nodes at first
# all_nodes[0:2] = [node0, node1]
# all_nodes[5:] = [node5]
removed_node_nodes = all_nodes[0:2] + all_nodes[5:]
    # Initial cluster
rke_template = 'cluster_update_roles_12_1.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, before_update_nodes,
base_namespace='beforeupdate')
# Update removes all existing controlplane nodes,
# adds node with [controlplane]
rke_template = 'cluster_update_roles_12_2.yml.j2'
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, removed_node_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
# Create another validation setup on updated cluster
validate_rke_cluster(
rke_client, kubectl, removed_node_nodes, 'afterupdate')
delete_nodes(cloud_provider, all_nodes)
def test_update_roles_13(test_name, cloud_provider, rke_client, kubectl):
"""
Update cluster changing a worker role to controlplane
Before Update: Create four node cluster, each node with these roles:
node0 - controlplane
node1 - controlplane, etcd
node2 - worker, etcd
node3 - worker, etcd
After Update: Change node2 worker to controlplane
node0 - controlplane
node1 - controlplane, etcd
node2 - controlplane, etcd
node3 - worker, etcd
"""
all_nodes = cloud_provider.create_multiple_nodes(4, test_name)
rke_template = 'cluster_update_roles_13_1.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, all_nodes,
base_namespace='beforeupdate')
# Update changes role worker to controlplane on node2
rke_template = 'cluster_update_roles_13_2.yml.j2'
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, all_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
# Create another validation setup on updated cluster
validate_rke_cluster(rke_client, kubectl, all_nodes, 'afterupdate')
delete_nodes(cloud_provider, all_nodes)
def test_update_roles_14(test_name, cloud_provider, rke_client, kubectl):
"""
Update cluster changing a controlplane, etcd to etcd only
Before Update: Create four node cluster, each node with these roles:
node0 - controlplane
node1 - controlplane, etcd
node2 - worker, etcd
node3 - worker, etcd
After Update: Change node1 controlplane, etcd to etcd
node0 - controlplane
node1 - etcd
node2 - worker, etcd
node3 - worker, etcd
"""
all_nodes = cloud_provider.create_multiple_nodes(4, test_name)
rke_template = 'cluster_update_roles_14_1.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, all_nodes,
base_namespace='beforeupdate')
# Update remove controlplane on node1
rke_template = 'cluster_update_roles_14_2.yml.j2'
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, all_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
# Create another validation setup on updated cluster
validate_rke_cluster(rke_client, kubectl, all_nodes, 'afterupdate')
delete_nodes(cloud_provider, all_nodes)
def test_update_roles_15(test_name, cloud_provider, rke_client, kubectl):
"""
Update cluster remove worker role from [worker,etcd] node
Before Update: Create four node cluster, each node with these roles:
node0 - controlplane
node1 - controlplane, etcd
node2 - worker, etcd
node3 - worker, etcd
After Update: Change remove worker role node2
node0 - controlplane
node1 - controlplane, etcd
node2 - etcd
node3 - worker, etcd
"""
all_nodes = cloud_provider.create_multiple_nodes(4, test_name)
rke_template = 'cluster_update_roles_15_1.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, all_nodes,
base_namespace='beforeupdate')
# Update remove worker role node2
rke_template = 'cluster_update_roles_15_2.yml.j2'
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, all_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
# Create another validation setup on updated cluster
validate_rke_cluster(rke_client, kubectl, all_nodes, 'afterupdate')
delete_nodes(cloud_provider, all_nodes)
| 21,968 | 38.300537 | 79 |
py
|
rancher
|
rancher-master/tests/validation/tests/rke/common.py
|
import time
import os
import json
k8s_resource_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"resources/k8s_ymls/")
# Global expectedimagesdict declared to store the images for a specific
# k8s Version
expectedimagesdict = {}
def create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, nodes,
base_namespace="ns", network_validation=None, dns_validation=None,
teardown=False, remove_nodes=False, etcd_private_ip=False):
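    """
    Helper shared by the rke tests: runs 'rke up' with the given template and
    nodes, then runs validate_rke_cluster against the result. The network and
    DNS validation objects are returned so callers can pass them back in after
    a cluster update to re-validate the same resources. Optionally tears down
    the validation resources and deletes the nodes.
    """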
create_rke_cluster(rke_client, kubectl, nodes, rke_template)
network_validation, dns_validation = validate_rke_cluster(
rke_client, kubectl, nodes, base_ns=base_namespace,
network_validation=network_validation, dns_validation=dns_validation,
teardown=teardown, etcd_private_ip=etcd_private_ip)
if remove_nodes:
delete_nodes(cloud_provider, nodes)
return network_validation, dns_validation
def delete_nodes(cloud_provider, nodes):
for node in nodes:
cloud_provider.delete_node(node)
def create_rke_cluster(
rke_client, kubectl, nodes, rke_template, **rke_template_kwargs):
"""
Creates a cluster and returns the rke config as a python dictionary
"""
# create rke cluster yml
config_yml, nodes = rke_client.build_rke_template(
rke_template, nodes, **rke_template_kwargs)
# run rke up
result = rke_client.up(config_yml)
    # point kubectl at the kubeconfig generated by rke up so k8s is reachable
kubectl.kube_config_path = rke_client.kube_config_path()
return rke_client.convert_to_dict(config_yml)
def validate_rke_cluster(rke_client, kubectl, nodes, base_ns='one',
network_validation=None, dns_validation=None,
teardown=False, etcd_private_ip=False):
"""
    General rke up test validation; runs validation methods for:
    - node roles validation
    - pod-to-pod intercommunication
- dns service discovery validation
If teardown is true, removes any resources created for validation
"""
validation_node_roles(nodes, kubectl.get_nodes(), etcd_private_ip)
if network_validation is None:
network_validation = PodIntercommunicationValidation(kubectl, base_ns)
network_validation.setup()
if dns_validation is None:
dns_validation = DNSServiceDiscoveryValidation(kubectl, base_ns)
dns_validation.setup()
network_validation.validate()
dns_validation.validate()
if teardown:
network_validation.teardown()
dns_validation.teardown()
return network_validation, dns_validation
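# A minimal usage sketch of the reuse pattern above (assumes 'nodes' were
# provisioned by a cloud provider fixture, as in the update-role tests):
#
#   network, dns = validate_rke_cluster(rke_client, kubectl, nodes, base_ns='a')
#   # ... run 'rke up' again with an updated cluster.yml ...
#   validate_rke_cluster(rke_client, kubectl, nodes, base_ns='a',
#                        network_validation=network, dns_validation=dns)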
def match_nodes(nodes, k8s_nodes):
"""
Builds a list of tuples, where:
nodes_to_k8s_nodes[0][0] is the node object matched to
nodes_to_k8s_nodes[0][1] is the k8s info for the same node
"""
k8s_node_names = []
for k8s_node in k8s_nodes['items']:
k8s_node_names.append(
k8s_node['metadata']['labels']['kubernetes.io/hostname'])
nodes_to_k8s_nodes = []
for node in nodes:
for k8s_node in k8s_nodes['items']:
hostname = k8s_node['metadata']['labels']['kubernetes.io/hostname']
if hostname in node.node_name:
nodes_to_k8s_nodes.append((node, k8s_node))
break
else:
            raise Exception(
                "Did not find provisioned node's '{0}' corresponding node "
                "resource in cluster: {1}".format(
node.node_name, k8s_node_names))
return nodes_to_k8s_nodes
def assert_containers_exist_for_roles(roles, containers):
# All nodes will have these containers:
expect_containers = ['kubelet', 'kube-proxy']
# Add extra containers depending on roles present
if 'controlplane' in roles:
expect_containers.extend(
['kube-scheduler', 'kube-controller-manager', 'kube-apiserver'])
else:
expect_containers.extend(['nginx-proxy'])
if 'etcd' in roles:
expect_containers.extend(['etcd'])
missing_containers = expect_containers[:]
for container in containers:
if container in expect_containers:
missing_containers.remove(container)
assert len(missing_containers) == 0, \
"Missing expected containers for role '{0}': {1}".format(
roles, missing_containers)
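# Illustrative expectations implied by the mapping above (not exhaustive):
#   roles=['controlplane']   -> kubelet, kube-proxy, kube-scheduler,
#                               kube-controller-manager, kube-apiserver
#   roles=['worker', 'etcd'] -> kubelet, kube-proxy, nginx-proxy, etcd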
def wait_for_etcd_cluster_health(node, etcd_private_ip=False):
result = ''
endpoints = "127.0.0.1"
if etcd_private_ip:
endpoints = node.private_ip_address
etcd_tls_cmd = (
'ETCDCTL_API=2 etcdctl --endpoints "https://'+endpoints+':2379" '
' --ca-file /etc/kubernetes/ssl/kube-ca.pem --cert-file '
' $ETCDCTL_CERT --key-file '
' $ETCDCTL_KEY cluster-health')
print(etcd_tls_cmd)
start_time = time.time()
    while time.time() - start_time < 120:
result = node.docker_exec('etcd', "sh -c '" + etcd_tls_cmd + "'")
print("**RESULT**")
print(result)
if 'cluster is healthy' in result:
break
time.sleep(5)
return result
def verify_metrics_server_addon_images(k8sversion, kubectl,
namespace, selector):
metricserver = get_component_version(k8sversion,
"rancher/metrics-server")
# Sleep to allow the metrics server component to get to running state
time.sleep(10)
verify_component_status_with_kubectl(kubectl, namespace, selector,
metricserver)
def verify_ingress_addon_images(k8sversion, kubectl, namespace,
selector1, selector2):
ingressdefaultbackend = \
get_component_version(k8sversion,
"rancher/nginx-ingress-controller-defaultbackend"
)
nginxingresscontoller =\
get_component_version(k8sversion,
"rancher/nginx-ingress-controller")
# Sleep to allow the ingress addon components to get to running state
time.sleep(5)
verify_component_status_with_kubectl(kubectl, namespace, selector1,
nginxingresscontoller)
verify_component_status_with_kubectl(kubectl, namespace, selector2,
ingressdefaultbackend)
def verify_dns_addon_images(k8sversion, kubectl, namespace,
selector):
coredns = get_component_version(k8sversion,
"rancher/coredns-coredns")
# Sleep to allow the dns addon component to get to running state
time.sleep(5)
verify_component_status_with_kubectl(kubectl, namespace, selector, coredns)
def verify_networking_addon_images(k8sversion, kubectl,
namespace, selector):
flannel = get_component_version(k8sversion,
"rancher/coreos-flannel")
calico = get_component_version(k8sversion,
"rancher/calico-node")
# Sleep to allow the network addon component to get to running state
time.sleep(5)
verify_component_status_with_kubectl(kubectl, namespace, selector, calico,
flannel)
def verify_component_status_with_kubectl(kubectl, namespace, selector, *args):
# Method to verify addon status and images
command = "get pod --namespace " + namespace + " -l " + selector
res = kubectl.execute_kubectl_cmd(command, json_out=True)
result = json.loads(res)
timeout = 180
start = time.time()
# Check if the pod is running
for pod in result["items"]:
podstatus = pod["status"]["phase"]
podname = pod["metadata"]["name"]
print("Pod name is " + podname)
podreloadcommand = "get pod " + podname + " --namespace " + namespace
while (podstatus != "Running"):
if time.time() - start > timeout:
raise AssertionError("Timed out waiting to reach running state")
time.sleep(.5)
podresult = kubectl.execute_kubectl_cmd(podreloadcommand,
json_out=True)
podresult = json.loads(podresult)
podname = podresult["metadata"]["name"]
print("Pod name is " + podname)
podstatus = podresult["status"]["phase"]
print("Pod status is " + podstatus)
assert True
# Verify the component images in the pods
testresult = kubectl.execute_kubectl_cmd(command, json_out=True)
updatedresult = json.loads(testresult)
for pod in updatedresult["items"]:
print("Required Resource Image: ")
print(args[0])
podstatus = pod["status"]["phase"]
if(podstatus == "Running"):
for i in range(0, len(args)):
print(pod["status"]["containerStatuses"][i]["image"])
assert pod["status"]["containerStatuses"][i]["image"] == args[i]
def get_system_images(rke_client, k8s_version):
# Method to obtain the system images for a k8s version from rke cli
command = ("rke config --system-images --version " + k8s_version)
print(command)
rke_system_images_dict = rke_client.run_command(command)
result = rke_system_images_dict.split("\n")
# Removing the first item which is not required
result.pop(0)
print(result)
return result
def get_component_version(k8s_version, componentname):
# Method to obtain the image version for a specific component
systemimageslist = expectedimagesdict[k8s_version]["rkesystemimages"]
print(systemimageslist)
for item in systemimageslist:
itemlist = item.split(":")
if componentname == itemlist[0]:
print(componentname)
componentversion = item
print("VERSION IS " + componentversion)
return componentversion
def build_expectedimages_dict(k8s_version, rke_client):
# Build the expected image list from rke system images list
if k8s_version in expectedimagesdict.keys():
return expectedimagesdict[k8s_version]
else:
expectedimagesdict[k8s_version] = {}
result = get_system_images(rke_client, k8s_version)
for item in result:
itemlist = item.split(":")
if "rancher/hyperkube" == itemlist[0]:
expectedimagesdict[k8s_version]["kube-proxy"] = item
expectedimagesdict[k8s_version]["kube-scheduler"] = item
expectedimagesdict[k8s_version]["kube-controller-manager"] \
= item
expectedimagesdict[k8s_version]["kube-apiserver"] = item
expectedimagesdict[k8s_version]["kubelet"] = item
            if "rancher/coreos-etcd" == itemlist[0]:
expectedimagesdict[k8s_version]["etcd"] = item
if "rancher/rke-tools" == itemlist[0]:
expectedimagesdict[k8s_version]["service-sidekick"] = item
expectedimagesdict[k8s_version]["rkesystemimages"] = result
return expectedimagesdict[k8s_version]
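# Sketch of a resulting per-version entry (image tags are hypothetical):
#   expectedimagesdict["<k8s_version>"] == {
#       "kube-proxy": "rancher/hyperkube:<tag>",
#       "kube-scheduler": "rancher/hyperkube:<tag>",
#       ...
#       "etcd": "rancher/coreos-etcd:<tag>",
#       "service-sidekick": "rancher/rke-tools:<tag>",
#       "rkesystemimages": [...],  # full output of 'rke config --system-images'
#   }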
def validation_node_roles(nodes, k8s_nodes, etcd_private_ip=False):
"""
    Validates each node's labels match its roles
    Validates each node's running containers match its roles
    Validates etcd cluster health via the etcdctl cluster-health command
    Validates each worker node's nginx-proxy conf file lists the controlplane IPs
"""
role_matcher = {
'worker': 'node-role.kubernetes.io/worker',
'etcd': 'node-role.kubernetes.io/etcd',
'controlplane': 'node-role.kubernetes.io/controlplane'}
controlplane_ips = []
etcd_members = []
for node in nodes:
if 'controlplane' in node.roles:
controlplane_ips.append(node.node_address)
if 'etcd' in node.roles:
etcd_members.append(node.node_address)
nodes_to_k8s_nodes = match_nodes(nodes, k8s_nodes)
for node, k8s_node in nodes_to_k8s_nodes:
containers = list(node.docker_ps().keys())
assert_containers_exist_for_roles(node.roles, containers)
k8s_node_labels = list(k8s_node['metadata']['labels'].keys())
for role in node.roles:
assert role_matcher[role] in k8s_node_labels, \
"Expected label '{0}' not in labels: {1}".format(
role_matcher[role], k8s_node_labels)
# nodes with controlplane roles do not have nginx-proxy
if (role == 'worker' or role == 'etcd') and \
('controlplane' not in node.roles):
result = node.docker_exec(
'nginx-proxy', 'cat /etc/nginx/nginx.conf')
for ip in controlplane_ips:
assert 'server {0}:6443'.format(ip) in result, (
"Expected to find all controlplane node addresses {0}"
"in nginx.conf: {1}".format(controlplane_ips, result))
if role == 'etcd':
if len(node.roles) == 1:
for taint in k8s_node['spec']['taints']:
if taint['key'] == 'node-role.kubernetes.io/etcd':
assert taint['effect'] == 'NoExecute', (
"{0} etcd-only node's taint is not 'NoExecute'"
": {1}".format(node.node_name, taint['effect'])
)
# found, do not complete for loop
# or else an assertion will be raised
break
else:
assert False, \
"Expected to find taint for etcd-only node"
# check etcd membership and cluster health
result = wait_for_etcd_cluster_health(node, etcd_private_ip)
for member in etcd_members:
expect = "got healthy result from https://{}".format(
member)
assert expect in result, result
assert 'cluster is healthy' in result, result
class PodIntercommunicationValidation(object):
def __init__(self, kubectl, base_namespace):
self.kubectl = kubectl
self.yml_file = (
k8s_resource_dir + 'daemonset_pods_per_node.yml')
self.ns_out = 'daemonset-out-{}'.format(base_namespace)
self.ns_in = 'daemonset-in-{}'.format(base_namespace)
self.selector = 'name=daemonset-test1'
def setup(self):
self.kubectl.create_ns(self.ns_out)
result = self.kubectl.create_resourse_from_yml(
self.yml_file, namespace=self.ns_out)
self.kubectl.create_ns(self.ns_in)
result = self.kubectl.create_resourse_from_yml(
self.yml_file, namespace=self.ns_in)
def validate(self):
"""
Gets pod name, pod ip, host ip, and containers
For each pod, use kubectl exec to ping all other pod ips
Asserts that each ping is successful
        (daemonset teardown is handled separately by teardown())
"""
# get number of expected pods
worker_nodes = self.kubectl.get_resource(
'nodes', selector='node-role.kubernetes.io/worker=true')
master_nodes = self.kubectl.get_resource(
'nodes', selector='node-role.kubernetes.io/controlplane=true')
node_names = [n['metadata']['name'] for n in worker_nodes['items']]
expected_number_pods = len(worker_nodes['items'])
"""
for master_node in master_nodes['items']:
if master_node['metadata']['name'] not in node_names:
expected_number_pods += 1
"""
# get pods on each node/namespaces to test intercommunication
# with pods on different nodes
pods_to_ping = self.kubectl.wait_for_pods(
selector=self.selector, namespace=self.ns_in,
number_of_pods=expected_number_pods)
pods_from_which_ping = self.kubectl.wait_for_pods(
selector=self.selector, namespace=self.ns_out,
number_of_pods=expected_number_pods)
# verify daemonset pods are on all worker nodes
assert len(pods_to_ping['items']) == expected_number_pods, (
"DaemonSet number of pods '{0}', does not match number of worker "
"nodes '{1}'".format(
len(pods_to_ping['items']), expected_number_pods))
assert len(pods_from_which_ping['items']) == expected_number_pods, (
"DaemonSet number of pods '{0}', does not match number of worker "
"nodes '{1}'".format(
len(pods_from_which_ping['items']), expected_number_pods))
pod_ips_to_ping = []
for pod in pods_to_ping['items']:
pod_ips_to_ping.append(pod['status']['podIP'])
pod_names_to_ping_from = []
for pod in pods_from_which_ping['items']:
pod_names_to_ping_from.append(pod['metadata']['name'])
        # From each pod of the daemonset in namespace ns_out, ping all pods
        # in the second daemonset in ns_in
expect_result = \
'1 packets transmitted, 1 received, 0% packet loss'
for pod_name in pod_names_to_ping_from:
for pod_ip in pod_ips_to_ping:
cmd = 'ping -c 1 {0}'.format(pod_ip)
for _ in range(10):
result = self.kubectl.exec_cmd(pod_name, cmd, self.ns_out)
assert expect_result in result, (
"Could not ping pod with ip {0} from pod {1}:\n"
"stdout: {2}\n".format(
pod_ip, pod_name, result))
def teardown(self):
"""
Deletes a daemonset of pods and namespace
"""
result = self.kubectl.delete_resourse_from_yml(
self.yml_file, namespace=self.ns_out)
result = self.kubectl.delete_resourse_from_yml(
self.yml_file, namespace=self.ns_in)
self.kubectl.delete_resourse('namespace', self.ns_out)
self.kubectl.delete_resourse('namespace', self.ns_in)
class DNSServiceDiscoveryValidation(object):
def __init__(self, kubectl, base_namespace):
namespace_one = 'nsone-{}'.format(base_namespace)
namespace_two = 'nstwo-{}'.format(base_namespace)
self.namespace = namespace_one
self.services = {
'k8test1': {
'namespace': namespace_one,
'selector': 'k8s-app=k8test1-service',
'yml_file': k8s_resource_dir + 'service_k8test1.yml',
},
'k8test2': {
'namespace': namespace_two,
'selector': 'k8s-app=k8test2-service',
'yml_file': k8s_resource_dir + 'service_k8test2.yml',
}
}
self.pod_selector = 'k8s-app=pod-test-util'
self.kubectl = kubectl
def setup(self):
for service_name, service_info in self.services.items():
# create service
result = self.kubectl.create_ns(service_info['namespace'])
result = self.kubectl.create_resourse_from_yml(
service_info['yml_file'], namespace=service_info['namespace'])
result = self.kubectl.create_resourse_from_yml(
k8s_resource_dir + 'single_pod.yml',
namespace=self.namespace)
def validate(self):
# wait for exec pod to be ready before validating
pods = self.kubectl.wait_for_pods(
selector=self.pod_selector, namespace=self.namespace)
exec_pod_name = pods['items'][0]['metadata']['name']
# Get Cluster IP and pod names per service
dns_records = {}
for service_name, service_info in self.services.items():
# map expected IP to dns service name
dns = "{0}.{1}.svc.cluster.local".format(
service_name, service_info['namespace'])
svc = self.kubectl.get_resource(
'svc', name=service_name, namespace=service_info['namespace'])
service_pods = self.kubectl.wait_for_pods(
selector=service_info['selector'],
namespace=service_info['namespace'], number_of_pods=2)
svc_cluster_ip = svc["spec"]["clusterIP"]
dns_records[dns] = {
'ip': svc_cluster_ip,
'pods': [p['metadata']['name'] for p in service_pods['items']]
}
for dns_record, dns_info in dns_records.items():
# Check dns resolution
expected_ip = dns_info['ip']
cmd = 'dig {0} +short'.format(dns_record)
result = self.kubectl.exec_cmd(exec_pod_name, cmd, self.namespace)
assert expected_ip in result, (
"Unable to test DNS resolution for service {0}: {1}".format(
dns_record, result.stderr))
# Check Cluster IP reaches pods in service
pods_names = dns_info['pods']
cmd = 'curl "http://{0}/name.html"'.format(dns_record)
result = self.kubectl.exec_cmd(exec_pod_name, cmd, self.namespace)
print(result)
print(pods_names)
assert result.rstrip() in pods_names, (
"Service ClusterIP does not reach pods {0}".format(
dns_record))
def teardown(self):
self.kubectl.delete_resourse(
'pod', 'pod-test-util', namespace=self.namespace)
for service_name, service_info in self.services.items():
self.kubectl.delete_resourse_from_yml(
service_info['yml_file'], namespace=service_info['namespace'])
self.kubectl.delete_resourse(
'namespace', service_info['namespace'])
def validate_k8s_service_images(nodes, k8s_version, rke_client, kubectl):
"""
expectedimages dictionary will be built in this method
This verifies that the nodes have the correct image version
This does not validate containers per role,
assert_containers_exist_for_roles method does that
"""
expectedimagesdict = build_expectedimages_dict(k8s_version, rke_client)
print(expectedimagesdict)
for node in nodes:
containers = node.docker_ps()
allcontainers = node.docker_ps(includeall=True)
print("Container Dictionary ")
print(containers)
print("All containers dictionary")
print(allcontainers)
sidekickservice = "service-sidekick"
for key in expectedimagesdict.keys():
servicename = key
if servicename in containers:
print("Service name")
print(servicename)
print(expectedimagesdict[servicename])
print(containers[servicename])
assert expectedimagesdict[servicename] == \
containers[servicename], (
"K8s service '{0}' does not match config version "
"{1}, found {2} on node {3}".format(
servicename, expectedimagesdict[servicename],
containers[servicename], node.node_name))
if sidekickservice in expectedimagesdict.keys():
if sidekickservice in allcontainers:
print("sidekick-service in allcontainers")
print(sidekickservice)
print(expectedimagesdict[sidekickservice])
print(allcontainers[sidekickservice])
assert expectedimagesdict[sidekickservice] == \
allcontainers[sidekickservice], (
"K8s service '{0}' does not match config version "
"{1}, found {2} on node {3}".format(
sidekickservice, expectedimagesdict[sidekickservice],
allcontainers[sidekickservice], node.node_name))
verify_ingress_addon_images(k8s_version, kubectl,
"ingress-nginx", "app=ingress-nginx",
"app=default-http-backend")
verify_networking_addon_images(k8s_version, kubectl,
"kube-system", "k8s-app=canal")
verify_metrics_server_addon_images(k8s_version, kubectl,
"kube-system", "k8s-app=metrics-server")
verify_dns_addon_images(k8s_version, kubectl,
"kube-system", "k8s-app=kube-dns")
def validate_remove_cluster(nodes):
"""
    Validates that all k8s service containers were removed from each node:
    ['kubelet', 'kube-proxy', 'kube-scheduler', 'kube-controller-manager',
     'kube-apiserver', 'nginx-proxy']
    Validates that files were removed from these directories:
    ['/etc/kubernetes/ssl', '/var/lib/etcd',
     '/etc/cni', '/opt/cni', '/var/run/calico']
"""
k8s_services = [
'kubelet', 'kube-proxy', 'kube-scheduler', 'kube-controller-manager',
'kube-apiserver', 'nginx-proxy'
]
rke_cleaned_directories = [
        '/etc/kubernetes/ssl', '/var/lib/etcd', '/etc/cni', '/opt/cni',
'/var/run/calico'
]
for node in nodes:
containers = node.docker_ps()
for service in k8s_services:
assert service not in list(containers.keys()), (
"Found kubernetes service '{0}' still running on node '{1}'"
.format(service, node.node_name))
for directory in rke_cleaned_directories:
result = node.execute_command('ls ' + directory)
assert result[0] == '', (
"Found a non-empty directory '{0}' after remove on node '{1}'"
.format(directory, node.node_name))
def validate_dashboard(kubectl):
# Start dashboard
    # Validate it is reachable
pass
| 25,659 | 39.601266 | 80 |
py
|
rancher
|
rancher-master/tests/validation/tests/rke/__init__.py
| 0 | 0 | 0 |
py
|
|
rancher
|
rancher-master/tests/validation/tests/rke/test_remove_cluster.py
|
from .conftest import * # NOQA
from .common import * # NOQA
def test_remove_1(test_name, cloud_provider, rke_client, kubectl):
"""
    Creates a three node cluster and runs validation to create pods
    Removes the cluster and validates its components are removed
    Then creates a new cluster on the same nodes and validates it
"""
rke_template = 'cluster_install_config_1.yml.j2'
nodes = cloud_provider.create_multiple_nodes(3, test_name)
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, nodes)
rke_client.remove()
validate_remove_cluster(nodes)
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, nodes,
remove_nodes=True)
| 718 | 31.681818 | 66 |
py
|
rancher
|
rancher-master/tests/validation/tests/rke/test_install_config.py
|
from .conftest import * # NOQA
from .common import * # NOQA
def test_install_config_1(test_name, cloud_provider, rke_client, kubectl):
"""
Node Address specified as just IP and using only this in the node spec
Specific kubernetes_version can be used
"""
rke_template = 'cluster_install_config_11.yml.j2'
nodes = cloud_provider.create_multiple_nodes(3, test_name)
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, nodes,
remove_nodes=True)
def test_install_config_2(test_name, cloud_provider, rke_client, kubectl):
"""
Node Address specified as FQDN and using only this in the node spec
"""
rke_template = 'cluster_install_config_2.yml.j2'
nodes = cloud_provider.create_multiple_nodes(3, test_name)
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, nodes,
remove_nodes=True)
def test_install_config_3(test_name, cloud_provider, rke_client, kubectl):
"""
    Hostname override specified as a non-resolvable name
"""
rke_template = 'cluster_install_config_3.yml.j2'
nodes = cloud_provider.create_multiple_nodes(3, test_name)
# set node_name to non-resolvable name for hostname_override
index = 0
for node in nodes:
node.node_name = "{0}-{1}".format(test_name, index)
index += 1
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, nodes,
remove_nodes=True)
def test_install_config_4(test_name, cloud_provider, rke_client, kubectl):
"""
Hostname override specified as a resolvable name
"""
rke_template = 'cluster_install_config_4.yml.j2'
nodes = cloud_provider.create_multiple_nodes(3, test_name)
# set node_name to the resolvable host_name for hostname_override
for node in nodes:
node.node_name = node.host_name
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, nodes,
remove_nodes=True)
def test_install_config_5(test_name, cloud_provider, rke_client, kubectl):
"""
Internal address provided
"""
rke_template = 'cluster_install_config_5.yml.j2'
nodes = cloud_provider.create_multiple_nodes(3, test_name)
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, nodes,
remove_nodes=True, etcd_private_ip=True)
def test_install_config_6(test_name, cloud_provider, rke_client, kubectl):
"""
Providing address, hostname override(resolvable) and internal address
"""
rke_template = 'cluster_install_config_6.yml.j2'
nodes = cloud_provider.create_multiple_nodes(3, test_name)
# set node_name to the resolvable host_name for hostname_override
for node in nodes:
node.node_name = node.host_name
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, nodes,
remove_nodes=True, etcd_private_ip=True)
def test_install_config_7(test_name, cloud_provider, rke_client, kubectl):
"""
Providing address, hostname override(non-resolvable) and internal address
"""
rke_template = 'cluster_install_config_7.yml.j2'
nodes = cloud_provider.create_multiple_nodes(3, test_name)
# set node_name to non-resolvable name for hostname_override
index = 0
for node in nodes:
node.node_name = "{0}-{1}".format(test_name, index)
index += 1
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, nodes,
remove_nodes=True, etcd_private_ip=True)
def test_install_config_8(test_name, cloud_provider, rke_client, kubectl):
"""
    Create a cluster with the minimum possible values; this uses RKE's
    default network plugin
"""
rke_template = 'cluster_install_config_8.yml.j2'
nodes = cloud_provider.create_multiple_nodes(3, test_name)
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, nodes,
remove_nodes=True)
def test_install_config_9(test_name, cloud_provider, rke_client, kubectl):
"""
Launch a cluster with unencrypted ssh keys
"""
key_name = 'install-config-9'
rke_template = 'cluster_install_config_9.yml.j2'
public_key = cloud_provider.generate_ssh_key(key_name)
cloud_provider.import_ssh_key(key_name + '.pub', public_key)
nodes = cloud_provider.create_multiple_nodes(
3, test_name, key_name=key_name + '.pub')
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, nodes,
remove_nodes=True)
# def test_install_config_10(test_name, cloud_provider, rke_client, kubectl):
# """
# Launch a cluster with encrypted ssh keys
# """
# rke_template = 'cluster_install_config_10.yml.j2'
# nodes = cloud_provider.create_multiple_nodes(3, test_name)
# create_rke_cluster(rke_client, kubectl, nodes, rke_template)
# validate_rke_cluster(rke_client, kubectl, nodes)
# for node in nodes:
# cloud_provider.delete_node(node)
| 4,984 | 35.654412 | 77 |
py
|
rancher
|
rancher-master/tests/validation/tests/v1_api/test_fleet.py
|
from .common import * # NOQA
import pytest
namespace = {'client': None, 'cluster': None}
def test_fleet_simple():
client = namespace['client']
template = read_json_from_resource_dir("fleet_1.json")
name = random_name()
# set name
template['metadata']['name'] = name
# set target
cluster_id = namespace['cluster']['id']
match_labels = template['spec']['targets'][0]['clusterSelector']['matchLabels']
match_labels['management.cattle.io/cluster-name'] = cluster_id
res = client.create_fleet_cattle_io_gitrepo(template)
res = validate_fleet(client, res)
# delete the fleet
client.delete(res)
def validate_fleet(client, fleet):
    # the gitRepo briefly reports an 'active' state right after creation
    # before its real state is reflected, so wait before polling
time.sleep(10)
try:
wait_for(lambda: client.reload(fleet).metadata.state.name == 'active',
timeout_message='time out waiting for gitRepo to be ready')
except Exception as e:
assert False, str(e)
fleet = client.reload(fleet)
# validate the bundle is active
bundle = get_bundle_by_fleet_name(client, fleet.metadata.name)
wait_for(lambda: client.reload(bundle).metadata.state.name == 'active',
timeout_message='time out waiting for the bundle to be ready')
return fleet
def get_bundle_by_fleet_name(client, name):
start_time = time.time()
while time.time() - start_time < 30:
res = client.list_fleet_cattle_io_bundle()
for bundle in res['data']:
keys = bundle['metadata']['labels'].keys()
print("------")
print(keys)
if 'fleet.cattle.io/repo-name' in keys:
bundle_name = \
bundle['metadata']['labels']['fleet.cattle.io/repo-name']
if bundle_name == name:
return bundle
time.sleep(5)
return None
@pytest.fixture(scope='module', autouse='True')
def create_client(request):
if CLUSTER_NAME == '':
assert False, 'no cluster is provided, cannot run tests for fleet'
client = get_admin_client_v1()
namespace['client'] = client
res = client.list_management_cattle_io_cluster()
for cluster in res.data:
if cluster['spec']['displayName'] == CLUSTER_NAME:
namespace['cluster'] = cluster
if namespace['cluster'] is None:
assert False, 'cannot find the target cluster'
| 2,426 | 33.671429 | 83 |
py
|
rancher
|
rancher-master/tests/validation/tests/v1_api/test_deployment.py
|
from .common import * # NOQA
import pytest
namespace = {"client": None, "ns": None}
def test_namespace_create():
template = read_yaml_from_resource_dir("namespace.yaml")
template["metadata"]["name"] = random_test_name()
client = namespace["client"]
res = client.create_namespace(template)
# validate the namespace is created
ns = client.by_id_namespace(res.id)
assert ns.id == res.id
# delete the namespace at the end
client.delete(ns)
def test_deployment():
client = namespace["client"]
ns = namespace["ns"]
template = read_json_from_resource_dir("deployment_1.json")
name = random_name()
# set name
template["metadata"]["name"] = name
# set namespace
template["metadata"]["namespace"] = ns.id
# set container image and name
template["spec"]["template"]["spec"]["containers"][0]["image"] = TEST_IMAGE_V1
template["spec"]["template"]["spec"]["containers"][0]["name"] = name
# set label and selector
label_value = "apps.deployment-{}-{}".format(ns.id, name)
labels = template["spec"]["template"]["metadata"]["labels"]
labels["workload.user.cattle.io/workloadselector"] = label_value
matches = template["spec"]["selector"]["matchLabels"]
matches["workload.user.cattle.io/workloadselector"] = label_value
deployment = client.create_apps_deployment(template)
deployment = validate_deployment(client, deployment)
# scale up to 5 pods
deployment.spec.replicas = 5
deployment = client.update(deployment, deployment)
deployment = validate_deployment(client, deployment)
client.delete(deployment)
def validate_deployment(client, deployment):
# wait for the deployment to be active
wait_for(lambda: client.reload(deployment).metadata.state.name == "active",
timeout_message="time out waiting for deployment to be ready")
res = client.reload(deployment)
name = res["metadata"]["name"]
namespace = res["metadata"]["namespace"]
replicas = res["spec"]["replicas"]
# Rancher Dashboard gets pods by passing the label selector
target_label = 'workload.user.cattle.io/workloadselector=apps.deployment-{}-{}'
pods = client.list_pod(
labelSelector=target_label.format(namespace, name))
assert "data" in pods.keys(), "failed to get pods"
assert len(pods.data) == replicas, "failed to get the right number of pods"
for pod in pods.data:
assert pod.metadata.state.name == "running"
return res
@pytest.fixture(scope='module', autouse="True")
def create_client(request):
client = get_cluster_client_for_token_v1()
template = read_yaml_from_resource_dir("namespace.yaml")
template["metadata"]["name"] = random_test_name()
ns = client.create_namespace(template)
namespace["client"] = client
namespace["ns"] = ns
def fin():
client.delete(namespace["ns"])
request.addfinalizer(fin)
| 2,920 | 35.5125 | 83 |
py
|
rancher
|
rancher-master/tests/validation/tests/v1_api/test_monitoring_v2.py
|
from .common import * # NOQA
import pytest
import requests
import semver
namespace = {'client': None,
'cluster': None,
'rancher_catalog': None,
'project': None}
m_chart_name = 'rancher-monitoring'
m_version = os.environ.get('RANCHER_MONITORING_V2_VERSION', None)
m_app = 'cattle-monitoring-system/rancher-monitoring'
m_crd = 'cattle-monitoring-system/rancher-monitoring-crd'
m_namespace = "cattle-monitoring-system"
def test_install_monitoring_v2():
install_monitoring()
def install_monitoring():
client = namespace['client']
rancher_catalog = namespace['rancher_catalog']
# install the monitoring v2 app into the new project
project_id = namespace["project"]["id"]
cluster_id = namespace["cluster"]["id"]
cluster_name = namespace["cluster"]["spec"]["displayName"]
values = read_json_from_resource_dir("monitoring_v2_values.json")
values["projectId"] = project_id
for chart in values["charts"]:
chart["version"] = m_version
chart["projectId"] = project_id
chart["values"]["global"]["cattle"]["clusterId"] = cluster_id
chart["values"]["global"]["cattle"]["clusterName"] = cluster_name
client.action(rancher_catalog, "install", values)
# wait 2 minutes for the app to be fully deployed
time.sleep(120)
# check the app rancher-monitoring-crd first then rancher-monitoring
wait_for(
lambda: client.by_id_catalog_cattle_io_app(m_crd).status.summary.state == "deployed",
timeout_message="time out waiting for app to be ready")
wait_for(
lambda: client.by_id_catalog_cattle_io_app(m_app).status.summary.state == "deployed",
timeout_message="time out waiting for app to be ready")
def uninstall_monitoring():
client = namespace['client']
# firstly, uninstall the monitoring app
app = client.by_id_catalog_cattle_io_app(m_app)
if app is not None:
client.action(app, "uninstall")
wait_for(
lambda: client.by_id_catalog_cattle_io_app(m_app) is None,
timeout_message="Timeout waiting for uninstalling monitoring")
# then, clean up the secrets left in the namespace
res = client.list_secret()
if "data" in res.keys():
for item in res.get("data"):
if m_namespace in item['id']:
client.delete(item)
# then, the crd app
app = client.by_id_catalog_cattle_io_app(m_crd)
if app is not None:
client.action(app, "uninstall")
wait_for(
lambda: client.by_id_catalog_cattle_io_app(m_crd) is None,
timeout_message="Timeout waiting for uninstalling monitoring crd")
# finally, the namespace
ns = client.by_id_namespace(m_namespace)
if ns is not None:
client.delete(ns)
wait_for(
lambda: client.by_id_namespace(m_namespace) is None,
timeout_message="Timeout waiting for deleting the namespace")
def check_monitoring_exist():
client = namespace['client']
ns = client.by_id_namespace(m_namespace)
app = client.by_id_catalog_cattle_io_app(m_app)
crd = client.by_id_catalog_cattle_io_app(m_crd)
ns_exist = False if ns is None else True
app_deployed = False if app is None else True
crd_app_deployed = False if crd is None else True
return ns_exist or app_deployed or crd_app_deployed
def get_chart_latest_version(catalog, chart_name):
headers = {"Accept": "application/json",
"Authorization": "Bearer " + USER_TOKEN}
url = catalog["links"]["index"]
response = requests.get(url=url, verify=False, headers=headers)
assert response.status_code == 200, \
"failed to get the response from {}".format(url)
assert response.content is not None, \
"no chart is returned from {}".format(url)
res = json.loads(response.content)
assert chart_name in res["entries"].keys(), \
"failed to find the chart {} from the chart repo".format(chart_name)
charts = res['entries'][chart_name]
versions = []
for chart in charts:
versions.append(chart["version"])
latest = versions[0]
for version in versions:
if semver.compare(latest, version) < 0:
latest = version
return latest
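# Note: semver.compare(a, b) is negative when a < b, zero when equal, and
# positive when a > b (e.g. comparing "9.4.200" with "14.5.100" is negative),
# so the loop above keeps whichever chart version compares highest.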
@pytest.fixture(scope='module', autouse="True")
def create_client(request):
client = get_cluster_client_for_token_v1()
admin_client = get_admin_client_v1()
cluster = get_cluster_by_name(admin_client, CLUSTER_NAME)
namespace["client"] = client
namespace["cluster"] = cluster
rancher_catalog = \
client.by_id_catalog_cattle_io_clusterrepo(id="rancher-charts")
if rancher_catalog is None:
assert False, "rancher-charts is not available"
namespace["rancher_catalog"] = rancher_catalog
    # set the monitoring chart version to the latest if it is not provided
global m_version
if m_version is None:
m_version = \
get_chart_latest_version(rancher_catalog, m_chart_name)
print("chart version is not provided, "
"get chart version from repo: {}".format(m_version))
project = create_project(cluster, random_name())
namespace["project"] = project
if check_monitoring_exist() is True:
uninstall_monitoring()
def fin():
uninstall_monitoring()
client.delete(namespace["project"])
request.addfinalizer(fin)
| 5,400 | 36.769231 | 93 |
py
|
rancher
|
rancher-master/tests/validation/tests/v1_api/common.py
|
from ..common import * # NOQA
import pprint
import json
import yaml
import rancher
TEST_IMAGE_V1 = os.environ.get('RANCHER_TEST_IMAGE_V1', "ranchertest/mytestcontainer")
def get_admin_client_v1():
url = CATTLE_TEST_URL + "/v1"
# in fact, we get the cluster client for the local cluster
return rancher.Client(url=url, token=ADMIN_TOKEN, verify=False)
def get_cluster_client_for_token_v1(cluster_id=None, token=None):
if cluster_id is None:
cluster = get_cluster_by_name(get_admin_client_v1(), CLUSTER_NAME)
cluster_id = cluster["id"]
if token is None:
token = USER_TOKEN
url = CATTLE_TEST_URL + "/k8s/clusters/" + cluster_id + "/v1/schemas"
return rancher.Client(url=url, token=token, verify=False)
def get_cluster_by_name(client, cluster_name):
res = client.list_management_cattle_io_cluster()
assert "data" in res.keys(), "failed to find any cluster in the setup"
for cluster in res["data"]:
if cluster["spec"]["displayName"] == cluster_name:
return cluster
assert False, "failed to find the cluster {}".format(cluster_name)
def display(res):
if res is None:
print("None object is returned")
return
if isinstance(res, dict) and "data" in res.keys():
print("count of data {}".format(len(res.data)))
for item in res.get("data"):
print("-------")
pprint.pprint(item)
return
else:
        print("This is an instance of {}".format(type(res)))
pprint.pprint(res)
def read_json_from_resource_dir(filename):
dir_path = os.path.dirname(os.path.realpath(__file__))
try:
with open('{}/resource/{}'.format(dir_path, filename)) as f:
data = json.load(f)
return data
except FileNotFoundError as e:
assert False, e
def read_yaml_from_resource_dir(filename):
dir_path = os.path.dirname(os.path.realpath(__file__))
try:
with open('{}/resource/{}'.format(dir_path, filename)) as f:
data = yaml.safe_load(f)
return data
except FileNotFoundError as e:
assert False, e
def create_project(cluster, project_name):
admin_client = get_admin_client_v1()
project = read_yaml_from_resource_dir("project.yaml")
project["metadata"]["name"] = project_name
project["metadata"]["namespace"] = cluster["id"]
project["spec"]["clusterName"] = cluster["id"]
project["spec"]["displayName"] = project_name
res = admin_client.create_management_cattle_io_project(project)
return res
| 2,559 | 31 | 86 |
py
|
rancher
|
rancher-master/tests/validation/tests/v1_api/__init__.py
| 0 | 0 | 0 |
py
|
|
rancher
|
rancher-master/tests/validation/tests/v1_api/test_daemonset.py
|
from .common import * # NOQA
import pytest
namespace = {"client": None, "ns": None}
def test_daemonset():
client = namespace["client"]
ns = namespace["ns"]
template = read_json_from_resource_dir("daemonset_1.json")
name = random_name()
# set name
template["metadata"]["name"] = name
# set namespace
template["metadata"]["namespace"] = ns.id
# set container image and name
template["spec"]["template"]["spec"]["containers"][0]["image"] = TEST_IMAGE_V1
template["spec"]["template"]["spec"]["containers"][0]["name"] = name
# set label and selector
label_value = "apps.daemonset-{}-{}".format(ns.id, name)
labels = template["spec"]["template"]["metadata"]["labels"]
labels["workload.user.cattle.io/workloadselector"] = label_value
matches = template["spec"]["selector"]["matchLabels"]
matches["workload.user.cattle.io/workloadselector"] = label_value
res = client.create_apps_daemonset(template)
res = validate_daemonset(client, res)
client.delete(res)
def get_worker_node(client):
nodes = client.list_node(
labelSelector="node-role.kubernetes.io/worker=true")
return nodes.data
def validate_daemonset(client, daemonset):
    # wait for the daemonset to be active
    wait_for(lambda: client.reload(daemonset).metadata.state.name == "active",
             timeout_message="time out waiting for daemonset to be ready")
res = client.reload(daemonset)
name = res["metadata"]["name"]
namespace = res["metadata"]["namespace"]
node_count = len(get_worker_node(client))
# Rancher Dashboard gets pods by passing the label selector
label_key = 'workload.user.cattle.io/workloadselector'
label_value = 'apps.daemonset-{}-{}'.format(namespace, name)
pods = client.list_pod(
labelSelector='{}={}'.format(label_key, label_value))
assert "data" in pods.keys(), "failed to get pods"
assert len(pods.data) == node_count, "wrong number of pods"
for pod in pods.data:
assert label_value == pod.metadata.labels[label_key]
assert pod.metadata.state.name == "running"
return res
@pytest.fixture(scope='module', autouse="True")
def create_client(request):
client = get_cluster_client_for_token_v1()
template = read_yaml_from_resource_dir("namespace.yaml")
template["metadata"]["name"] = random_test_name()
ns = client.create_namespace(template)
namespace["client"] = client
namespace["ns"] = ns
def fin():
client.delete(namespace["ns"])
request.addfinalizer(fin)
| 2,561 | 34.583333 | 82 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_ad_custom_filter.py
|
from .common import * # NOQA
import requests
AUTH_PROVIDER = os.environ.get('RANCHER_AUTH_PROVIDER', "")
'''
Prerequisite:
Enable AD without TLS, using testuser1 as the admin user.
Description:
In this test, we are testing the customized user and group search filter
functionalities.
1) For customized user search filter:
The filter looks like:
(&(objectClass=person)(|(sAMAccountName=test*)(sn=test*)(givenName=test*))
[user customized filter])
Here, after we add
userSearchFilter = (memberOf=CN=testgroup5,CN=Users,DC=tad,DC=rancher,DC=io)
only testuser40 and testuser41 are returned; otherwise, all users whose names
start with the search keyword "testuser" would be listed.
2) For customized group search filter:
The filter looks like:
(&(objectClass=group)(sAMAccountName=test)[group customized filter])
Here, after we add groupSearchFilter = (cn=testgroup2)
only testgroup2 is returned; otherwise, all groups containing the search
keyword "testgroup" would be listed.
'''
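# For illustration only, the filters composed by the two testAndApply calls
# below (plugging the docstring's example values into the templates above)
# look roughly like:
#   user search:
#     (&(objectClass=person)(|(sAMAccountName=test*)(sn=test*)(givenName=test*))
#      (memberOf=CN=testgroup5,CN=Users,DC=tad,DC=rancher,DC=io))
#   group search:
#     (&(objectClass=group)(sAMAccountName=test)(cn=testgroup2))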
# Config Fields
HOSTNAME_OR_IP_ADDRESS = os.environ.get("RANCHER_HOSTNAME_OR_IP_ADDRESS")
PORT = os.environ.get("RANCHER_PORT")
CONNECTION_TIMEOUT = os.environ.get("RANCHER_CONNECTION_TIMEOUT")
SERVICE_ACCOUNT_NAME = os.environ.get("RANCHER_SERVICE_ACCOUNT_NAME")
SERVICE_ACCOUNT_PASSWORD = os.environ.get("RANCHER_SERVICE_ACCOUNT_PASSWORD")
DEFAULT_LOGIN_DOMAIN = os.environ.get("RANCHER_DEFAULT_LOGIN_DOMAIN")
USER_SEARCH_BASE = os.environ.get("RANCHER_USER_SEARCH_BASE")
GROUP_SEARCH_BASE = os.environ.get("RANCHER_GROUP_SEARCH_BASE")
PASSWORD = os.environ.get('RANCHER_USER_PASSWORD', "")
CATTLE_AUTH_URL = \
CATTLE_TEST_URL + \
"/v3-public/"+AUTH_PROVIDER+"Providers/" + \
AUTH_PROVIDER.lower()+"?action=login"
CATTLE_AUTH_PROVIDER_URL = \
CATTLE_TEST_URL + "/v3/"+AUTH_PROVIDER+"Configs/"+AUTH_PROVIDER.lower()
CATTLE_AUTH_PRINCIPAL_URL = CATTLE_TEST_URL + "/v3/principals?action=search"
CATTLE_AUTH_ENABLE_URL = CATTLE_AUTH_PROVIDER_URL + "?action=testAndApply"
CATTLE_AUTH_DISABLE_URL = CATTLE_AUTH_PROVIDER_URL + "?action=disable"
def test_custom_user_and_group_filter_for_AD():
disable_ad("testuser1", ADMIN_TOKEN)
enable_ad_with_customized_filter(
"testuser1",
"(memberOf=CN=testgroup5,CN=Users,DC=tad,DC=rancher,DC=io)",
"", ADMIN_TOKEN)
search_ad_users("testuser", ADMIN_TOKEN)
disable_ad("testuser1", ADMIN_TOKEN)
enable_ad_with_customized_filter(
"testuser1", "", "(cn=testgroup2)", ADMIN_TOKEN)
search_ad_groups("testgroup", ADMIN_TOKEN)
def disable_ad(username, token, expected_status=200):
headers = {'Authorization': 'Bearer ' + token}
r = requests.post(CATTLE_AUTH_DISABLE_URL, json={
"enabled": False,
"username": username,
"password": PASSWORD
}, verify=False, headers=headers)
assert r.status_code == expected_status
print("Disable ActiveDirectory request for " +
username + " " + str(expected_status))
def enable_ad_with_customized_filter(username, usersearchfilter,
groupsearchfilter, token,
expected_status=200):
headers = {'Authorization': 'Bearer ' + token}
activeDirectoryConfig = {
"accessMode": "unrestricted",
"userSearchFilter": usersearchfilter,
"groupSearchFilter": groupsearchfilter,
"connectionTimeout": CONNECTION_TIMEOUT,
"defaultLoginDomain": DEFAULT_LOGIN_DOMAIN,
"groupDNAttribute": "distinguishedName",
"groupMemberMappingAttribute": "member",
"groupMemberUserAttribute": "distinguishedName",
"groupNameAttribute": "name",
"groupObjectClass": "group",
"groupSearchAttribute": "sAMAccountName",
"nestedGroupMembershipEnabled": False,
"port": PORT,
"servers": [
HOSTNAME_OR_IP_ADDRESS
],
"serviceAccountUsername": SERVICE_ACCOUNT_NAME,
"userDisabledBitMask": 2,
"userEnabledAttribute": "userAccountControl",
"userLoginAttribute": "sAMAccountName",
"userNameAttribute": "name",
"userObjectClass": "person",
"userSearchAttribute": "sAMAccountName|sn|givenName",
"userSearchBase": USER_SEARCH_BASE,
"serviceAccountPassword": SERVICE_ACCOUNT_PASSWORD
}
r = requests.post(CATTLE_AUTH_ENABLE_URL, json={
"activeDirectoryConfig": activeDirectoryConfig,
"enabled": True,
"username": username,
"password": PASSWORD
}, verify=False, headers=headers)
assert r.status_code == expected_status
print("Enable ActiveDirectory request for " +
username + " " + str(expected_status))
def search_ad_users(searchkey, token, expected_status=200):
headers = {'Authorization': 'Bearer ' + token}
r = requests.post(CATTLE_AUTH_PRINCIPAL_URL,
json={'name': searchkey, 'principalType': 'user',
'responseType': 'json'},
verify=False, headers=headers)
assert r.status_code == expected_status
if r.status_code == 200:
print(r.json())
data = r.json()['data']
print(data)
assert len(data) == 2
print(data)
assert \
data[0].get('id') == \
"activedirectory_user://CN=test user40," \
"CN=Users,DC=tad,DC=rancher,DC=io"
assert \
data[1].get('id') == \
"activedirectory_user://CN=test user41," \
"CN=Users,DC=tad,DC=rancher,DC=io"
def search_ad_groups(searchkey, token, expected_status=200):
headers = {'Authorization': 'Bearer ' + token}
r = requests.post(CATTLE_AUTH_PRINCIPAL_URL,
json={'name': searchkey, 'principalType': 'group',
'responseType': 'json'},
verify=False, headers=headers)
assert r.status_code == expected_status
if r.status_code == 200:
data = r.json()['data']
assert len(data) == 1
assert \
data[0].get('id') == \
"activedirectory_group://CN=testgroup2," \
"CN=Users,DC=tad,DC=rancher,DC=io"
| 6,151 | 36.284848 | 77 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_windows_cluster.py
|
from .common import TEST_IMAGE
from .common import TEST_IMAGE_NGINX
from .common import TEST_IMAGE_OS_BASE
from .common import cluster_cleanup
from .common import get_user_client
from .common import random_test_name
from .test_rke_cluster_provisioning import create_custom_host_from_nodes
from .test_rke_cluster_provisioning import HOST_NAME
from lib.aws import AmazonWebServices, AWS_DEFAULT_AMI, AWS_DEFAULT_USER
def provision_windows_nodes():
node_roles_linux = [["controlplane"], ["etcd"], ["worker"]]
node_roles_windows = [["worker"], ["worker"], ["worker"]]
win_nodes = \
AmazonWebServices().create_multiple_nodes(
len(node_roles_windows), random_test_name(HOST_NAME))
linux_nodes = \
AmazonWebServices().create_multiple_nodes(
len(node_roles_linux), random_test_name(HOST_NAME),
ami=AWS_DEFAULT_AMI, ssh_user=AWS_DEFAULT_USER)
nodes = linux_nodes + win_nodes
node_roles = node_roles_linux + node_roles_windows
for node in win_nodes:
pull_images(node)
return nodes, node_roles
def test_windows_provisioning_vxlan():
nodes, node_roles = provision_windows_nodes()
cluster, nodes = create_custom_host_from_nodes(nodes, node_roles,
random_cluster_name=True,
windows=True,
windows_flannel_backend='vxlan')
cluster_cleanup(get_user_client(), cluster, nodes)
def test_windows_provisioning_gw_host():
nodes, node_roles = provision_windows_nodes()
for node in nodes:
AmazonWebServices().disable_source_dest_check(node.provider_node_id)
cluster, nodes = create_custom_host_from_nodes(nodes, node_roles,
random_cluster_name=True,
windows=True,
windows_flannel_backend='host-gw')
cluster_cleanup(get_user_client(), cluster, nodes)
def pull_images(node):
print("Pulling images on node: " + node.host_name)
pull_result = node.execute_command("docker pull " + TEST_IMAGE
+ " && " +
"docker pull " + TEST_IMAGE_NGINX
+ " && " +
"docker pull " + TEST_IMAGE_OS_BASE)
print(pull_result)
| 2,495 | 35.705882 | 85 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_volumes.py
|
import pytest
from rancher import ApiError
from .common import * # NOQA
namespace = {"p_client": None,
"ns": None,
"cluster": None,
"project": None,
"pv": None,
"pvc": None}
# this is the path to the mounted dir in the pod(workload)
MOUNT_PATH = "/var/nfs"
# if True then delete the NFS after finishing tests, otherwise False
DELETE_NFS = eval(os.environ.get('RANCHER_DELETE_NFS', "True"))
rbac_role_list = [
(CLUSTER_OWNER),
(PROJECT_OWNER),
(PROJECT_MEMBER),
(PROJECT_READ_ONLY),
(CLUSTER_MEMBER),
]
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_pv_create(role, remove_resource):
nfs_ip = namespace["nfs_ip"]
cluster = namespace["cluster"]
url = CATTLE_TEST_URL + "/v3/clusters/" + cluster.id + "/persistentvolume"
if (role == CLUSTER_OWNER):
user_token = rbac_get_user_token_by_role(role)
# Persistent volume can be created only using cluster client
owner_clusterclient = get_cluster_client_for_token(cluster, user_token)
pv = create_pv(owner_clusterclient, nfs_ip)
remove_resource(pv)
else:
        # Users other than the cluster owner cannot create persistent volumes.
        # Their user clients do not expose a method to create persistent
        # volumes, so a POST request is made directly as below
user_token = rbac_get_user_token_by_role(role)
headers = {"Content-Type": "application/json",
"Accept": "application/json",
"Authorization": "Bearer " + user_token}
pv_config = {"type": "persistentVolume",
"accessModes": ["ReadWriteOnce"],
"name": "testpv",
"nfs": {"readOnly": "false",
"type": "nfsvolumesource",
"path": NFS_SERVER_MOUNT_PATH,
"server": nfs_ip
},
"capacity": {"storage": "10Gi"}
}
response = requests.post(url, json=pv_config, verify=False,
headers=headers)
assert response.status_code == 403
jsonresponse = json.loads(response.content)
assert jsonresponse['code'] == "Forbidden"
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_pv_list(role, remove_resource):
# All users can list the persistent volume which is at the cluster level
    # A GET request is performed because user clients do not expose a method
    # to list persistent volumes
nfs_ip = namespace["nfs_ip"]
cluster = namespace["cluster"]
user_token = rbac_get_user_token_by_role(role)
owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
owner_clusterclient = get_cluster_client_for_token(cluster, owner_token)
pv = create_pv(owner_clusterclient, nfs_ip)
pvname = pv['name']
url = CATTLE_TEST_URL + "/v3/cluster/" + cluster.id +\
"/persistentvolumes/" + pvname
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
"Authorization": "Bearer " + user_token
}
response = requests.get(url, verify=False,
headers=headers)
jsonresponse = json.loads(response.content)
assert response.status_code == 200
assert jsonresponse['type'] == "persistentVolume"
assert jsonresponse['name'] == pvname
remove_resource(pv)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_pv_edit(role):
nfs_ip = namespace["nfs_ip"]
cluster = namespace["cluster"]
clusterowner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
owner_clusterclient = get_cluster_client_for_token(cluster,
clusterowner_token)
if(role == CLUSTER_OWNER):
# Verify editing of PV as a Cluster Owner succeeds
edit_pv(owner_clusterclient, nfs_ip, owner_clusterclient)
else:
user_token = rbac_get_user_token_by_role(role)
user_clusterclient = get_cluster_client_for_token(cluster,
user_token)
# Verify editing of PV is forbidden for other roles
with pytest.raises(ApiError) as e:
edit_pv(user_clusterclient, nfs_ip, owner_clusterclient)
print(e.value.error.code)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_pv_delete(role, remove_resource):
nfs_ip = namespace["nfs_ip"]
cluster = namespace["cluster"]
clusterowner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
owner_clusterclient = get_cluster_client_for_token(cluster,
clusterowner_token)
if(role == CLUSTER_OWNER):
pv = create_pv(owner_clusterclient, nfs_ip)
delete_pv(owner_clusterclient, pv)
else:
user_token = rbac_get_user_token_by_role(role)
user_clusterclient = get_cluster_client_for_token(cluster,
user_token)
# As a Cluster Owner create PV object using cluster client
pv = create_pv(owner_clusterclient, nfs_ip)
# Verify other roles cannot delete the PV object
with pytest.raises(ApiError) as e:
delete_pv(user_clusterclient, pv)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
remove_resource(pv)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_pvc_create(role, remove_resource):
user_project = None
nfs_ip = namespace["nfs_ip"]
cluster_client = namespace["cluster_client"]
if(role == CLUSTER_MEMBER):
user_token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
user_project, ns = create_project_and_ns(user_token,
namespace["cluster"],
random_test_name(
"cluster-mem"))
p_client = get_project_client_for_token(user_project, user_token)
else:
user_token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
p_client = get_project_client_for_token(project, user_token)
if (role != PROJECT_READ_ONLY):
pv, pvc = create_pv_pvc(p_client, ns, nfs_ip, cluster_client)
remove_resource(pv)
remove_resource(pvc)
else:
project = rbac_get_project()
ns = rbac_get_namespace()
user_token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
readonly_user_client = get_project_client_for_token(project,
user_token)
# Verify Read Only member cannot create PVC objects
with pytest.raises(ApiError) as e:
create_pv_pvc(readonly_user_client, ns, nfs_ip, cluster_client)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
    if user_project is not None:
remove_resource(user_project)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_pvc_create_negative(role, remove_resource):
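    # PVC creation in a project that is not shared with the user should only
    # succeed for the cluster owner; all other roles expect a 403 Forbidden.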
nfs_ip = namespace["nfs_ip"]
cluster_client = namespace["cluster_client"]
if (role == CLUSTER_OWNER):
unshared_project = rbac_get_unshared_project()
ns = rbac_get_unshared_ns()
user_token = rbac_get_user_token_by_role(role)
p_client = get_project_client_for_token(unshared_project, user_token)
pv, pvc = create_pv_pvc(p_client, ns, nfs_ip, cluster_client)
remove_resource(pv)
remove_resource(pvc)
else:
unshared_project = rbac_get_unshared_project()
user_token = rbac_get_user_token_by_role(role)
ns = rbac_get_unshared_ns()
p_client = get_project_client_for_token(unshared_project, user_token)
# Verify other members cannot create PVC objects
with pytest.raises(ApiError) as e:
create_pv_pvc(p_client, ns, nfs_ip, cluster_client)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_pvc_edit_negative(role, remove_resource):
# Editing of volume claims is not allowed for any role,
# We are verifying that editing is forbidden in shared
# and unshared projects
nfs_ip = namespace["nfs_ip"]
cluster_client = namespace["cluster_client"]
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
user_token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
p_client = get_project_client_for_token(project, user_token)
# Cluster owner client created in the shared project
owner_client = \
get_project_client_for_token(project, cluster_owner_token)
edit_pvc(p_client, ns, nfs_ip, cluster_client, owner_client)
unshared_project = rbac_get_unshared_project()
user_token = rbac_get_user_token_by_role(role)
unshared_ns = rbac_get_unshared_ns()
user_client = get_project_client_for_token(unshared_project,
user_token)
# Cluster owner client created in the unshared project
owner_client = \
get_project_client_for_token(unshared_project, cluster_owner_token)
edit_pvc(user_client, unshared_ns, nfs_ip, cluster_client,
owner_client)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_pvc_delete(role, remove_resource):
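    # Roles with write access delete a PVC they created; the read-only user
    # is expected to get a 403 Forbidden when deleting a PVC created by the
    # cluster owner.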
nfs_ip = namespace["nfs_ip"]
cluster_client = namespace["cluster_client"]
user_project = None
    if role == CLUSTER_MEMBER:
user_token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
user_project, ns = create_project_and_ns(user_token,
namespace["cluster"],
random_test_name(
"cluster-mem"))
p_client = get_project_client_for_token(user_project, user_token)
else:
user_token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
p_client = get_project_client_for_token(project, user_token)
if (role != PROJECT_READ_ONLY):
pv, pvc = create_pv_pvc(p_client, ns, nfs_ip, cluster_client)
delete_pvc(p_client, pvc, ns)
remove_resource(pv)
if user_project is not None:
remove_resource(user_project)
if (role == PROJECT_READ_ONLY):
project = rbac_get_project()
ns = rbac_get_namespace()
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
cluster_owner_p_client = \
get_project_client_for_token(project, cluster_owner_token)
user_token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
readonly_client = get_project_client_for_token(project, user_token)
# As a Cluster owner create a PVC object
pv, pvc = create_pv_pvc(cluster_owner_p_client, ns, nfs_ip,
cluster_client)
# Verify that the Read Only member cannot delete the PVC objects
# created by Cluster Owner
with pytest.raises(ApiError) as e:
delete_pvc(readonly_client, pvc, ns)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
remove_resource(pv)
remove_resource(pvc)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_pvc_delete_negative(role, remove_resource):
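    # Deleting a PVC in an unshared project should only succeed for the
    # cluster owner; all other roles expect a 403 Forbidden.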
nfs_ip = namespace["nfs_ip"]
cluster_client = namespace["cluster_client"]
if (role == CLUSTER_OWNER):
unshared_project = rbac_get_unshared_project()
ns = rbac_get_unshared_ns()
user_token = rbac_get_user_token_by_role(role)
p_client = get_project_client_for_token(unshared_project, user_token)
pv, pvc = create_pv_pvc(p_client, ns, nfs_ip,
cluster_client)
delete_pvc(p_client, pvc, ns)
remove_resource(pv)
else:
unshared_project = rbac_get_unshared_project()
ns = rbac_get_unshared_ns()
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
owner_client = get_project_client_for_token(unshared_project,
cluster_owner_token)
user_token = rbac_get_user_token_by_role(role)
# As a Cluster Owner create pv, pvc
pv, pvc = create_pv_pvc(owner_client, ns, nfs_ip,
cluster_client)
user_client = get_project_client_for_token(unshared_project,
user_token)
with pytest.raises(ApiError) as e:
delete_pvc(user_client, pvc, ns)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
remove_resource(pv)
remove_resource(pvc)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_pvc_list(remove_resource, role):
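    # Every role should be able to list a PVC that exists in a project it has
    # access to (cluster members use a project of their own).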
user_project = None
nfs_ip = namespace["nfs_ip"]
cluster_client = namespace["cluster_client"]
    if role == CLUSTER_MEMBER:
cluster_member_token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
user_project, ns = \
create_project_and_ns(cluster_member_token,
namespace["cluster"],
random_test_name("cluster-mem"))
user_client = get_project_client_for_token(user_project,
cluster_member_token)
# As a cluster member create a PVC and he should be able to list it
pv, pvc = create_pv_pvc(user_client, ns, nfs_ip, cluster_client)
else:
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
project = rbac_get_project()
cluster_owner_p_client = \
get_project_client_for_token(project, cluster_owner_token)
user_token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
user_client = get_project_client_for_token(project, user_token)
# As a Cluster Owner create pv, pvc
pv, pvc = create_pv_pvc(cluster_owner_p_client, ns, nfs_ip,
cluster_client)
pvcname = pvc["name"]
pvcdict = user_client.list_persistentVolumeClaim(name=pvcname)
print(pvcdict)
pvcdata = pvcdict['data']
assert len(pvcdata) == 1
assert pvcdata[0].type == "persistentVolumeClaim"
assert pvcdata[0].name == pvcname
remove_resource(pvc)
remove_resource(pv)
    if user_project is not None:
remove_resource(user_project)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_pvc_list_negative(remove_resource, role):
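    # Listing PVCs in an unshared project should return results only for the
    # cluster owner; all other roles should get an empty list.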
nfs_ip = namespace["nfs_ip"]
cluster_client = namespace["cluster_client"]
if (role == CLUSTER_OWNER):
unshared_project = rbac_get_unshared_project()
ns = rbac_get_unshared_ns()
user_token = rbac_get_user_token_by_role(role)
p_client = get_project_client_for_token(unshared_project, user_token)
pv, pvc = create_pv_pvc(p_client, ns, nfs_ip, cluster_client)
pvcname = pvc['name']
pvcdict = p_client.list_persistentVolumeClaim(name=pvcname)
pvcdata = pvcdict.get('data')
assert len(pvcdata) == 1
assert pvcdata[0].type == "persistentVolumeClaim"
assert pvcdata[0].name == pvcname
remove_resource(pv)
remove_resource(pvc)
else:
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
unshared_project = rbac_get_unshared_project()
ns = rbac_get_unshared_ns()
cluster_owner_client = \
get_project_client_for_token(unshared_project, cluster_owner_token)
user_token = rbac_get_user_token_by_role(role)
user_client = get_project_client_for_token(unshared_project,
user_token)
# As a Cluster Owner create pv, pvc
pv, pvc = create_pv_pvc(cluster_owner_client, ns, nfs_ip,
cluster_client)
pvcname = pvc['name']
# Verify length of PVC list is zero for users with other roles
        pvcdict = user_client.list_persistentVolumeClaim(name=pvcname)
pvcdata = pvcdict.get('data')
assert len(pvcdata) == 0
remove_resource(pv)
        remove_resource(pvc)
def edit_pvc(user_client, ns, nfs_ip, cluster_client,
cluster_owner_client):
# Create pv and pvc as cluster owner which will be used during edit_pvc
# negative test cases since roles other than cluster owner cannot
# create pvc in unshared projects
pv, pvc = create_pv_pvc(cluster_owner_client, ns, nfs_ip, cluster_client)
updated_pvc_config = {
"accessModes": ["ReadWriteOnce"],
"name": pvc['name'],
"volumeId": pv.id,
"namespaceId": ns.id,
"storageClassId": "",
"resources": {"requests": {"storage": "15Gi"}}
}
with pytest.raises(ApiError) as e:
user_client.update(pvc, updated_pvc_config)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
cluster_owner_client.delete(pvc)
cluster_client.delete(pv)
def delete_pvc(client, pvc, ns):
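    # Delete the PVC through the Rancher API, wait until it disappears from
    # the API listing, then double-check with "kubectl get pvc".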
pvcname = pvc['name']
print(pvcname)
client.delete(pvc)
# Sleep to allow PVC to be deleted
time.sleep(5)
timeout = 30
    # Poll the API until the PVC no longer shows up in the listing
    pvcdict = client.list_persistentVolumeClaim(name=pvcname)
    start = time.time()
    while len(pvcdict.get('data')) > 0:
        if time.time() - start > timeout:
            raise AssertionError("Timed out waiting for deletion")
        time.sleep(.5)
        pvcdict = client.list_persistentVolumeClaim(name=pvcname)
    assert len(pvcdict.get('data')) == 0, "Failed to delete the PVC"
# Verify pvc is deleted by "kubectl get pvc" command
command = "get pvc {} --namespace {}".format(pvc['name'], ns.name)
print("Command to obtain the pvc")
print(command)
result = execute_kubectl_cmd(command, json_out=False, stderr=True)
print(result)
print("Verify that the pvc does not exist "
"and the error code returned is non zero ")
assert result != 0, "Result should be a non zero value"
def delete_pv(cluster_client, pv):
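    # Delete the PV through the Rancher API, wait until it disappears from
    # the API listing, then double-check with "kubectl get pv".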
pvname = pv['name']
print(pvname)
cluster_client.delete(pv)
    # Sleep to allow the PV deletion to start
    time.sleep(5)
    timeout = 30
    # Poll the API until the PV no longer shows up in the listing
    pvdict = cluster_client.list_persistent_volume(name=pvname)
    print(pvdict)
    start = time.time()
    while len(pvdict.get('data')) > 0:
        if time.time() - start > timeout:
            raise AssertionError("Timed out waiting for deletion")
        time.sleep(.5)
        pvdict = cluster_client.list_persistent_volume(name=pvname)
    assert len(pvdict.get('data')) == 0, "Failed to delete the PV"
# Verify pv is deleted by "kubectl get pv" command
command = "get pv {} ".format(pv['name'])
print("Command to obtain the pvc")
print(command)
result = execute_kubectl_cmd(command, json_out=False, stderr=True)
print(result)
print("Verify that the pv does not exist "
"and the error code returned is non zero ")
assert result != 0, "Result should be a non zero value"
def edit_pv(client, nfs_ip, cluster_owner_client):
pv = create_pv(cluster_owner_client, nfs_ip)
updated_pv_config = {
"type": "persistentVolume",
"accessModes": ["ReadWriteOnce"],
"name": pv['name'],
"nfs": {"readOnly": "false",
"type": "nfsvolumesource",
"path": NFS_SERVER_MOUNT_PATH,
"server": nfs_ip
},
"capacity": {"storage": "20Gi"}
}
updated_pv = client.update(pv, updated_pv_config)
capacitydict = updated_pv['capacity']
assert capacitydict['storage'] == '20Gi'
    assert updated_pv['type'] == 'persistentVolume'
cluster_owner_client.delete(updated_pv)
@pytest.fixture(scope="module", autouse="True")
def volumes_setup(request):
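    # Module-level setup: provision an NFS server, create a dedicated
    # project/namespace and cache the clients and NFS IP in the shared
    # "namespace" dict used by the tests above.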
client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
project, ns = create_project_and_ns(USER_TOKEN, cluster, "project-volumes")
p_client = get_project_client_for_token(project, USER_TOKEN)
nfs_node = provision_nfs_server()
nfs_ip = nfs_node.get_public_ip()
print("IP of the NFS Server: ", nfs_ip)
# add persistent volume to the cluster
cluster_client = get_cluster_client_for_token(cluster, USER_TOKEN)
namespace["p_client"] = p_client
namespace["ns"] = ns
namespace["cluster"] = cluster
namespace["project"] = project
namespace["nfs_ip"] = nfs_ip
namespace["cluster_client"] = cluster_client
def fin():
cluster_client = get_cluster_client_for_token(namespace["cluster"],
USER_TOKEN)
cluster_client.delete(namespace["project"])
cluster_client.delete(namespace["pv"])
if DELETE_NFS is True:
AmazonWebServices().delete_node(nfs_node)
request.addfinalizer(fin)
| 22,060 | 39.257299 | 79 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_logging.py
|
import pytest
import requests
import base64
import time
from .common import random_test_name, get_user_client
from .common import get_user_client_and_cluster
from .common import create_project_and_ns
from .common import get_project_client_for_token
from .common import create_kubeconfig
from .common import wait_for_app_to_active
from .common import wait_for_app_to_remove
from .common import CATTLE_TEST_URL
from .common import USER_TOKEN
namespace = {"p_client": None, "ns": None, "cluster": None, "project": None,
"name_prefix": None, "admin_client": None, "sys_p_client": None}
fluentd_aggregator_answers = {"defaultImage": "true",
"image.repository": "guangbo/fluentd",
"image.tag": "dev",
"replicas": "1",
"service.type": "ClusterIP",
"persistence.enabled": "false",
"extraPersistence.enabled": "false",
"extraPersistence.size": "10Gi",
"extraPersistence.mountPath": "/extra",
"extraPersistence.storageClass": "",
"output.type": "custom",
"output.flushInterval": "5s",
"output.customConf": "<match **.**>\n @type stdout\n</match>"}
CATTLE_CLUSTER_LOGGING_FLUENTD_TEST = \
CATTLE_TEST_URL + "/v3/clusterloggings?action=test"
CATTLE_PROJECT_LOGGING_FLUENTD_TEST = \
CATTLE_TEST_URL + "/v3/projectloggings?action=test"
FLUENTD_AGGREGATOR_CATALOG_ID = "catalog://?catalog=library&template=fluentd-aggregator&version=0.3.1"
def test_send_log_to_fluentd(setup_fluentd_aggregator):
cluster = namespace["cluster"]
project = namespace["project"]
valid_endpoint = namespace["name_prefix"] + "-fluentd-aggregator" + \
"." + namespace["ns"].name + ".svc.cluster.local:24224"
print("fluentd aggregator endpoint:" + valid_endpoint)
    send_log_to_fluentd_aggregator(CATTLE_CLUSTER_LOGGING_FLUENTD_TEST, valid_endpoint, cluster.id, project.id, USER_TOKEN)
send_log_to_fluentd_aggregator(CATTLE_PROJECT_LOGGING_FLUENTD_TEST, valid_endpoint, cluster.id, project.id, USER_TOKEN)
bad_format_endpoint = "http://fluentd.com:9092"
print("fluentd aggregator endpoint:" + bad_format_endpoint)
    send_log_to_fluentd_aggregator(CATTLE_CLUSTER_LOGGING_FLUENTD_TEST, bad_format_endpoint, cluster.id, project.id, USER_TOKEN, expected_status=500)
send_log_to_fluentd_aggregator(CATTLE_PROJECT_LOGGING_FLUENTD_TEST, bad_format_endpoint, cluster.id, project.id, USER_TOKEN, expected_status=500)
def send_log_to_fluentd_aggregator(url, endpoint, clusterId, projectId, token, expected_status=204):
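    # POST a fluentForwarderConfig to the cluster/project logging "test"
    # action and verify that the expected HTTP status code is returned.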
headers = {'Authorization': 'Bearer ' + token}
fluentdConfig = {
"fluentServers": [
{
"endpoint": endpoint,
"weight": 100
}
],
"enableTls": False,
"compress": True
}
r = requests.post(url,
json={"fluentForwarderConfig": fluentdConfig,
"clusterId": clusterId,
"projectId": projectId},
verify=False, headers=headers)
if len(r.content) != 0:
print(r.content)
assert r.status_code == expected_status
@pytest.fixture(scope='function')
def setup_fluentd_aggregator(request):
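    # Deploy the fluentd-aggregator catalog app into the test namespace so
    # the logging "test" action has a reachable fluentd endpoint.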
p_client = namespace["p_client"]
ns = namespace["ns"]
name = random_test_name("fluentd-aggregator")
namespace["name_prefix"] = name
app = p_client.create_app(name=name,
answers=fluentd_aggregator_answers,
targetNamespace=ns.name,
externalId=FLUENTD_AGGREGATOR_CATALOG_ID,
namespaceId=ns.id)
wait_for_app_to_active(p_client, app.name)
fluentd_app_name = "rancher-logging"
fluentd_secret_name = "rancher-logging-fluentd"
endpoint_host = "rancher.com"
endpoint_port = "24224"
username = "user1"
password = "my_password"
shared_key = "my_shared_key"
weight = 100
def test_fluentd_target(request):
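    # Configure cluster logging against a fluentd target without TLS certs,
    # then update it with TLS material and verify the generated cluster.conf
    # picks up the SSL paths and the server block.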
cluster = namespace["cluster"]
cluster_logging = create_cluster_logging(fluentd_target_without_ssl())
request.addfinalizer(lambda: delete_cluster_logging(cluster_logging))
wait_for_logging_app()
# wait for config to sync
time.sleep(120)
config = get_fluentd_config("cluster.conf")
assert fluentd_ssl_configure("cluster_" + namespace["cluster"].id) not in config
assert fluentd_server_configure() in config
ssl_config = fluentd_target_with_ssl()
admin_client = namespace["admin_client"]
admin_client.update_by_id_cluster_logging(id=cluster_logging.id,
name=cluster_logging.name,
clusterId=cluster.id,
fluentForwarderConfig=ssl_config)
# wait for config to sync
time.sleep(60)
config = get_fluentd_config("cluster.conf")
assert fluentd_ssl_configure("cluster_" + namespace["cluster"].id) in config
assert fluentd_server_configure() in config
def test_project_fluentd_target(request):
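    # Same flow as the cluster-level test above, but for project logging and
    # the generated project.conf.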
project = namespace["project"]
wrap_project_name = project.id.replace(':', '_')
project_logging = create_project_logging(fluentd_target_without_ssl())
request.addfinalizer(lambda: delete_project_logging(project_logging))
wait_for_logging_app()
# wait for config to sync
time.sleep(60)
config = get_fluentd_config("project.conf")
assert fluentd_ssl_configure("project_" + wrap_project_name) not in config
assert fluentd_server_configure() in config
ssl_config = fluentd_target_with_ssl()
admin_client = namespace["admin_client"]
admin_client.update_by_id_project_logging(id=project_logging.id,
name=project_logging.name,
projectId=project.id,
fluentForwarderConfig=ssl_config)
# wait for config to sync
time.sleep(60)
config = get_fluentd_config("project.conf")
assert fluentd_ssl_configure("project_" + wrap_project_name) in config
assert fluentd_server_configure() in config
def wait_for_logging_app():
sys_p_client = namespace["sys_p_client"]
wait_for_app_to_active(sys_p_client, fluentd_app_name)
def fluentd_target_with_ssl():
return {"certificate": "-----BEGIN CERTIFICATE-----\
----END CERTIFICATE-----",
"clientCert": "-----BEGIN CERTIFICATE-----\
----END CERTIFICATE-----",
"clientKey": "-----BEGIN PRIVATE KEY-----\
----END PRIVATE KEY-----",
"compress": True,
"enableTls": True,
"fluentServers": [
{
"endpoint": endpoint_host + ":" + endpoint_port,
"username": username,
"password": password,
"sharedKey": shared_key,
"weight": weight
}
],
}
def fluentd_target_without_ssl():
return {"compress": True,
"enableTls": True,
"fluentServers": [
{
"endpoint": endpoint_host + ":" + endpoint_port,
"username": username,
"password": password,
"sharedKey": shared_key,
"weight": weight
}
],
}
def fluentd_ssl_configure(name):
return f"""tls_cert_path /fluentd/etc/config/ssl/{name}_ca.pem
tls_client_cert_path /fluentd/etc/config/ssl/{name}_client-cert.pem
tls_client_private_key_path /fluentd/etc/config/ssl/{name}_client-key.pem"""
def fluentd_server_configure():
return f"""<server>
host {endpoint_host}
port {endpoint_port}
shared_key {shared_key}
username {username}
password {password}
weight {weight}
</server>"""
def get_system_project_client():
cluster = namespace["cluster"]
admin_client = namespace["admin_client"]
projects = admin_client.list_project(name="System",
clusterId=cluster.id).data
assert len(projects) == 1
project = projects[0]
sys_p_client = get_project_client_for_token(project, USER_TOKEN)
return sys_p_client
def get_namespaced_secret(name):
sys_p_client = namespace["sys_p_client"]
secres = sys_p_client.list_namespaced_secret(name=name)
assert len(secres.data) == 1
return secres.data[0]
def get_fluentd_config(key):
secret = get_namespaced_secret(fluentd_secret_name)
base64_cluster_conf = secret.data[key]
tmp = base64.b64decode(base64_cluster_conf).decode("utf-8")
return strip_whitespace(tmp)
def strip_whitespace(ws):
new_str = []
for s in ws.strip().splitlines(True):
if s.strip():
new_str.append(s.strip())
return "\n".join(new_str)
def create_cluster_logging(config):
cluster = namespace["cluster"]
admin_client = namespace["admin_client"]
name = random_test_name("fluentd")
return admin_client.create_cluster_logging(name=name,
clusterId=cluster.id,
fluentForwarderConfig=config
)
def delete_cluster_logging(cluster_logging):
admin_client = namespace["admin_client"]
admin_client.delete(cluster_logging)
sys_p_client = namespace["sys_p_client"]
wait_for_app_to_remove(sys_p_client, fluentd_app_name)
def create_project_logging(config):
project = namespace["project"]
admin_client = namespace["admin_client"]
name = random_test_name("fluentd")
return admin_client.create_project_logging(name=name,
projectId=project.id,
fluentForwarderConfig=config
)
def delete_project_logging(project_logging):
admin_client = namespace["admin_client"]
admin_client.delete(project_logging)
sys_p_client = namespace["sys_p_client"]
wait_for_app_to_remove(sys_p_client, fluentd_app_name)
@pytest.fixture(scope='module', autouse="True")
def create_project_client(request):
client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
p, ns = create_project_and_ns(USER_TOKEN, cluster,
random_test_name("testlogging"))
p_client = get_project_client_for_token(p, USER_TOKEN)
namespace["p_client"] = p_client
namespace["ns"] = ns
namespace["cluster"] = cluster
namespace["project"] = p
namespace["admin_client"] = client
namespace["sys_p_client"] = get_system_project_client()
def fin():
client = get_user_client()
client.delete(namespace["project"])
request.addfinalizer(fin)
| 11,139 | 36.635135 | 149 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_deploy_clusters.py
|
from .common import * # NOQA
from .test_aks_cluster import get_aks_version, create_and_validate_aks_cluster
from .test_eks_cluster import EKS_K8S_VERSIONS, create_and_validate_eks_cluster
from .test_gke_cluster import get_gke_config, \
create_and_validate_gke_cluster, get_gke_version_credentials
from .test_rke_cluster_provisioning import create_and_validate_custom_host
from .test_import_cluster import create_and_validate_import_cluster
env_details = "env.RANCHER_CLUSTER_NAMES='"
cluster_details = {"rke": {}, "rke_import": {},
"eks": {}, "aks": {}, "gke": {}}
if_not_auto_deploy_rke = pytest.mark.skipif(
ast.literal_eval(
os.environ.get(
'RANCHER_TEST_DEPLOY_RKE', "False")) is False,
reason='auto deploy RKE tests are skipped')
if_not_auto_deploy_eks = pytest.mark.skipif(
ast.literal_eval(
os.environ.get(
'RANCHER_TEST_DEPLOY_EKS', "False")) is False,
reason='auto deploy EKS tests are skipped')
if_not_auto_deploy_gke = pytest.mark.skipif(
ast.literal_eval(
os.environ.get(
'RANCHER_TEST_DEPLOY_GKE', "False")) is False,
reason='auto deploy GKE tests are skipped')
if_not_auto_deploy_aks = pytest.mark.skipif(
ast.literal_eval(
os.environ.get(
'RANCHER_TEST_DEPLOY_AKS', "False")) is False,
reason='auto deploy AKS tests are skipped')
if_not_auto_deploy_rke_import = pytest.mark.skipif(
ast.literal_eval(
os.environ.get(
'RANCHER_TEST_DEPLOY_RKE_IMPORT', "False")) is False,
reason='auto deploy RKE import tests are skipped')
@if_not_auto_deploy_rke
def test_deploy_rke():
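    # Deploy one custom RKE cluster per default k8s version advertised by the
    # Rancher server and record the cluster names for later validation.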
print("Deploying RKE Clusters")
global env_details
global cluster_details
rancher_version = get_setting_value_by_name('server-version')
if str(rancher_version).startswith('v2.2'):
k8s_v = get_setting_value_by_name('k8s-version-to-images')
default_k8s_versions = json.loads(k8s_v).keys()
else:
k8s_v = get_setting_value_by_name('k8s-versions-current')
default_k8s_versions = k8s_v.split(",")
# Create clusters
for k8s_version in default_k8s_versions:
if env_details != "env.RANCHER_CLUSTER_NAMES='":
env_details += ","
print("Deploying RKE Cluster using kubernetes version {}".format(
k8s_version))
node_roles = [["controlplane"], ["etcd"],
["worker"], ["worker"], ["worker"]]
cluster, aws_nodes = create_and_validate_custom_host(
node_roles, random_cluster_name=True, version=k8s_version)
env_details += cluster.name
print("Successfully deployed {} with kubernetes version {}".format(
cluster.name, k8s_version))
cluster_details["rke"][cluster.name] = k8s_version
@if_not_auto_deploy_rke_import
def test_deploy_rke_import():
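    # Create an RKE cluster for every version reported by
    # "rke config --list-version -a" and import each one into Rancher.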
print("Deploying RKE import Clusters")
global env_details
global cluster_details
errors = []
# get rke k8s versions
rkecommand = "rke config --list-version -a"
result = run_command_with_stderr(rkecommand)
result = result.decode('ascii')
import_k8s_versions = result.split('\n')
import_k8s_versions = list(filter(None, import_k8s_versions))
print(import_k8s_versions)
for version in import_k8s_versions:
if env_details != "env.RANCHER_CLUSTER_NAMES='":
env_details += ","
try:
print("Deploying RKE import Cluster using kubernetes version {}".
format(version))
client, cluster, \
aws_nodes = \
create_and_validate_import_cluster(version, supportmatrix=True)
env_details += cluster.name
cluster_details["rke_import"][cluster.name] = version
        except Exception as e:
            errors.append(e)
    assert not errors
@if_not_auto_deploy_eks
def test_deploy_eks():
print("Deploying EKS Clusters")
global env_details
errors = []
if len(EKS_K8S_VERSIONS) > 1:
k8s_versions = [EKS_K8S_VERSIONS[0], EKS_K8S_VERSIONS[-1]]
else:
k8s_versions = [EKS_K8S_VERSIONS[0]]
for version in k8s_versions:
if env_details != "env.RANCHER_CLUSTER_NAMES='":
env_details += ","
try:
print("Deploying EKS Cluster using kubernetes version {}".format(
version))
client, cluster = create_and_validate_eks_cluster(version)
env_details += cluster.name
cluster_details["eks"][cluster.name] = version
except Exception as e:
errors.append(e)
assert not errors
@if_not_auto_deploy_gke
def test_deploy_gke():
print("Deploying GKE Clusters")
global env_details
errors = []
gke_versions, creds = get_gke_version_credentials(multiple_versions=True)
for i, version in enumerate(gke_versions, start=1):
c_name = "test-auto-gke-{}".format(i)
if env_details != "env.RANCHER_CLUSTER_NAMES='":
env_details += ","
try:
print("Deploying GKE Cluster using kubernetes version {}".format(
version))
client, cluster = create_and_validate_gke_cluster(c_name,
version, creds)
env_details += cluster.name
cluster_details["gke"][cluster.name] = version
except Exception as e:
errors.append(e)
assert not errors
@if_not_auto_deploy_aks
def test_deploy_aks():
print("Deploying AKS Clusters")
global env_details
errors = []
aks_versions = get_aks_version(multiple_versions=True)
for version in aks_versions:
if env_details != "env.RANCHER_CLUSTER_NAMES='":
env_details += ","
try:
print("Deploying AKS Cluster using kubernetes version {}".format(
version))
client, cluster = create_and_validate_aks_cluster(version)
env_details += cluster.name
cluster_details["aks"][cluster.name] = version
except Exception as e:
errors.append(e)
assert not errors
@pytest.fixture(scope='module', autouse="True")
def set_data(request):
if UPDATE_KDM is True:
update_and_validate_kdm(KDM_URL)
def fin():
global env_details
global cluster_details
env_details += "'"
print("\n{}".format(env_details))
print("\n Cluster Details")
for cluster_type in cluster_details:
print(cluster_type + ": " + str(cluster_details[cluster_type]))
for cluster_type in cluster_details:
for cluster_name in cluster_details[cluster_type]:
print(cluster_type + " --> " +
str(cluster_details[cluster_type][cluster_name]))
create_config_file(env_details)
request.addfinalizer(fin)
| 6,871 | 34.791667 | 79 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_istio.py
|
import copy
import os
import re
import pytest
import time
from subprocess import CalledProcessError
from rancher import ApiError
from .test_auth import enable_ad, load_setup_data
from .common import add_role_to_user
from .common import auth_get_user_token
from .common import auth_resource_cleanup
from .common import AUTH_PROVIDER
from .common import AUTH_USER_PASSWORD
from .common import apply_crd
from .common import check_condition
from .common import compare_versions
from .common import CLUSTER_MEMBER
from .common import CLUSTER_OWNER
from .common import create_kubeconfig
from .common import create_project_and_ns
from .common import create_ns
from .common import DEFAULT_TIMEOUT
from .common import delete_crd
from .common import execute_kubectl_cmd
from .common import get_a_group_and_a_user_not_in_it
from .common import get_admin_client
from .common import get_client_for_token
from .common import get_cluster_client_for_token
from .common import get_crd
from .common import get_group_principal_id
from .common import get_project_client_for_token
from .common import get_user_by_group
from .common import get_user_client
from .common import get_user_client_and_cluster
from .common import if_test_group_rbac
from .common import if_test_rbac
from .common import login_as_auth_user
from .common import NESTED_GROUP_ENABLED
from .common import PROJECT_MEMBER
from .common import PROJECT_OWNER
from .common import PROJECT_READ_ONLY
from .common import random_test_name
from .common import rbac_get_kubeconfig_by_role
from .common import rbac_get_namespace
from .common import rbac_get_user_token_by_role
from .common import requests
from .common import run_command as run_command_common
from .common import ADMIN_TOKEN
from .common import USER_TOKEN
from .common import validate_all_workload_image_from_rancher
from .common import validate_app_deletion
from .common import wait_for_condition
from .common import wait_for_pod_to_running
from .common import wait_for_pods_in_workload
from .common import wait_for_wl_to_active
from .test_monitoring import C_MONITORING_ANSWERS
ISTIO_PATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "resource/istio")
ISTIO_CRD_PATH = os.path.join(ISTIO_PATH, "crds")
ISTIO_TEMPLATE_ID = "cattle-global-data:system-library-rancher-istio"
ISTIO_VERSION = os.environ.get('RANCHER_ISTIO_VERSION', "")
ISTIO_INGRESSGATEWAY_NODEPORT = os.environ.get(
'RANCHER_ISTIO_INGRESSGATEWAY_NODEPORT', 31380)
ISTIO_BOOKINFO_QUERY_RESULT = "<title>Simple Bookstore App</title>"
ISTIO_EXTERNAL_ID = "catalog://?catalog=system-library" \
"&template=rancher-istio&version="
DEFAULT_ANSWERS = {
"enableCRDs": "true",
"gateways.enabled": "true",
"gateways.istio-ingressgateway.type": "NodePort",
"gateways.istio-ingressgateway.ports[0].nodePort":
ISTIO_INGRESSGATEWAY_NODEPORT,
"gateways.istio-ingressgateway.ports[0].port": 80,
"gateways.istio-ingressgateway.ports[0].targetPort": 80,
"gateways.istio-ingressgateway.ports[0].name": "http2",
"global.monitoring.type": "cluster-monitoring"}
namespace = {"app_client": None, "app_ns": None, "gateway_url": None,
"system_ns": None, "system_project": None,
"istio_version": None, "istio_app": None}
crd_test_data = [
("policy.authentication.istio.io", "authenticationpolicy.yaml"),
# ("adapter.config.istio.io", "adapter.yaml"),
# ABOVE FAILS in current state: Rancher v2.3.5
# ("attributemanifest.config.istio.io", "attributemanifest.yaml"),
# ABOVE FAILS in current state: Rancher v2.3.5
("handler.config.istio.io", "handler.yaml"),
# ("httpapispecbinding.config.istio.io", "httpapispecbinding.yaml"),
# ABOVE FAILS in current state: Rancher v2.3.5
# ("httpapispec.config.istio.io", "httpapispec.yaml"),
# ABOVE FAILS in current state: Rancher v2.3.5
# ("instance.config.istio.io", "instance.yaml"),
# ABOVE FAILS in current state: Rancher v2.3.5
("quotaspecbinding.config.istio.io", "quotaspecbinding.yaml"),
("quotaspec.config.istio.io", "quotaspec.yaml"),
("rule.config.istio.io", "rule.yaml"),
# ("template.config.istio.io", "template.yaml"),
# ABOVE FAILS in current state: Rancher v2.3.5
("destinationrule.networking.istio.io", "destinationrule.yaml"),
("envoyfilter.networking.istio.io", "envoyfilter.yaml"),
("gateway.networking.istio.io", "gateway.yaml"),
("serviceentry.networking.istio.io", "serviceentry.yaml"),
("sidecar.networking.istio.io", "sidecar.yaml"),
("virtualservice.networking.istio.io", "virtualservice.yaml"),
("rbacconfig.rbac.istio.io", "rbacconfig.yaml"),
("servicerolebinding.rbac.istio.io", "servicerolebinding.yaml"),
("servicerole.rbac.istio.io", "servicerole.yaml"),
("authorizationpolicy.security.istio.io", "authorizationpolicy.yaml"),
# ("certificate.certmanager.k8s.io", "certificate.yaml"),
# ABOVE FAILS in current state: Rancher v2.3.5
# ("challenge.certmanager.k8s.io", "challenge.yaml"),
# ABOVE FAILS in current state: Rancher v2.3.5
# ("clusterissuer.certmanager.k8s.io", "clusterissuer.yaml"),
# ABOVE FAILS in current state: Rancher v2.3.5
# ("issuer.certmanager.k8s.io", "issuer.yaml"),
# ABOVE FAILS in current state: Rancher v2.3.5
# ("order.certmanager.k8s.io", "order.yaml"),
# ABOVE FAILS in current state: Rancher v2.3.5
]
def test_istio_resources():
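    # Deploy the Istio bookinfo sample and exercise its services, virtual
    # service, gateway and routing rules through the ingress gateway.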
app_client = namespace["app_client"]
app_ns = namespace["app_ns"]
gateway_url = namespace["gateway_url"]
create_and_test_bookinfo_services(app_client, app_ns)
create_bookinfo_virtual_service(app_client, app_ns)
create_and_test_bookinfo_gateway(app_client, app_ns, gateway_url)
create_and_test_bookinfo_routing(app_client, app_ns, gateway_url)
def test_istio_deployment_options():
file_path = ISTIO_PATH + '/nginx-custom-sidecar.yaml'
expected_image = "rancher/istio-proxyv2:1.4.3"
p_client = namespace["app_client"]
ns = namespace["app_ns"]
execute_kubectl_cmd('apply -f ' + file_path + ' -n ' + ns.name, False)
result = execute_kubectl_cmd('get deployment -n ' + ns.name, True)
for deployment in result['items']:
wl = p_client.list_workload(id='deployment:'
+ deployment['metadata']['namespace']
+ ':'
+ deployment['metadata']['name']).data[
0]
wl = wait_for_wl_to_active(p_client, wl, 60)
wl_pods = wait_for_pods_in_workload(p_client, wl, 1)
wait_for_pod_to_running(p_client, wl_pods[0])
workload = p_client.list_workload(name="nginx-v1",
namespaceId=ns.id).data[0]
pod = p_client.list_pod(workloadId=workload.id).data[0]
try:
assert any(container.image == expected_image
for container in pod.containers)
except AssertionError as e:
retrieved_images = ""
for container in pod.containers:
retrieved_images += container.image + " "
retrieved_images = retrieved_images.strip().split(" ")
raise AssertionError("None of {} matches '{}'".format(
retrieved_images, expected_image))
# Enables all possible istio custom answers with the exception of certmanager
def test_istio_custom_answers(skipif_unsupported_istio_version,
enable_all_options_except_certmanager):
expected_deployments = [
"grafana", "istio-citadel", "istio-egressgateway", "istio-galley",
"istio-ilbgateway", "istio-ingressgateway", "istio-pilot",
"istio-policy", "istio-sidecar-injector", "istio-telemetry",
"istio-tracing", "istiocoredns", "kiali", "prometheus"
]
expected_daemonsets = ["istio-nodeagent"]
expected_job_list = ["istio-onefive-migration" if int(namespace["istio_version"].split(".")[1]) >= 5 else None]
validate_all_workload_image_from_rancher(
get_system_client(USER_TOKEN), namespace["system_ns"],
ignore_pod_count=True, deployment_list=expected_deployments,
daemonset_list=expected_daemonsets, job_list=expected_job_list)
# This is split out separately from test_istio_custom_answers because
# certmanager creates its own crds outside of istio
@pytest.mark.skip(reason="To be removed, no support from 1.7.000")
def test_istio_certmanager_enables(skipif_unsupported_istio_version,
enable_certmanager):
expected_deployments = [
"certmanager", "istio-citadel", "istio-galley", "istio-ingressgateway",
"istio-pilot", "istio-policy", "istio-sidecar-injector",
"istio-telemetry", "istio-tracing", "kiali"
]
validate_all_workload_image_from_rancher(
get_system_client(USER_TOKEN), namespace["system_ns"],
ignore_pod_count=True, deployment_list=expected_deployments)
@if_test_rbac
def test_rbac_istio_metrics_allow_all_cluster_owner(allow_all_access):
kiali_url, tracing_url, _, _ = get_urls()
cluster_owner = rbac_get_user_token_by_role(CLUSTER_OWNER)
validate_access(kiali_url, cluster_owner)
validate_access(tracing_url, cluster_owner)
@if_test_rbac
def test_rbac_istio_monitoring_allow_all_cluster_owner(allow_all_access):
_, _, grafana_url, prometheus_url = get_urls()
cluster_owner = rbac_get_user_token_by_role(CLUSTER_OWNER)
validate_access(grafana_url, cluster_owner)
validate_access(prometheus_url, cluster_owner)
@if_test_rbac
def test_rbac_istio_metrics_allow_all_cluster_member(allow_all_access):
kiali_url, tracing_url, _, _ = get_urls()
cluster_member = rbac_get_user_token_by_role(CLUSTER_MEMBER)
validate_access(kiali_url, cluster_member)
validate_access(tracing_url, cluster_member)
@if_test_rbac
def test_rbac_istio_monitoring_allow_all_cluster_member(allow_all_access):
_, _, grafana_url, prometheus_url = get_urls()
cluster_member = rbac_get_user_token_by_role(CLUSTER_MEMBER)
validate_no_access(grafana_url, cluster_member)
validate_no_access(prometheus_url, cluster_member)
@if_test_rbac
def test_rbac_istio_metrics_allow_all_project_owner(allow_all_access):
kiali_url, tracing_url, _, _ = get_urls()
    project_owner = rbac_get_user_token_by_role(PROJECT_OWNER)
    validate_access(kiali_url, project_owner)
    validate_access(tracing_url, project_owner)
@if_test_rbac
def test_rbac_istio_monitoring_allow_all_project_owner(allow_all_access):
_, _, grafana_url, prometheus_url = get_urls()
    project_owner = rbac_get_user_token_by_role(PROJECT_OWNER)
    validate_no_access(grafana_url, project_owner)
    validate_no_access(prometheus_url, project_owner)
@if_test_rbac
def test_rbac_istio_metrics_allow_all_project_member(allow_all_access):
kiali_url, tracing_url, _, _ = get_urls()
    project_member = rbac_get_user_token_by_role(PROJECT_MEMBER)
    validate_access(kiali_url, project_member)
    validate_access(tracing_url, project_member)
@if_test_rbac
def test_rbac_istio_monitoring_allow_all_project_member(allow_all_access):
_, _, grafana_url, prometheus_url = get_urls()
    project_member = rbac_get_user_token_by_role(PROJECT_MEMBER)
    validate_no_access(grafana_url, project_member)
    validate_no_access(prometheus_url, project_member)
@if_test_rbac
def test_rbac_istio_metrics_allow_all_project_read(allow_all_access):
kiali_url, tracing_url, _, _ = get_urls()
    project_read_only = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
    validate_access(kiali_url, project_read_only)
    validate_access(tracing_url, project_read_only)
@if_test_rbac
def test_rbac_istio_monitoring_allow_all_project_read(allow_all_access):
_, _, grafana_url, prometheus_url = get_urls()
    project_read_only = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
    validate_no_access(grafana_url, project_read_only)
    validate_no_access(prometheus_url, project_read_only)
@if_test_rbac
def test_rbac_istio_metrics_allow_none_cluster_owner(default_access):
kiali_url, tracing_url, _, _ = get_urls()
cluster_owner = rbac_get_user_token_by_role(CLUSTER_OWNER)
validate_access(kiali_url, cluster_owner)
validate_access(tracing_url, cluster_owner)
@if_test_rbac
def test_rbac_istio_monitoring_allow_none_cluster_owner(default_access):
_, _, grafana_url, prometheus_url = get_urls()
cluster_owner = rbac_get_user_token_by_role(CLUSTER_OWNER)
validate_access(grafana_url, cluster_owner)
validate_access(prometheus_url, cluster_owner)
@if_test_rbac
def test_rbac_istio_metrics_allow_none_cluster_member(default_access):
kiali_url, tracing_url, _, _ = get_urls()
cluster_member = rbac_get_user_token_by_role(CLUSTER_MEMBER)
validate_no_access(kiali_url, cluster_member)
validate_no_access(tracing_url, cluster_member)
@if_test_rbac
def test_rbac_istio_monitoring_allow_none_cluster_member(default_access):
_, _, grafana_url, prometheus_url = get_urls()
cluster_member = rbac_get_user_token_by_role(CLUSTER_MEMBER)
validate_no_access(grafana_url, cluster_member)
validate_no_access(prometheus_url, cluster_member)
@if_test_rbac
def test_rbac_istio_metrics_allow_none_project_owner(default_access):
kiali_url, tracing_url, _, _ = get_urls()
    project_owner = rbac_get_user_token_by_role(PROJECT_OWNER)
    validate_no_access(kiali_url, project_owner)
    validate_no_access(tracing_url, project_owner)
@if_test_rbac
def test_rbac_istio_monitoring_allow_none_project_owner(default_access):
_, _, grafana_url, prometheus_url = get_urls()
    project_owner = rbac_get_user_token_by_role(PROJECT_OWNER)
    validate_no_access(grafana_url, project_owner)
    validate_no_access(prometheus_url, project_owner)
@if_test_rbac
def test_rbac_istio_metrics_allow_none_project_member(default_access):
kiali_url, tracing_url, _, _ = get_urls()
    project_member = rbac_get_user_token_by_role(PROJECT_MEMBER)
    validate_no_access(kiali_url, project_member)
    validate_no_access(tracing_url, project_member)
@if_test_rbac
def test_rbac_istio_monitoring_allow_none_project_member(default_access):
_, _, grafana_url, prometheus_url = get_urls()
    project_member = rbac_get_user_token_by_role(PROJECT_MEMBER)
    validate_no_access(grafana_url, project_member)
    validate_no_access(prometheus_url, project_member)
@if_test_rbac
def test_rbac_istio_metrics_allow_none_project_read(default_access):
kiali_url, tracing_url, _, _ = get_urls()
    project_read_only = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
    validate_no_access(kiali_url, project_read_only)
    validate_no_access(tracing_url, project_read_only)
@if_test_rbac
def test_rbac_istio_monitoring_allow_none_project_read(default_access):
_, _, grafana_url, prometheus_url = get_urls()
    project_read_only = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
    validate_no_access(grafana_url, project_read_only)
    validate_no_access(prometheus_url, project_read_only)
@if_test_rbac
def test_rbac_istio_update_cluster_member():
user = rbac_get_user_token_by_role(CLUSTER_MEMBER)
with pytest.raises(ApiError) as e:
update_istio_app({"FOO": "BAR"}, user)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
def test_rbac_istio_disable_cluster_member():
user = rbac_get_user_token_by_role(CLUSTER_MEMBER)
with pytest.raises(ApiError) as e:
delete_istio_app(user)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
def test_rbac_istio_update_project_owner():
user = rbac_get_user_token_by_role(PROJECT_OWNER)
with pytest.raises(ApiError) as e:
update_istio_app({"FOO": "BAR"}, user)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
def test_rbac_istio_disable_project_owner():
user = rbac_get_user_token_by_role(PROJECT_OWNER)
with pytest.raises(ApiError) as e:
delete_istio_app(user)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
def test_rbac_istio_update_project_member():
user = rbac_get_user_token_by_role(PROJECT_MEMBER)
with pytest.raises(ApiError) as e:
update_istio_app({"FOO": "BAR"}, user)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
def test_rbac_istio_disable_project_member():
user = rbac_get_user_token_by_role(PROJECT_MEMBER)
with pytest.raises(ApiError) as e:
delete_istio_app(user)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
def test_rbac_istio_update_project_read():
user = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
with pytest.raises(ApiError) as e:
update_istio_app({"FOO": "BAR"}, user)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
def test_rbac_istio_disable_project_read():
user = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
with pytest.raises(ApiError) as e:
delete_istio_app(user)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
@pytest.mark.parametrize("crd,manifest", crd_test_data)
def test_rbac_istio_crds_project_owner(skipif_unsupported_istio_version,
update_answers, crd, manifest):
if "certmanager" in crd:
update_answers("enable_certmanager")
    else:
update_answers("default_access")
kubectl_context = rbac_get_kubeconfig_by_role(PROJECT_OWNER)
file = ISTIO_CRD_PATH + '/' + manifest
ns = rbac_get_namespace()
assert re.match("{}.* created".format(crd),
apply_crd(ns, file, kubectl_context))
assert "Forbidden" not in get_crd(ns, crd, kubectl_context)
assert re.match("{}.* deleted".format(crd),
delete_crd(ns, file, kubectl_context))
@if_test_rbac
@pytest.mark.parametrize("crd,manifest", crd_test_data)
def test_rbac_istio_crds_project_member(skipif_unsupported_istio_version,
update_answers, crd, manifest):
if "certmanager" in crd:
update_answers("enable_certmanager")
    else:
update_answers("default_access")
kubectl_context = rbac_get_kubeconfig_by_role(PROJECT_MEMBER)
file = ISTIO_CRD_PATH + '/' + manifest
ns = rbac_get_namespace()
assert re.match("{}.* created".format(crd),
apply_crd(ns, file, kubectl_context))
assert "Forbidden" not in get_crd(ns, crd, kubectl_context)
assert re.match("{}.* deleted".format(crd),
delete_crd(ns, file, kubectl_context))
@if_test_rbac
@pytest.mark.parametrize("crd,manifest", crd_test_data)
def test_rbac_istio_crds_project_read(skipif_unsupported_istio_version,
update_answers, crd, manifest):
if "certmanager" in crd:
update_answers("enable_certmanager")
    else:
update_answers("default_access")
kubectl_context = rbac_get_kubeconfig_by_role(PROJECT_READ_ONLY)
file = ISTIO_CRD_PATH + '/' + manifest
ns = rbac_get_namespace()
assert str(apply_crd(ns, file, kubectl_context)).startswith(
"Error from server (Forbidden)")
assert "Forbidden" not in get_crd(ns, crd, kubectl_context)
assert str(delete_crd(ns, file, kubectl_context)).startswith(
"Error from server (Forbidden)")
@if_test_group_rbac
def test_rbac_istio_group_access(auth_cluster_access, update_answers):
group, users, noauth_user = auth_cluster_access
update_answers("allow_group_access", group=group)
kiali_url, tracing_url, grafana_url, prometheus_url = get_urls()
for user in users:
user_token = auth_get_user_token(user)
print("Validating {} has access.".format(user))
validate_access(kiali_url, user_token)
validate_access(tracing_url, user_token)
validate_no_access(grafana_url, user_token)
validate_no_access(prometheus_url, user_token)
print("Validating {} does not have access.".format(noauth_user))
noauth_token = auth_get_user_token(noauth_user)
validate_no_access(kiali_url, noauth_token)
validate_no_access(tracing_url, noauth_token)
validate_no_access(grafana_url, noauth_token)
validate_no_access(prometheus_url, noauth_token)
def validate_access(url, user):
headers = {'Authorization': 'Bearer ' + user}
response = requests.get(headers=headers, url=url, verify=False)
assert response.ok
return response
def validate_no_access(url, user):
headers = {'Authorization': 'Bearer ' + user}
response = requests.get(headers=headers, url=url, verify=False)
assert not response.ok
return response
def update_istio_app(answers, user, app=None, ns=None, project=None):
if app is None:
app = namespace["istio_app"]
if ns is None:
ns = namespace["system_ns"]
if project is None:
project = namespace["system_project"]
p_client = get_system_client(user)
updated_answers = copy.deepcopy(DEFAULT_ANSWERS)
updated_answers.update(answers)
namespace["istio_app"] = p_client.update(
obj=app,
externalId=ISTIO_EXTERNAL_ID,
targetNamespace=ns.name,
projectId=project.id,
answers=updated_answers)
verify_istio_app_ready(p_client, namespace["istio_app"], 120, 120)
def create_and_verify_istio_app(p_client, ns, project):
print("creating istio catalog app")
app = p_client.create_app(
name="cluster-istio",
externalId=ISTIO_EXTERNAL_ID,
targetNamespace=ns.name,
projectId=project.id,
answers=DEFAULT_ANSWERS
)
verify_istio_app_ready(p_client, app, 120, 600)
return app
def delete_istio_app(user):
p_client = get_system_client(user)
p_client.delete(namespace["istio_app"])
def verify_istio_app_ready(p_client, app, install_timeout, deploy_timeout,
initial_run=True):
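    # Recursively poll the app's Installed and Deployed conditions, reducing
    # the remaining timeouts by 2 seconds per retry, and fail if the app
    # reports an error or a timeout is exhausted.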
if initial_run:
print("Verify Istio App has installed and deployed properly")
if install_timeout <= 0 or deploy_timeout <= 0:
raise TimeoutError("Timeout waiting for istio to be properly "
"installed and deployed.") from None
elif 'conditions' in app and not initial_run:
for cond in app['conditions']:
if "False" in cond['status'] and 'message' in cond \
and "failed" in cond['message']:
raise AssertionError(
"Failed to properly install/deploy app. Reason: {}".format(
cond['message'])) from None
try:
wait_for_condition(p_client, app, check_condition('Installed', 'True'),
timeout=2)
except (Exception, TypeError):
verify_istio_app_ready(p_client, p_client.list_app(
name='cluster-istio').data[0], install_timeout-2, deploy_timeout,
initial_run=False)
try:
wait_for_condition(p_client, app, check_condition('Deployed', 'True'),
timeout=2)
except (Exception, TypeError):
verify_istio_app_ready(p_client, p_client.list_app(
name='cluster-istio').data[0], 2, deploy_timeout-2,
initial_run=False)
def get_urls():
_, cluster = get_user_client_and_cluster()
if namespace["istio_version"] == "0.1.0" \
or namespace["istio_version"] == "0.1.1":
kiali_url = os.environ.get('CATTLE_TEST_URL', "") + \
"/k8s/clusters/" + cluster.id + \
"/api/v1/namespaces/istio-system/services/" \
"http:kiali-http:80/proxy/"
else:
kiali_url = os.environ.get('CATTLE_TEST_URL', "") + \
"/k8s/clusters/" + cluster.id + \
"/api/v1/namespaces/istio-system/services/" \
"http:kiali:20001/proxy/"
tracing_url = os.environ.get('CATTLE_TEST_URL', "") + \
"/k8s/clusters/" + cluster.id + \
"/api/v1/namespaces/istio-system/services/" \
"http:tracing:80/proxy/jaeger/search"
grafana_url = os.environ.get('CATTLE_TEST_URL', "") + \
"/k8s/clusters/" + cluster.id + \
"/api/v1/namespaces/cattle-prometheus/services/" \
"http:access-grafana:80/proxy/dashboards/"
prometheus_url = os.environ.get('CATTLE_TEST_URL', "") + \
"/k8s/clusters/" + cluster.id + \
"/api/v1/namespaces/cattle-prometheus/services/" \
"http:access-prometheus:80/proxy/"
return kiali_url, tracing_url, grafana_url, prometheus_url
def verify_admission_webhook():
has_admission_webhook = execute_kubectl_cmd(
'api-versions | grep admissionregistration', False)
if len(has_admission_webhook) == 0:
raise AssertionError(
"MutatingAdmissionWebhook and ValidatingAdmissionWebhook plugins "
"are not listed in the kube-apiserver --enable-admission-plugins")
def add_istio_label_to_ns(c_client, ns):
labels = {
"istio-injection": "enabled"
}
ns = c_client.update_by_id_namespace(ns.id, labels=labels)
return ns
def create_and_test_bookinfo_services(p_client, ns, timeout=DEFAULT_TIMEOUT):
book_info_file_path = ISTIO_PATH + '/bookinfo.yaml'
execute_kubectl_cmd('apply -f ' + book_info_file_path + ' -n '
+ ns.name, False)
result = execute_kubectl_cmd('get deployment -n ' + ns.name, True)
for deployment in result['items']:
wl = p_client.list_workload(id='deployment:'
+ deployment['metadata']['namespace']
+ ':'
+ deployment['metadata']['name']).data[0]
wl = wait_for_wl_to_active(p_client, wl, 60)
wl_pods = wait_for_pods_in_workload(p_client, wl, 1)
wait_for_pod_to_running(p_client, wl_pods[0])
rating_pod = execute_kubectl_cmd('get pod -l app=ratings -n' + ns.name)
assert len(rating_pod['items']) == 1
rating_pod_name = rating_pod['items'][0]['metadata']['name']
try:
result = execute_kubectl_cmd(
'exec -it -n ' + ns.name + ' ' + rating_pod_name
+ ' -c ratings -- curl productpage:9080/productpage'
+ ' | grep -o "<title>.*</title>"', False)
except CalledProcessError:
result = None
start = time.time()
while result is None or result.rstrip() != ISTIO_BOOKINFO_QUERY_RESULT:
if time.time() - start > timeout:
raise AssertionError(
"Timed out and failed to get bookinfo service ready")
time.sleep(.5)
try:
result = execute_kubectl_cmd(
'exec -it -n ' + ns.name + ' ' + rating_pod_name
+ ' -c ratings -- curl productpage:9080/productpage'
+ ' | grep -o "<title>.*</title>"', False)
except CalledProcessError:
result = None
assert result.rstrip() == ISTIO_BOOKINFO_QUERY_RESULT
return result
def create_and_test_bookinfo_gateway(app_client, namespace,
gateway_url, timeout=DEFAULT_TIMEOUT):
servers = [{
"hosts": ["*"],
"port": {
"number": "80",
"protocol": "HTTP",
"name": "http"
}
}]
selector = {"istio": "ingressgateway"}
app_client.create_gateway(name="bookinfo-gateway",
namespaceId=namespace.id,
selector=selector,
servers=servers)
gateways = execute_kubectl_cmd('get gateway -n' + namespace.name, True)
assert len(gateways['items']) == 1
curl_cmd = 'curl -s http://' + gateway_url \
+ '/productpage | grep -o "<title>.*</title>"'
result = run_command(curl_cmd)
start = time.time()
while result is None or result.rstrip() != ISTIO_BOOKINFO_QUERY_RESULT:
if time.time() - start > timeout:
raise AssertionError(
"Timed out and failed to get bookinfo gateway ready")
time.sleep(.5)
result = run_command(curl_cmd)
assert result.rstrip() == ISTIO_BOOKINFO_QUERY_RESULT
return result
def create_bookinfo_virtual_service(app_client, namespace):
http = [{
"route": [{
"destination": {
"host": "productpage",
"port": {"number": 9080}
},
"weight": 100,
"portNumberOrName": "9080"
}],
"match": [
{"uri": {"exact": "/productpage"}},
{"uri": {"exact": "/login"}},
{"uri": {"exact": "/logout"}},
{"uri": {"prefix": "/api/v1/products"}}
]
}]
app_client.create_virtual_service(name="bookinfo",
namespaceId=namespace.id,
gateways=["bookinfo-gateway"],
http=http,
hosts=["*"])
def create_bookinfo_destination_rules(app_client, namespace):
subsets = [
{
"name": "v1",
"labels": {
"version": "v1"
}
},
{
"name": "v2",
"labels": {
"version": "v2"
}
},
{
"name": "v3",
"labels": {
"version": "v3"
}
}
]
app_client.create_destination_rule(namespaceId=namespace.id,
name="reviews",
host="reviews",
subsets=subsets)
def create_and_test_bookinfo_routing(app_client, namespace,
gateway_url, timeout=30):
http = [{
"route": [{
"destination": {
"subset": "v3",
"host": "reviews",
"port": {"number": 9080}
},
"weight": 100,
"portNumberOrName": "9080"
}]
}]
create_bookinfo_destination_rules(app_client, namespace)
app_client.create_virtual_service(name="reviews",
namespaceId=namespace.id,
http=http,
hosts=["reviews"])
curl_cmd = 'curl -s http://' + gateway_url \
+ '/productpage | grep -o "glyphicon-star"'
result = run_command(curl_cmd)
start = time.time()
while result is None or "glyphicon-star" not in result:
if time.time() - start > timeout:
raise AssertionError(
"Timed out and failed to get correct reviews version")
time.sleep(.5)
result = run_command(curl_cmd)
assert "glyphicon-star" in result
return result
# if grep returns no output, subprocess.check_output raises CalledProcessError
def run_command(command):
try:
return run_command_common(command)
except CalledProcessError:
return None
def get_system_client(user):
# Gets client and cluster using USER_TOKEN, who is a CLUSTER_OWNER
client, cluster = get_user_client_and_cluster()
projects = client.list_project(name='System', clusterId=cluster.id)
if len(projects.data) == 0:
raise AssertionError(
"System project not found in the cluster " + cluster.Name)
p = projects.data[0]
return get_project_client_for_token(p, user)
def add_user_to_cluster(username):
class User(object):
def __init__(self, u_name, user_id, token):
self.username = u_name
self.id = user_id
self.token = token
user_data = login_as_auth_user(username, AUTH_USER_PASSWORD)
u_id = user_data['userId']
u_token = user_data['token']
user_obj = User(username, u_id, u_token)
add_role_to_user(user_obj, CLUSTER_MEMBER)
# Enable one of these two below options to get around Issue #25365
get_client_for_token(u_token)
# headers = {'Authorization': 'Bearer ' + u_token}
# url = os.environ.get('CATTLE_TEST_URL', "") + "/v3/users?me=true"
# response = requests.get(headers=headers, url=url, verify=False)
@pytest.fixture()
def update_answers():
def _update_answers(answer_type, group=None):
answers = {
"kiali.enabled": "true",
"tracing.enabled": "true",
}
if answer_type == "allow_all_access":
additional_answers = {
"global.members[0].kind": "Group",
"global.members[0].name": "system:authenticated",
}
answers.update(additional_answers)
elif answer_type == "allow_group_access":
auth_admin = login_as_auth_user(load_setup_data()["admin_user"],
AUTH_USER_PASSWORD)
group_id = get_group_principal_id(group, token=auth_admin['token'])
additional_answers = {
"global.members[0].kind": "Group",
"global.members[0].name": group_id,
}
answers.update(additional_answers)
elif answer_type == "enable_certmanager":
additional_answers = {"certmanager.enabled": "true"}
answers.update(additional_answers)
elif answer_type == "enable_all_options_except_certmanager":
additional_answers = {
"gateways.istio-egressgateway.enabled": "true",
"gateways.istio-ilbgateway.enabled": "true",
"gateways.istio-ingressgateway.sds.enabled": "true",
"global.proxy.accessLogFile": "/dev/stdout",
"grafana.enabled": "true",
"istiocoredns.enabled": "true",
"kiali.dashboard.grafanaURL": "",
"kiali.prometheusAddr": "http://prometheus:9090",
"nodeagent.enabled": "true",
"nodeagent.env.CA_ADDR": "istio-citadel:8060",
"nodeagent.env.CA_PROVIDER": "Citadel",
"prometheus.enabled": "true",
}
answers.update(additional_answers)
update_istio_app(answers, USER_TOKEN)
return _update_answers
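# The thin wrapper fixtures below pre-select one answer set each; a test can
# request one of them to reconfigure the istio app before it runs, for example
# (illustrative only):
#
#     def test_some_feature(allow_all_access):
#         # cluster-istio now grants access to all authenticated users
#         ...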
@pytest.fixture()
def default_access(update_answers):
update_answers("default_access")
@pytest.fixture()
def allow_all_access(update_answers):
update_answers("allow_all_access")
@pytest.fixture()
def enable_certmanager(update_answers):
update_answers("enable_certmanager")
@pytest.fixture()
def enable_all_options_except_certmanager(update_answers):
update_answers("enable_all_options_except_certmanager")
@pytest.fixture(scope='function')
def skipif_unsupported_istio_version(request):
if ISTIO_VERSION != "":
istio_version = ISTIO_VERSION
else:
client, _ = get_user_client_and_cluster()
istio_versions = list(client.list_template(
id=ISTIO_TEMPLATE_ID).data[0].versionLinks.keys())
        istio_version = istio_versions[-1]
if compare_versions(istio_version, "1.4.3") < 0:
pytest.skip("This test is not supported for older Istio versions")
@pytest.fixture(scope='function')
def auth_cluster_access(request):
group, noauth_user = get_a_group_and_a_user_not_in_it(
NESTED_GROUP_ENABLED)
users = get_user_by_group(group, NESTED_GROUP_ENABLED)
for user in users:
add_user_to_cluster(user)
add_user_to_cluster(noauth_user)
def fin():
auth_resource_cleanup()
request.addfinalizer(fin)
return group, users, noauth_user
@pytest.fixture(scope='module', autouse="True")
def create_project_client(request):
global DEFAULT_ANSWERS
global ISTIO_EXTERNAL_ID
client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
admin_client = get_admin_client()
ad_enabled = admin_client.by_id_auth_config("activedirectory").enabled
if AUTH_PROVIDER == "activeDirectory" and not ad_enabled:
enable_ad(load_setup_data()["admin_user"], ADMIN_TOKEN,
password=AUTH_USER_PASSWORD, nested=NESTED_GROUP_ENABLED)
projects = client.list_project(name='System', clusterId=cluster.id)
if len(projects.data) == 0:
raise AssertionError(
"System project not found in the cluster " + cluster.name)
p = projects.data[0]
p_client = get_project_client_for_token(p, USER_TOKEN)
c_client = get_cluster_client_for_token(cluster, USER_TOKEN)
istio_versions = list(client.list_template(
id=ISTIO_TEMPLATE_ID).data[0].versionLinks.keys())
    istio_version = istio_versions[-1]
if ISTIO_VERSION != "":
istio_version = ISTIO_VERSION
ISTIO_EXTERNAL_ID += istio_version
answers = {"global.rancher.clusterId": p.clusterId}
DEFAULT_ANSWERS.update(answers)
monitoring_answers = copy.deepcopy(C_MONITORING_ANSWERS)
monitoring_answers["prometheus.persistence.enabled"] = "false"
monitoring_answers["grafana.persistence.enabled"] = "false"
if cluster["enableClusterMonitoring"] is False:
client.action(cluster, "enableMonitoring",
answers=monitoring_answers)
if cluster["istioEnabled"] is False:
verify_admission_webhook()
ns = create_ns(c_client, cluster, p, 'istio-system')
app = create_and_verify_istio_app(p_client, ns, p)
else:
app = p_client.list_app(name='cluster-istio').data[0]
ns = c_client.list_namespace(name='istio-system').data[0]
update_istio_app(DEFAULT_ANSWERS, USER_TOKEN,
app=app, ns=ns, project=p)
istio_project, app_ns = create_project_and_ns(
USER_TOKEN, cluster,
random_test_name("istio-app"),
random_test_name("istio-app-ns"))
add_istio_label_to_ns(c_client, app_ns)
app_client = get_project_client_for_token(istio_project, USER_TOKEN)
istio_gateway_wl = p_client.by_id_workload('deployment:' +
ns.name +
':istio-ingressgateway')
assert istio_gateway_wl is not None
endpoints = istio_gateway_wl['publicEndpoints'][0]
gateway_url = endpoints['addresses'][0] + ':' + str(endpoints['port'])
namespace["gateway_url"] = gateway_url
namespace["app_ns"] = app_ns
namespace["app_client"] = app_client
namespace["system_ns"] = ns
namespace["system_project"] = p
namespace["istio_version"] = istio_version
namespace["istio_app"] = app
def fin():
client = get_user_client()
# delete the istio app
app = p_client.delete(namespace["istio_app"])
validate_app_deletion(p_client, app.id)
# delete the istio ns
p_client.delete(namespace["system_ns"])
# disable the cluster monitoring
c = client.reload(cluster)
if c["enableClusterMonitoring"] is True:
client.action(c, "disableMonitoring")
# delete the istio testing project
client.delete(istio_project)
request.addfinalizer(fin)
| 39,244 | 36.844744 | 115 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_cli.py
|
import ast
import os
import pytest
from .test_rke_cluster_provisioning import (create_and_validate_custom_host,
cluster_cleanup)
from .cli_objects import RancherCli
from .common import (ADMIN_TOKEN, USER_TOKEN, CATTLE_TEST_URL, CLUSTER_NAME,
DATA_SUBDIR, get_admin_client, get_user_client,
get_user_client_and_cluster, random_str,
get_project_client_for_token)
KNOWN_HOST = ast.literal_eval(os.environ.get('RANCHER_KNOWN_HOST', "False"))
if_test_multicluster = pytest.mark.skipif(ast.literal_eval(
os.environ.get('RANCHER_SKIP_MULTICLUSTER', "False")),
reason='Multi-Cluster tests are skipped in the interest of time/cost.')
SYSTEM_CHART_URL = "https://git.rancher.io/system-charts"
SYSTEM_CHART_BRANCH = os.environ.get("RANCHER_SYSTEM_CHART_BRANCH", "dev")
OPENEBS_CHART = 'openebs'
OPENEBS_CHART_VERSION = '1.5.0'
OPENEBS_CHART_VERSION_UPGRADE = '1.6.0'
CHARTMUSEUM_CHART = 'chartmuseum'
CHARTMUSEUM_CHART_VERSION = '2.3.1'
APP_TIMEOUT = 120
CATALOG_URL = "https://github.com/rancher/integration-test-charts.git"
BRANCH = "validation-tests"
CHARTMUSEUM_CHART_VERSION_CATALOG = 'latest'
# Supplying default answers due to issue with multi-cluster app install:
# https://github.com/rancher/rancher/issues/25514
MULTICLUSTER_APP_ANSWERS = {
"analytics.enabled": "true",
"defaultImage": "true",
"defaultPorts": "true",
"ndm.filters.excludePaths": "loop,fd0,sr0,/dev/ram,/dev/dm-,/dev/md",
"ndm.filters.excludeVendors": "CLOUDBYT,OpenEBS",
"ndm.sparse.count": "0",
"ndm.sparse.enabled": "true",
"ndm.sparse.path": "/var/openebs/sparse",
"ndm.sparse.size": "10737418240", "policies.monitoring.enabled": "true"
}
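# These answers are passed verbatim as the `values` argument of
# rancher_cli.mcapps.install() in the multi-cluster app tests below, e.g.:
#
#     rancher_cli.mcapps.install(OPENEBS_CHART, targets=targets,
#                                role="cluster-owner",
#                                values=MULTICLUSTER_APP_ANSWERS,
#                                version=OPENEBS_CHART_VERSION)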
def test_cli_context_switch(rancher_cli: RancherCli):
rancher_cli.log.info("Testing Context Switching")
clusters = rancher_cli.get_clusters()
client = get_user_client()
projects = client.list_project()
assert len(projects) > 0
for project in projects:
rancher_cli.switch_context(project['id'])
cluster_name, project_name = rancher_cli.get_context()
assert any(cluster["id"] == project['clusterId']
and cluster["name"] == cluster_name for cluster in clusters)
assert project_name == project['name']
def test_cli_project_create(remove_cli_resource, rancher_cli: RancherCli):
rancher_cli.log.info("Testing Creating Projects")
initial_projects = rancher_cli.projects.get_current_projects()
project = rancher_cli.projects.create_project(use_context=False)
remove_cli_resource("project", project["id"])
assert project is not None
assert len(initial_projects) == len(
rancher_cli.projects.get_current_projects()) - 1
def test_cli_project_delete(remove_cli_resource, rancher_cli: RancherCli):
rancher_cli.log.info("Testing Deleting Projects")
initial_projects = rancher_cli.projects.get_current_projects()
project = rancher_cli.projects.create_project(use_context=False)
remove_cli_resource("project", project["id"])
assert project is not None
rancher_cli.projects.delete_project(project["name"])
assert len(initial_projects) == len(
rancher_cli.projects.get_current_projects())
def test_cli_namespace_create(remove_cli_resource, rancher_cli: RancherCli):
rancher_cli.log.info("Testing Creating Namespaces")
p1 = rancher_cli.projects.create_project()
remove_cli_resource("project", p1["id"])
namespace = rancher_cli.projects.create_namespace()
remove_cli_resource("namespace", namespace)
assert len(rancher_cli.projects.get_namespaces()) == 1
assert "{}|active".format(
namespace) in rancher_cli.projects.get_namespaces()
def test_cli_namespace_move(remove_cli_resource, rancher_cli: RancherCli):
rancher_cli.log.info("Testing Moving Namespaces")
p1 = rancher_cli.projects.create_project()
remove_cli_resource("project", p1["id"])
namespace = rancher_cli.projects.create_namespace()
remove_cli_resource("namespace", namespace)
assert len(rancher_cli.projects.get_namespaces()) == 1
p2 = rancher_cli.projects.create_project(use_context=False)
remove_cli_resource("project", p2["id"])
rancher_cli.projects.move_namespace(namespace, p2["id"])
assert len(rancher_cli.projects.get_namespaces()) == 0
rancher_cli.projects.switch_context(p2["id"])
assert len(rancher_cli.projects.get_namespaces()) == 1
assert "{}|active".format(
namespace) in rancher_cli.projects.get_namespaces()
def test_cli_namespace_delete(remove_cli_resource, rancher_cli: RancherCli):
rancher_cli.log.info("Testing Deleting Namespaces")
p1 = rancher_cli.projects.create_project()
remove_cli_resource("project", p1["id"])
namespace = rancher_cli.projects.create_namespace()
remove_cli_resource("namespace", namespace)
assert len(rancher_cli.projects.get_namespaces()) == 1
assert "{}|active".format(
namespace) in rancher_cli.projects.get_namespaces()
deleted = rancher_cli.projects.delete_namespace(namespace)
assert deleted
def test_cli_app_install(remove_cli_resource, rancher_cli: RancherCli):
rancher_cli.log.info("Testing Upgrading Apps")
initial_app = rancher_cli.apps.install(
OPENEBS_CHART, "openebs", version=OPENEBS_CHART_VERSION,
timeout=APP_TIMEOUT)
remove_cli_resource("apps", initial_app["id"])
assert initial_app["state"] == "active"
assert initial_app["version"] == OPENEBS_CHART_VERSION
def test_cli_app_values_install(remove_cli_resource, rancher_cli: RancherCli):
rancher_cli.log.info("Testing Upgrading Apps")
initial_app = rancher_cli.apps.install(
CHARTMUSEUM_CHART, random_str(), version=CHARTMUSEUM_CHART_VERSION,
timeout=APP_TIMEOUT, values=DATA_SUBDIR + "/appvalues.yaml")
remove_cli_resource("apps", initial_app["id"])
assert initial_app["state"] == "active"
assert initial_app["version"] == CHARTMUSEUM_CHART_VERSION
def test_cli_app_upgrade(remove_cli_resource, rancher_cli: RancherCli):
rancher_cli.log.info("Testing Rolling Back Apps")
initial_app = rancher_cli.apps.install(
OPENEBS_CHART, "openebs", version=OPENEBS_CHART_VERSION,
timeout=APP_TIMEOUT)
remove_cli_resource("apps", initial_app["id"])
assert initial_app["version"] == OPENEBS_CHART_VERSION
upgraded_app = rancher_cli.apps.upgrade(
initial_app, version=OPENEBS_CHART_VERSION_UPGRADE)
assert upgraded_app["state"] == "active"
assert upgraded_app["version"] == OPENEBS_CHART_VERSION_UPGRADE
def test_cli_app_rollback(remove_cli_resource, rancher_cli: RancherCli):
rancher_cli.log.info("Testing Deleting Apps")
initial_app = rancher_cli.apps.install(
OPENEBS_CHART, "openebs", version=OPENEBS_CHART_VERSION,
timeout=APP_TIMEOUT)
remove_cli_resource("apps", initial_app["id"])
assert initial_app["version"] == OPENEBS_CHART_VERSION
upgraded_app = rancher_cli.apps.upgrade(
initial_app, version=OPENEBS_CHART_VERSION_UPGRADE)
assert upgraded_app["version"] == OPENEBS_CHART_VERSION_UPGRADE
rolled_back_app = rancher_cli.apps.rollback(upgraded_app,
OPENEBS_CHART_VERSION)
assert rolled_back_app["state"] == "active"
assert rolled_back_app["version"] == OPENEBS_CHART_VERSION
def test_cli_app_delete(rancher_cli: RancherCli):
rancher_cli.log.info("Testing Deleting Apps")
initial_app = rancher_cli.apps.install(
OPENEBS_CHART, "openebs", version=OPENEBS_CHART_VERSION,
timeout=APP_TIMEOUT)
deleted = rancher_cli.apps.delete(initial_app)
assert deleted
def test_cli_app_install_local_dir(remove_cli_resource, rancher_cli: RancherCli):
rancher_cli.log.info("Testing Installing of an App from Local directory")
initial_app = rancher_cli.apps.install_local_dir(
CATALOG_URL, BRANCH, CHARTMUSEUM_CHART,
version=CHARTMUSEUM_CHART_VERSION_CATALOG, timeout=APP_TIMEOUT)
remove_cli_resource("apps", initial_app["id"])
assert initial_app["state"] == "active"
@if_test_multicluster
def test_cli_multiclusterapp_install(custom_cluster, remove_cli_resource,
rancher_cli: RancherCli):
rancher_cli.log.info("Testing Installing Multi-Cluster Apps")
# Get list of projects to use and ensure that it is 2 or greater
client = get_admin_client()
projects = client.list_project()
targets = []
for project in projects:
if project["name"] == "Default":
rancher_cli.switch_context(project['id'])
cluster_name, project_name = rancher_cli.get_context()
if cluster_name in [custom_cluster.name, CLUSTER_NAME]:
rancher_cli.log.debug("Using cluster: %s", cluster_name)
targets.append(project["id"])
assert len(targets) > 1
initial_app = rancher_cli.mcapps.install(
OPENEBS_CHART, targets=targets, role="cluster-owner",
values=MULTICLUSTER_APP_ANSWERS, version=OPENEBS_CHART_VERSION,
timeout=APP_TIMEOUT)
remove_cli_resource("mcapps", initial_app["name"])
assert initial_app["state"] == "active"
assert initial_app["version"] == OPENEBS_CHART_VERSION
assert len(initial_app["targets"]) == len(targets)
for target in initial_app["targets"]:
assert target["state"] == "active"
assert target["version"] == OPENEBS_CHART_VERSION
@if_test_multicluster
def test_cli_multiclusterapp_upgrade(custom_cluster, remove_cli_resource,
rancher_cli: RancherCli):
rancher_cli.log.info("Testing Upgrading Multi-Cluster Apps")
# Get list of projects to use and ensure that it is 2 or greater
client = get_admin_client()
projects = client.list_project()
targets = []
for project in projects:
if project["name"] == "Default":
rancher_cli.switch_context(project['id'])
cluster_name, project_name = rancher_cli.get_context()
if cluster_name in [custom_cluster.name, CLUSTER_NAME]:
rancher_cli.log.debug("Using cluster: %s", cluster_name)
targets.append(project["id"])
assert len(targets) > 1
initial_app = rancher_cli.mcapps.install(
OPENEBS_CHART, targets=targets, role="cluster-owner",
values=MULTICLUSTER_APP_ANSWERS, version=OPENEBS_CHART_VERSION,
timeout=APP_TIMEOUT)
remove_cli_resource("mcapps", initial_app["name"])
assert initial_app["version"] == OPENEBS_CHART_VERSION
upgraded_app = rancher_cli.mcapps.upgrade(
initial_app, version=OPENEBS_CHART_VERSION_UPGRADE,
timeout=APP_TIMEOUT)
assert upgraded_app["state"] == "active"
assert upgraded_app["version"] == OPENEBS_CHART_VERSION_UPGRADE
assert upgraded_app["id"] == initial_app["id"]
assert len(upgraded_app["targets"]) == len(targets)
for target in upgraded_app["targets"]:
assert target["state"] == "active"
assert target["version"] == OPENEBS_CHART_VERSION_UPGRADE
@if_test_multicluster
def test_cli_multiclusterapp_rollback(custom_cluster, remove_cli_resource,
rancher_cli: RancherCli):
rancher_cli.log.info("Testing Rolling Back Multi-Cluster Apps")
# Get list of projects to use and ensure that it is 2 or greater
client = get_admin_client()
projects = client.list_project()
targets = []
for project in projects:
if project["name"] == "Default":
rancher_cli.switch_context(project['id'])
cluster_name, project_name = rancher_cli.get_context()
if cluster_name in [custom_cluster.name, CLUSTER_NAME]:
rancher_cli.log.debug("Using cluster: %s", cluster_name)
targets.append(project["id"])
assert len(targets) > 1
initial_app = rancher_cli.mcapps.install(
OPENEBS_CHART, targets=targets, role="cluster-owner",
values=MULTICLUSTER_APP_ANSWERS, version=OPENEBS_CHART_VERSION,
timeout=APP_TIMEOUT)
remove_cli_resource("mcapps", initial_app["name"])
assert initial_app["version"] == OPENEBS_CHART_VERSION
upgraded_app = rancher_cli.mcapps.upgrade(
initial_app, version=OPENEBS_CHART_VERSION_UPGRADE,
timeout=APP_TIMEOUT)
assert upgraded_app["version"] == OPENEBS_CHART_VERSION_UPGRADE
rolled_back_app = rancher_cli.mcapps.rollback(
upgraded_app["name"], initial_app["revision"], timeout=APP_TIMEOUT)
assert rolled_back_app["state"] == "active"
assert rolled_back_app["version"] == OPENEBS_CHART_VERSION
assert rolled_back_app["id"] == upgraded_app["id"]
assert len(rolled_back_app["targets"]) == len(targets)
for target in rolled_back_app["targets"]:
assert target["state"] == "active"
assert target["version"] == OPENEBS_CHART_VERSION
@if_test_multicluster
def test_cli_multiclusterapp_delete(custom_cluster, remove_cli_resource,
rancher_cli: RancherCli):
rancher_cli.log.info("Testing Deleting Multi-Cluster Apps")
# Get list of projects to use and ensure that it is 2 or greater
client = get_admin_client()
projects = client.list_project()
targets = []
for project in projects:
if project["name"] == "Default":
rancher_cli.switch_context(project['id'])
cluster_name, project_name = rancher_cli.get_context()
if cluster_name in [custom_cluster.name, CLUSTER_NAME]:
rancher_cli.log.debug("Using cluster: %s", cluster_name)
targets.append(project["id"])
assert len(targets) > 1
initial_app = rancher_cli.mcapps.install(
OPENEBS_CHART, targets=targets, role="cluster-owner",
values=MULTICLUSTER_APP_ANSWERS, version=OPENEBS_CHART_VERSION,
timeout=APP_TIMEOUT)
assert initial_app["version"] == OPENEBS_CHART_VERSION
deleted, apps_deleted = rancher_cli.mcapps.delete(initial_app)
assert deleted
assert apps_deleted
def test_cli_catalog(admin_cli: RancherCli):
admin_cli.log.info("Testing Creating and Deleting Catalogs")
admin_cli.login(CATTLE_TEST_URL, ADMIN_TOKEN)
catalog = admin_cli.catalogs.add(SYSTEM_CHART_URL,
branch=SYSTEM_CHART_BRANCH)
assert catalog is not None
deleted = admin_cli.catalogs.delete(catalog["name"])
assert deleted
@if_test_multicluster
def test_cluster_removal(custom_cluster, admin_cli: RancherCli):
admin_cli.log.info("Testing Cluster Removal")
deleted = admin_cli.clusters.delete(custom_cluster.name)
assert deleted
def test_inspection(rancher_cli: RancherCli):
# Test inspect on the default project used for cli tests
# Validate it has the expected clusterid, id, type, and active state
rancher_cli.log.info("Testing Inspect Resource")
resource = rancher_cli.inspect(
"project", rancher_cli.default_project["id"],
format="{{.clusterId}}|{{.id}}|{{.type}}|{{.state}}")
assert resource is not None
resource_arr = resource.split("|")
assert resource_arr[0] == rancher_cli.default_project["clusterId"]
assert resource_arr[1] == rancher_cli.default_project["id"]
assert resource_arr[2] == "project"
assert resource_arr[3] == "active"
def test_ps(custom_workload, rancher_cli: RancherCli):
rancher_cli.log.info("Testing rancher ps")
# Deploy a workload and validate that the ps command shows it in the
# correct namespace with the correct name
rancher_cli.switch_context(rancher_cli.DEFAULT_CONTEXT)
ps = rancher_cli.ps()
expected_value = "{}|{}|nginx|2".format(
rancher_cli.default_namespace, custom_workload.name)
assert expected_value in ps.splitlines()
def test_kubectl(custom_workload, rancher_cli: RancherCli):
rancher_cli.log.info("Testing kubectl commands from the CLI")
rancher_cli.switch_context(rancher_cli.DEFAULT_CONTEXT)
jsonpath = "-o jsonpath='{.spec.template.spec.containers[0].image}'"
result = rancher_cli.kubectl("get deploy -n {} {} {}".format(
rancher_cli.default_namespace, custom_workload.name, jsonpath))
assert result == "nginx"
# Note: this expects non-Windows nodes because it runs "curl -s ifconfig.me"
@pytest.mark.skip(reason="Fails in Jenkins")
def test_ssh(rancher_cli: RancherCli):
rancher_cli.log.info("Testing ssh into nodes.")
failures = []
rancher_cli.switch_context(rancher_cli.DEFAULT_CONTEXT)
nodes = rancher_cli.nodes.get()
rancher_cli.log.debug("Nodes is: {}".format(nodes))
is_jenkins = False
if os.environ.get("RANCHER_IS_JENKINS", None):
is_jenkins = True
for node in nodes:
ip = rancher_cli.nodes.ssh(node, "curl -s ifconfig.me",
known=KNOWN_HOST, is_jenkins=is_jenkins)
if node["ip"] != ip:
failures.append(node["ip"])
assert failures == []
@pytest.fixture(scope='module')
def custom_workload(rancher_cli):
client, cluster = get_user_client_and_cluster()
project = client.list_project(name=rancher_cli.default_project["name"],
clusterId=cluster.id).data[0]
p_client = get_project_client_for_token(project, USER_TOKEN)
workload = p_client.create_workload(
name=random_str(),
namespaceId=rancher_cli.default_namespace,
scale=2,
containers=[{
'name': 'one',
'image': 'nginx',
}])
return workload
@pytest.fixture(scope='module')
def custom_cluster(request, rancher_cli):
rancher_cli.log.info("Creating cluster in AWS to test CLI actions that "
"require more than one cluster. Please be patient, "
"as this takes some time...")
node_roles = [["controlplane"], ["etcd"],
["worker"], ["worker"], ["worker"]]
cluster, aws_nodes = create_and_validate_custom_host(
node_roles, random_cluster_name=True)
def fin():
cluster_cleanup(get_admin_client(), cluster, aws_nodes)
request.addfinalizer(fin)
return cluster
@pytest.fixture
def admin_cli(request, rancher_cli) -> RancherCli:
"""
Login occurs at a global scope, so need to ensure we log back in as the
user in a finalizer so that future tests have no issues.
"""
rancher_cli.login(CATTLE_TEST_URL, ADMIN_TOKEN)
def fin():
rancher_cli.login(CATTLE_TEST_URL, USER_TOKEN)
request.addfinalizer(fin)
return rancher_cli
@pytest.fixture(scope='module', autouse="True")
def rancher_cli(request) -> RancherCli:
client, cluster = get_user_client_and_cluster()
project_id = client.list_project(name='Default',
clusterId=cluster.id).data[0]["id"]
cli = RancherCli(CATTLE_TEST_URL, USER_TOKEN, project_id)
def fin():
cli.cleanup()
request.addfinalizer(fin)
return cli
@pytest.fixture
def remove_cli_resource(request, rancher_cli):
"""Remove a resource after a test finishes even if the test fails.
How to use:
pass this function as an argument of your testing function,
then call this function with the resource type and its id
as arguments after creating any new resource
"""
def _cleanup(resource, r_id):
def clean():
rancher_cli.switch_context(rancher_cli.DEFAULT_CONTEXT)
rancher_cli.log.info("Cleaning up {}: {}".format(resource, r_id))
rancher_cli.run_command("{} delete {}".format(resource, r_id),
expect_error=True)
request.addfinalizer(clean)
return _cleanup
| 19,770 | 41.065957 | 81 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_group_grb.py
|
from .test_auth import enable_ad, load_setup_data
from .common import * # NOQA
from rancher import ApiError
import pytest
import requests
'''
Prerequisite:
- Enable Auth
- Optional: nested group search enabled
- All users used for testing global group role binding should not be used
to create the cluster when preparing the Rancher setup
'''
# values used to create a catalog
BRANCH = "dev"
URL = "https://git.rancher.io/system-charts"
# the link to search principals in the auth provider
@if_test_group_rbac
def test_ggrb_1(remove_resource):
""" test that when a global role is assigned to a group,
all users in the group will get the permission;
when the ggrb is removed, all users in the group will lose
the permission.
the default global role "catalogs-manage" is used
"""
target_group_name = get_group(NESTED_GROUP_ENABLED)
users = get_user_by_group(target_group_name, NESTED_GROUP_ENABLED)
# check that users can not create catalogs
for user in users:
validate_permission_create_catalog(user, False)
auth_admin = login_as_auth_user(load_setup_data()["admin_user"],
AUTH_USER_PASSWORD)
g_id = get_group_principal_id(target_group_name, token=auth_admin['token'])
ggrb = get_admin_client().create_global_role_binding(
globalRoleId="catalogs-manage", groupPrincipalId=g_id)
# check that users can create catalogs now
for user in users:
catalog = validate_permission_create_catalog(user, True)
remove_resource(catalog)
# delete the ggrb
get_admin_client().delete(ggrb)
# check that users can not create catalogs
for user in users:
validate_permission_create_catalog(user, False)
@if_test_group_rbac
def test_ggrb_2(remove_resource):
""" test that after editing the global role, users'
permissions reflect the changes
Steps:
- users in group1 cannot list clusters or create catalogs
- create a custom global role gr1 that permits creating catalogs
- create the global group role binding ggrb1 to bind gr1 to group1
- users in group1 can create catalogs now
- edit gr1 to remove the permission of creating catalogs, add the
permission of listing clusters
- users in group1 cannot create catalogs, but can list clusters
"""
target_group_name = get_group(NESTED_GROUP_ENABLED)
users = get_user_by_group(target_group_name, NESTED_GROUP_ENABLED)
# check that users can not create catalogs or list clusters
for user in users:
validate_permission_create_catalog(user, False)
validate_permission_list_cluster(user, 0)
# create a custom global role that permits create catalogs
admin_c = get_admin_client()
template = generate_template_global_role(name=random_name(),
template=TEMPLATE_MANAGE_CATALOG)
gr = admin_c.create_global_role(template)
remove_resource(gr)
auth_admin = login_as_auth_user(load_setup_data()["admin_user"],
AUTH_USER_PASSWORD)
g_id = get_group_principal_id(target_group_name, token=auth_admin['token'])
ggrb = get_admin_client().create_global_role_binding(
globalRoleId=gr["id"], groupPrincipalId=g_id)
# check that users can create catalogs now, but not list clusters
for user in users:
validate_permission_list_cluster(user, 0)
catalog = validate_permission_create_catalog(user, True)
remove_resource(catalog)
# edit the global role
rules = [
{
"type": "/v3/schemas/policyRule",
"apiGroups": [
"management.cattle.io"
],
"verbs": [
"get",
"list",
"watch"
],
"resources": [
"clusters"
]
}
]
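    # the updated role only allows get/list/watch on clusters, so catalog
    # creation should now be denied while cluster listing is allowed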
admin_c.update(gr, rules=rules)
target_num = len(admin_c.list_cluster().data)
# check that users can list clusters, but not create catalogs
for user in users:
validate_permission_create_catalog(user, False)
validate_permission_list_cluster(user, target_num)
# delete the ggrb
admin_c.delete(ggrb)
for user in users:
validate_permission_list_cluster(user, 0)
@if_test_group_rbac
def test_ggrb_3(remove_resource):
""" test that when a global role is assigned to a group,
all users in the group get the permission from the role,
users not in the group do not get the permission
Steps:
- users in the group cannot list clusters
- users not in the group cannot list clusters
- create a custom global role gr1 that permits listing clusters
- create the global group role binding ggrb1 to bind gr1 to the group
- users in the group can list clusters now
- users not in group still cannot list clusters
- delete the ggrb1
- users in the group can not list clusters
"""
target_g, user1 = get_a_group_and_a_user_not_in_it(NESTED_GROUP_ENABLED)
users = get_user_by_group(target_g, NESTED_GROUP_ENABLED)
# check that users in the group can not list clusters
for user in users:
validate_permission_list_cluster(user, 0)
# check that user not in the group can not list clusters
validate_permission_list_cluster(user1, 0)
auth_admin = login_as_auth_user(load_setup_data()["admin_user"],
AUTH_USER_PASSWORD)
g_id = get_group_principal_id(target_g, token=auth_admin['token'])
# create a custom global role that permits listing clusters
admin_c = get_admin_client()
template = generate_template_global_role(name=random_name(),
template=TEMPLATE_LIST_CLUSTER)
gr = admin_c.create_global_role(template)
remove_resource(gr)
ggrb = admin_c.create_global_role_binding(globalRoleId=gr["id"],
groupPrincipalId=g_id)
remove_resource(ggrb)
target_num = len(admin_c.list_cluster().data)
# check that users in the group can list clusters
for user in users:
validate_permission_list_cluster(user, target_num)
# check that user not in the group can not list clusters
validate_permission_list_cluster(user1, 0)
# delete the ggrb
admin_c.delete(ggrb)
# check that users in the group can not list clusters
for user in users:
validate_permission_list_cluster(user, 0)
# cluster owner, cluster member, project owner, project member
# and project read-only can not list ggrb
rbac_list_ggrb = [
(CLUSTER_OWNER, 0),
(CLUSTER_MEMBER, 0),
(PROJECT_OWNER, 0),
(PROJECT_MEMBER, 0),
(PROJECT_READ_ONLY, 0),
]
# cluster owner, cluster member, project owner, project member
# and project read-only can not create or delete ggrb
rbac_create_delete_ggrb = [
(CLUSTER_OWNER, False),
(CLUSTER_MEMBER, False),
(PROJECT_OWNER, False),
(PROJECT_MEMBER, False),
(PROJECT_READ_ONLY, False),
]
@if_test_rbac
@pytest.mark.parametrize(["role", "expected_count"], rbac_list_ggrb)
def test_rbac_ggrb_list(role, expected_count):
token = rbac_get_user_token_by_role(role)
validate_permission_list_ggrb(token, expected_count)
@if_test_rbac
@pytest.mark.parametrize(["role", "permission"], rbac_create_delete_ggrb)
def test_rbac_ggrb_create(role, permission):
token = rbac_get_user_token_by_role(role)
validate_permission_create_ggrb(token, permission)
@if_test_rbac
@pytest.mark.parametrize(["role", "permission"], rbac_create_delete_ggrb)
def test_rbac_ggrb_delete(role, permission):
token = rbac_get_user_token_by_role(role)
validate_permission_delete_ggrb(token, permission)
def validate_permission_list_cluster(username, num=0):
""" check if the user from auth provider has the permission to
list clusters
:param username: username from the auth provider
:param num: expected number of clusters
"""
token = login_as_auth_user(username, AUTH_USER_PASSWORD)['token']
user_client = get_client_for_token(token)
clusters = user_client.list_cluster().data
assert len(clusters) == num
def validate_permission_create_catalog(username, permission=False):
""" check if the user from auth provider has the permission to
create new catalog
"""
name = random_name()
token = login_as_auth_user(username, AUTH_USER_PASSWORD)['token']
return validate_create_catalog(token, catalog_name=name, branch=BRANCH,
url=URL, permission=permission)
def validate_permission_list_ggrb(token, num=0):
""" check if the user from auth provider has the permission to
list global role bindings
"""
user_client = get_client_for_token(token)
clusters = user_client.list_global_role_binding().data
assert len(clusters) == num
def validate_permission_create_ggrb(token, permission=False):
""" check if the user from auth provider has the permission to
create group global role bindings
"""
target_group_name = get_group()
auth_admin = login_as_auth_user(load_setup_data()["admin_user"],
AUTH_USER_PASSWORD)
g_id = get_group_principal_id(target_group_name, token=auth_admin['token'])
role = generate_a_global_role()
client = get_client_for_token(token)
if not permission:
with pytest.raises(ApiError) as e:
client.create_global_role_binding(globalRoleId=role["id"],
groupPrincipalId=g_id)
assert e.value.error.status == 403 and \
e.value.error.code == 'Forbidden', \
"user with no permission should receive 403: Forbidden"
return None
else:
try:
rtn = \
client.create_global_role_binding(globalRoleId=role["id"],
groupPrincipalId=g_id)
return rtn
except ApiError as e:
assert False, "user with permission should receive no exception:" \
+ str(e.error.status) + " " + e.error.code
def validate_permission_delete_ggrb(token, permission=False):
""" check if the user from auth provider has the permission to
    delete group global role bindings
"""
ggrb = validate_permission_create_ggrb(ADMIN_TOKEN, True)
client = get_client_for_token(token)
if not permission:
with pytest.raises(ApiError) as e:
client.delete(ggrb)
assert e.value.error.status == 403 and \
e.value.error.code == 'Forbidden', \
"user with no permission should receive 403: Forbidden"
get_admin_client().delete(ggrb)
else:
try:
client.delete(ggrb)
except ApiError as e:
get_admin_client().delete(ggrb)
assert False, "user with permission should receive no exception:" \
+ str(e.error.status) + " " + e.error.code
def generate_a_global_role():
""" return a global role with the permission of listing cluster"""
admin_c = get_admin_client()
template = generate_template_global_role(name=random_name(),
template=TEMPLATE_LIST_CLUSTER)
return admin_c.create_global_role(template)
@pytest.fixture(scope='module', autouse="True")
def create_project_client(request):
client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
admin_client = get_admin_client()
ad_enabled = admin_client.by_id_auth_config("activedirectory").enabled
if AUTH_PROVIDER == "activeDirectory" and not ad_enabled:
enable_ad(load_setup_data()["admin_user"], ADMIN_TOKEN,
password=AUTH_USER_PASSWORD, nested=NESTED_GROUP_ENABLED)
if NESTED_GROUP_ENABLED:
assert is_nested(), "no nested group is found"
| 11,936 | 37.015924 | 79 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_nfs.py
|
import pytest
from .common import * # NOQA
namespace = {"p_client": None,
"ns": None,
"cluster": None,
"project": None,
"pv": None,
"pvc": None}
# this is the path to the mounted dir in the NFS server
NFS_SERVER_MOUNT_PATH = "/nfs"
# this is the path to the mounted dir in the pod(workload)
MOUNT_PATH = "/var/nfs"
# if True, the NFS server is deleted after the tests finish
DELETE_NFS = eval(os.environ.get('RANCHER_DELETE_NFS', "True"))
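# Example (illustrative): export RANCHER_DELETE_NFS=False to keep the NFS
# server around for debugging after the test run finishes.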
def test_nfs_wl_deployment():
p_client = namespace["p_client"]
ns = namespace["ns"]
pvc_name = namespace["pvc"].name
wl_name = sub_path = random_test_name("deployment")
content = "from-test-wl-deployment"
file_path = MOUNT_PATH + "/wl-deployment.txt"
# deploy the first workload
wl = create_wl_with_nfs(p_client, ns.id, pvc_name, wl_name,
mount_path=MOUNT_PATH, sub_path=sub_path)
validate_workload(p_client, wl, "deployment", ns.name)
# check if it can write data
pods = p_client.list_pod(workloadId=wl.id).data
assert len(pods) == 1
pod = pods[0]
write_content_to_file(pod, content, file_path)
validate_file_content(pod, content, file_path)
# delete the workload
p_client.delete(wl)
# deploy second workload for testing
wl2 = create_wl_with_nfs(p_client, ns.id, pvc_name, "deployment-wl2",
mount_path=MOUNT_PATH, sub_path=sub_path)
validate_workload(p_client, wl2, "deployment", ns.name)
# check if it can read existing data
pods.clear()
pods = p_client.list_pod(workloadId=wl2.id).data
assert len(pods) == 1
pod = pods[0]
validate_file_content(pod, content, file_path)
def test_nfs_wl_scale_up():
p_client = namespace["p_client"]
ns = namespace["ns"]
pvc_name = namespace["pvc"].name
wl_name = sub_path = random_test_name("scale-up")
content = "from-nfs-wl-scale-up"
file_path = MOUNT_PATH + "/wl-scale-up.txt"
# deploy the workload
wl = create_wl_with_nfs(p_client, ns.id, pvc_name, wl_name,
mount_path=MOUNT_PATH, sub_path=sub_path)
validate_workload(p_client, wl, "deployment", ns.name)
# write some data
pods = p_client.list_pod(workloadId=wl.id).data
assert len(pods) == 1
pod = pods[0]
write_content_to_file(pod, content, file_path)
validate_file_content(pod, content, file_path)
# scale up the workload
p_client.update(wl, scale=2)
wl = wait_for_wl_to_active(p_client, wl)
pods.clear()
pods = wait_for_pods_in_workload(p_client, wl, 2)
assert len(pods) == 2
for pod in pods:
validate_file_content(pod, content, file_path)
def test_nfs_wl_upgrade():
p_client = namespace["p_client"]
ns = namespace["ns"]
pvc_name = namespace["pvc"].name
wl_name = sub_path = random_test_name("upgrade")
content = "from-nfs-wl-upgrade"
file_path = MOUNT_PATH + "/wl-upgrade.txt"
# deploy the workload
wl = create_wl_with_nfs(p_client, ns.id, pvc_name, wl_name,
mount_path=MOUNT_PATH, sub_path=sub_path)
validate_workload(p_client, wl, "deployment", ns.name)
pods = p_client.list_pod(workloadId=wl.id).data
assert len(pods) == 1
pod = pods[0]
# write some data
write_content_to_file(pod, content, file_path)
validate_file_content(pod, content, file_path)
# upgrade the workload
con = [{"name": "test1",
"image": TEST_IMAGE,
"volumeMounts": [{"readOnly": "False",
"type": "volumeMount",
"mountPath": "/var/nfs",
"subPath": sub_path,
"name": "vol1"
}],
"environment": {"REASON": "upgrade"}
}]
p_client.update(wl, containers=con)
wl = wait_for_wl_to_active(p_client, wl)
# check if it can read existing data
pods.clear()
pods = wait_for_pods_in_workload(p_client, wl, 1)
assert len(pods) == 1
pod = pods[0]
validate_file_content(pod, content, file_path)
# check if it can write some data
content = content + "+after-upgrade"
write_content_to_file(pod, content, file_path)
validate_file_content(pod, content, file_path)
def test_nfs_wl_daemonSet():
p_client = namespace["p_client"]
cluster = namespace["cluster"]
ns = namespace["ns"]
pvc_name = namespace["pvc"].name
wl_name = sub_path = random_test_name("daemon-set")
content = "from-nfs-wl-daemon-set"
file_path = MOUNT_PATH + "/" + "/wl-daemon-set.txt"
# deploy the workload
wl = create_wl_with_nfs(p_client, ns.id, pvc_name, wl_name,
MOUNT_PATH, sub_path, is_daemonSet=True)
schedulable_node_count = len(get_schedulable_nodes(cluster))
validate_workload(p_client, wl, "daemonSet",
ns.name, schedulable_node_count)
# for each pod, write some data to the file,
# then check if changes can be seen in all pods
pods = p_client.list_pod(workloadId=wl.id).data
for pod in pods:
content = content + "+" + pod.name
write_content_to_file(pod, content, file_path)
for item in pods:
validate_file_content(item, content, file_path)
@pytest.fixture(scope="module", autouse="True")
def create_project_client(request):
client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
p, ns = create_project_and_ns(USER_TOKEN, cluster, "project-test-nfs")
p_client = get_project_client_for_token(p, USER_TOKEN)
nfs_node = provision_nfs_server()
nfs_ip = nfs_node.get_public_ip()
print("the IP of the NFS: ", nfs_ip)
# add persistent volume to the cluster
cluster_client = get_cluster_client_for_token(cluster, USER_TOKEN)
pv_object, pvc_object = create_pv_pvc(p_client, ns, nfs_ip, cluster_client)
namespace["p_client"] = p_client
namespace["ns"] = ns
namespace["cluster"] = cluster
namespace["project"] = p
namespace["pv"] = pv_object
namespace["pvc"] = pvc_object
def fin():
cluster_client = get_cluster_client_for_token(namespace["cluster"],
USER_TOKEN)
cluster_client.delete(namespace["project"])
cluster_client.delete(namespace["pv"])
if DELETE_NFS is True:
AmazonWebServices().delete_node(nfs_node)
request.addfinalizer(fin)
| 6,549 | 34.79235 | 79 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_kdm_changes.py
|
from copy import deepcopy
import os
import ast
import requests
from .common import json
from .common import CATTLE_API_URL
from .common import create_config_file
from .common import CATTLE_TEST_URL
from .common import ADMIN_TOKEN
from .common import get_setting_value_by_name
from .common import get_user_client
from .common import USER_TOKEN
from .common import validate_cluster
from .test_rke_cluster_provisioning import HOST_NAME
from .test_rke_cluster_provisioning import random_name
from .test_rke_cluster_provisioning import rke_config
from .test_rke_cluster_provisioning import K8S_VERSION
from .test_rke_cluster_provisioning import random_test_name
from .test_rke_cluster_provisioning import get_custom_host_registration_cmd
from lib.aws import AmazonWebServices
import multiprocessing
K8S_VERSION_URL = "/settings/k8s-versions-current"
NETWORK_PLUGINS = ["calico", "canal", "flannel", "weave"]
DNS_PROVIDERS = ["coredns", "kube-dns"]
CLUSTER_LIST = []
NODE_COUNT_KDM_CLUSTER = \
int(os.environ.get("RANCHER_NODE_COUNT_KDM_CLUSTER", 4))
DNS_MATRIX = \
ast.literal_eval(os.environ.get('RANCHER_DNS_PROVIDER_MATRIX', "False"))
def test_clusters_for_kdm():
"""
    This function is used to check the KDM changes.
    It deploys a k8s cluster for every version in default_k8s_versions,
    across all the network plugins in NETWORK_PLUGINS and, when the DNS
    matrix is enabled, all the dns providers in DNS_PROVIDERS.
    It then deploys a workload on each cluster,
    checks service discovery - DNS resolution - and
    checks the ingress when enabled.
    Helper function validate_custom_cluster_kdm() creates the AWS nodes
    and adds them to the cluster.
"""
rancher_version = get_setting_value_by_name('server-version')
if K8S_VERSION == "":
if str(rancher_version).startswith('v2.2'):
k8s_v = get_setting_value_by_name('k8s-version-to-images')
default_k8s_versions = json.loads(k8s_v).keys()
else:
k8s_v = get_setting_value_by_name('k8s-versions-current')
default_k8s_versions = k8s_v.split(",")
else:
default_k8s_versions = K8S_VERSION.split(",")
list_process = []
network_plugins = NETWORK_PLUGINS
dns_providers = DNS_PROVIDERS
print("default_k8s_versions: ", default_k8s_versions)
for k8s_version in default_k8s_versions:
rke_config_new = deepcopy(rke_config)
rke_config_new["kubernetesVersion"] = k8s_version
node_count = NODE_COUNT_KDM_CLUSTER * len(network_plugins)
if DNS_MATRIX:
node_count = node_count * len(dns_providers) * 2
aws_nodes = \
AmazonWebServices().create_multiple_nodes(
node_count, random_test_name(HOST_NAME))
i = 0
for network_plugin in network_plugins:
if network_plugin == "calico" or \
network_plugin == "canal" or network_plugin == "weave":
rke_config_new["network"]["options"] = \
{"flannel_backend_type": "vxlan"}
rke_config_new["network"] = {"type": "networkConfig",
"plugin": network_plugin}
if DNS_MATRIX:
for dns_provider in dns_providers:
for nodelocaldns in True, False:
dns_entry, cluster_name = get_dns_rke_config(
dns_provider,
network_plugin,
nodelocaldns
)
rke_config_new.update(dns_entry)
list_process = create_kdm_clusters(
cluster_name,
rke_config_new,
aws_nodes,
i,
list_process
)
i = i + NODE_COUNT_KDM_CLUSTER
else:
cluster_name = random_test_name(network_plugin)
list_process = create_kdm_clusters(
cluster_name,
rke_config_new,
aws_nodes,
i,
list_process
)
i = i + NODE_COUNT_KDM_CLUSTER
failed_cluster = {}
passed_cluster = {}
for process in list_process:
process.join()
# setting environment variables
env_details = "env.CATTLE_TEST_URL='" + CATTLE_TEST_URL + "'\n"
env_details += "env.ADMIN_TOKEN='" + ADMIN_TOKEN + "'\n"
env_details += "env.USER_TOKEN='" + USER_TOKEN + "'\n"
names = ""
i = 0
for cluster in CLUSTER_LIST:
env_details += \
"env.CLUSTER_NAME_" + str(i) + "='" + cluster.name + "'\n"
names += cluster.name + ","
i = i + 1
create_config_file(env_details)
print("env_details:", env_details)
print("list of cluster names: " + names[:-1])
client = get_user_client()
for cluster in CLUSTER_LIST:
try:
validate_cluster(client, cluster, cluster.state, False, False)
# details of cluster that have passed
passed_cluster = save_cluster_details(passed_cluster, cluster)
except Exception as e:
print("Issue in {}:\n{}".format(cluster.name, e))
# details of cluster that have failed
failed_cluster = save_cluster_details(failed_cluster, cluster)
# printing results
print("--------------Passed Cluster information--------------'\n")
print("Clusters: " + ''.join('{0},'.format(key) for key, value in passed_cluster.items()))
for key, value in passed_cluster.items():
print(key + "-->" + str(value) + "\n")
print("--------------Failed Cluster information--------------'\n")
for key, value in failed_cluster.items():
print(key + "-->" + str(value) + "\n")
assert len(failed_cluster) == 0, "Clusters have failed to provision. " \
"Check logs for more info"
def validate_custom_cluster_kdm(cluster, aws_nodes):
if NODE_COUNT_KDM_CLUSTER == 4:
node_roles = [["controlplane"], ["etcd"], ["worker"], ["worker"]]
elif NODE_COUNT_KDM_CLUSTER == 2:
node_roles = [["controlplane", "etcd", "worker"], ["worker"]]
else:
node_roles = [["worker", "controlplane", "etcd"]]
client = get_user_client()
assert cluster.state == "provisioning"
i = 0
for aws_node in aws_nodes:
docker_run_cmd = \
get_custom_host_registration_cmd(client,
cluster,
node_roles[i],
aws_node)
for nr in node_roles[i]:
aws_node.roles.append(nr)
aws_node.execute_command(docker_run_cmd)
i += 1
def get_dns_rke_config(dns_provider, network_plugin, nodelocaldns):
dns_entry = dict()
dns_entry["dns"] = {"type": "dnsConfig",
"provider": dns_provider}
cluster_options = network_plugin + "-" + dns_provider
if nodelocaldns:
dns_entry["dns"]["nodelocal"] = \
{"type": "nodelocal", "ipAddress": "169.254.20.10"}
cluster_options += "-nodelocaldns"
cluster_name = random_test_name(cluster_options)
return dns_entry, cluster_name
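# Example (illustrative): get_dns_rke_config("coredns", "canal", True) returns
# a dns entry containing the nodelocal block and a cluster name derived from
# "canal-coredns-nodelocaldns" via random_test_name().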
def create_kdm_clusters(cluster_name, rke_config_new,
aws_nodes, aws_nodes_index, list_process):
client = get_user_client()
cluster = client.create_cluster(name=cluster_name,
driver="rancherKubernetesEngine",
rancherKubernetesEngineConfig=
rke_config_new)
p1 = multiprocessing.Process(
target=validate_custom_cluster_kdm,
args=(
cluster,
aws_nodes[aws_nodes_index:aws_nodes_index + NODE_COUNT_KDM_CLUSTER]
)
)
CLUSTER_LIST.append(cluster)
list_process.append(p1)
p1.start()
return list_process
def save_cluster_details(cluster_detail, cluster):
cluster_detail[cluster.name] = {}
cluster_detail[cluster.name]["k8s"] = \
cluster["rancherKubernetesEngineConfig"]["kubernetesVersion"]
cluster_detail[cluster.name]["network"] = \
cluster["rancherKubernetesEngineConfig"]["network"]["plugin"]
if DNS_MATRIX:
cluster_detail[cluster.name]["dns"] = \
cluster["rancherKubernetesEngineConfig"]["dns"]["provider"]
if "-nodelocaldns" in cluster.name:
cluster_detail[cluster.name]["nodelocaldns"] = \
"enabled"
else:
cluster_detail[cluster.name]["nodelocaldns"] = \
"disabled"
return cluster_detail
| 8,779 | 39.091324 | 94 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_v2_cis_scan.py
|
import pytest
import os
from .common import USER_TOKEN
from .common import get_cluster_client_for_token_v1
from .common import execute_kubectl_cmd
from .common import get_user_client_and_cluster
from .common import wait_until_app_v2_deployed
from .common import check_v2_app_and_uninstall
CIS_CHART_VERSION = os.environ.get('RANCHER_CIS_CHART_VERSION', "1.0.100")
SCAN_PROFILE = os.environ.get('RANCHER_SCAN_PROFILE', "rke-profile-permissive")
cluster_detail = {"cluster": None}
cis_annotations = \
{
"catalog.cattle.io/ui-source-repo": "rancher-charts",
"catalog.cattle.io/ui-source-repo-type": "cluster"
}
cis_charts = {
"values":
{ "global": {"cattle":{"clusterId": None, "clusterName": None}}},
"version": CIS_CHART_VERSION,
"projectId": None
}
CHART_NAME = "rancher-cis-benchmark"
def test_install_v2_cis_benchmark():
"""
List installed apps
Check if the app is installed
If installed, delete the app and the CRDs
Create namespace
Install App and the CRDs
:return:
"""
client = \
get_cluster_client_for_token_v1(
cluster_detail["cluster"]["id"],USER_TOKEN
)
rancherrepo = \
client.list_catalog_cattle_io_clusterrepo(id="rancher-charts")
cluster_id = cluster_detail["cluster"]["id"]
cluster_name = cluster_detail["cluster"]["name"]
rancher_repo = rancherrepo["data"][0]
# check if CIS is already installed and uninstall the app
check_v2_app_and_uninstall(client, CHART_NAME)
check_v2_app_and_uninstall(client, CHART_NAME + "-crd")
# create namespace
ns = "cis-operator-system"
command = "create namespace " + ns
execute_kubectl_cmd(command, False)
# install CIS v2
cis_charts["annotations"] = cis_annotations
cis_charts["values"]["global"]["cattle"]["clusterId"] = cluster_id
cis_charts["values"]["global"]["cattle"]["clusterName"] = cluster_name
cis_charts["chartName"] = CHART_NAME + "-crd"
cis_charts["releaseName"] = CHART_NAME + "-crd"
install_v2_app(client, rancher_repo, cis_charts, CHART_NAME + "-crd", ns)
# install app
cis_charts["chartName"] = CHART_NAME
cis_charts["releaseName"] = CHART_NAME
install_v2_app(client, rancher_repo, cis_charts, CHART_NAME, ns)
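# Once both charts are deployed, the operator pods can be checked manually
# with, for example (illustrative):
#     kubectl get pods -n cis-operator-system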
@pytest.fixture(scope='module', autouse="True")
def create_project_client(request):
client, cluster_detail["cluster"] = get_user_client_and_cluster()
def install_v2_app(client, rancher_repo, chart_values, chart_name, ns):
    # install the chart through the cluster repo's "install" action
response = client.action(obj=rancher_repo, action_name="install",
charts=[chart_values],
namespace=ns,
disableOpenAPIValidation=False,
noHooks=False,
projectId=None,
skipCRDs=False,
timeout="600s",
wait=True)
print("response", response)
app_list = wait_until_app_v2_deployed(client, chart_name)
assert chart_name in app_list
| 3,108 | 33.544444 | 79 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/conftest.py
|
import urllib3
from .common import * # NOQA
# This stops ssl warnings for insecure certs
urllib3.disable_warnings()
def pytest_configure(config):
if TEST_RBAC and CATTLE_TEST_URL:
rbac_prepare()
if AUTH_PROVIDER != "":
prepare_auth_data()
def pytest_unconfigure(config):
if TEST_RBAC and CATTLE_TEST_URL:
rbac_cleanup()
@pytest.fixture
def remove_resource(request):
"""Remove a resource after a test finishes even if the test fails.
How to use:
pass this function as an argument of your testing function,
then call this function with the new resource as argument after
creating any new resource
"""
client = get_admin_client()
def _cleanup(resource):
def clean():
try:
client.delete(resource)
except ApiError as e:
code = e.error.status
if code == 409 and "namespace will automatically be purged " \
in e.error.message:
pass
elif code != 404:
raise e
request.addfinalizer(clean)
return _cleanup
| 1,155 | 23.083333 | 78 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_auth.py
|
import json
import os
import pytest
import requests
from .common import ADMIN_TOKEN
from .common import AUTH_PROVIDER
from .common import AUTH_USER_PASSWORD
from .common import CATTLE_TEST_URL
from .common import cluster_cleanup
from .common import create_project_and_ns
from .common import get_admin_client
from .common import get_client_for_token
from .test_rke_cluster_provisioning import create_and_validate_custom_host
'''
Prerequisites:
1. testautoadmin is your admin user; if the fixture detects that auth
is disabled, it will be enabled automatically.
2. Two clusters in your setup; if the fixture detects none or only one,
it will create clusters to bring the total to two.
'''
# Config Fields
HOSTNAME_OR_IP_ADDRESS = os.environ.get("RANCHER_HOSTNAME_OR_IP_ADDRESS")
PORT = os.environ.get("RANCHER_PORT", "")
CA_CERTIFICATE = os.environ.get("RANCHER_CA_CERTIFICATE", "")
OPENLDAP_CA_CERTIFICATE = os.environ.get("RANCHER_OPENLDAP_CA_CERTIFICATE", "")
FREEIPA_CA_CERTIFICATE = os.environ.get("RANCHER_FREEIPA_CA_CERTIFICATE", "")
CONNECTION_TIMEOUT = os.environ.get("RANCHER_CONNECTION_TIMEOUT", 5000)
SERVICE_ACCOUNT_NAME = os.environ.get("RANCHER_SERVICE_ACCOUNT_NAME")
SERVICE_ACCOUNT_PASSWORD = os.environ.get("RANCHER_SERVICE_ACCOUNT_PASSWORD")
DEFAULT_LOGIN_DOMAIN = os.environ.get("RANCHER_DEFAULT_LOGIN_DOMAIN")
USER_SEARCH_BASE = os.environ.get("RANCHER_USER_SEARCH_BASE")
GROUP_SEARCH_BASE = os.environ.get("RANCHER_GROUP_SEARCH_BASE")
AD_SPECIAL_CHAR_PASSWORD = os.environ.get("RANCHER_AD_SPECIAL_CHAR_PASSWORD")
OPENLDAP_SPECIAL_CHAR_PASSWORD = \
os.environ.get("RANCHER_OPENLDAP_SPECIAL_CHAR_PASSWORD")
FREEIPA_SPECIAL_CHAR_PASSWORD = \
os.environ.get("RANCHER_FREEIPA_SPECIAL_CHAR_PASSWORD")
OPENLDAP_HOSTNAME_OR_IP_ADDRESS = \
os.environ.get("RANCHER_OPENLDAP_HOSTNAME_OR_IP_ADDRESS")
OPENLDAP_SERVICE_ACCOUNT_NAME = \
os.environ.get("RANCHER_OPENLDAP_SERVICE_ACCOUNT_NAME")
OPENLDAP_SERVICE_ACCOUNT_PASSWORD = \
os.environ.get("RANCHER_OPENLDAP_SERVICE_ACCOUNT_PASSWORD")
OPENLDAP_USER_SEARCH_BASE = os.environ.get("RANCHER_OPENLDAP_USER_SEARCH_BASE")
OPENLDAP_AUTH_USER_PASSWORD = \
os.environ.get("RANCHER_OPENLDAP_AUTH_USER_PASSWORD")
FREEIPA_HOSTNAME_OR_IP_ADDRESS = \
os.environ.get("RANCHER_FREEIPA_HOSTNAME_OR_IP_ADDRESS")
FREEIPA_SERVICE_ACCOUNT_NAME = \
os.environ.get("RANCHER_FREEIPA_SERVICE_ACCOUNT_NAME")
FREEIPA_SERVICE_ACCOUNT_PASSWORD = \
os.environ.get("RANCHER_FREEIPA_SERVICE_ACCOUNT_PASSWORD")
FREEIPA_USER_SEARCH_BASE = os.environ.get("RANCHER_FREEIPA_USER_SEARCH_BASE")
FREEIPA_GROUP_SEARCH_BASE = os.environ.get("RANCHER_FREEIPA_GROUP_SEARCH_BASE")
FREEIPA_AUTH_USER_PASSWORD = \
os.environ.get("RANCHER_FREEIPA_AUTH_USER_PASSWORD")
PASSWORD = ""
if AUTH_PROVIDER == "activeDirectory":
PASSWORD = AUTH_USER_PASSWORD
elif AUTH_PROVIDER == "openLdap":
PASSWORD = OPENLDAP_AUTH_USER_PASSWORD
elif AUTH_PROVIDER == "freeIpa":
PASSWORD = FREEIPA_AUTH_USER_PASSWORD
CATTLE_AUTH_URL = \
CATTLE_TEST_URL + \
"/v3-public/" + AUTH_PROVIDER + "Providers/" + \
AUTH_PROVIDER.lower() + "?action=login"
CATTLE_AUTH_PROVIDER_URL = \
CATTLE_TEST_URL + "/v3/" + AUTH_PROVIDER + "Configs/" +\
AUTH_PROVIDER.lower()
CATTLE_AUTH_PRINCIPAL_URL = CATTLE_TEST_URL + "/v3/principals?action=search"
CATTLE_AUTH_ENABLE_URL = CATTLE_AUTH_PROVIDER_URL + "?action=testAndApply"
CATTLE_AUTH_DISABLE_URL = CATTLE_AUTH_PROVIDER_URL + "?action=disable"
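# For AUTH_PROVIDER == "activeDirectory" the login URL above resolves to,
# for example:
#   <CATTLE_TEST_URL>/v3-public/activeDirectoryProviders/activedirectory?action=login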
setup = {"cluster1": None,
"project1": None,
"ns1": None,
"cluster2": None,
"project2": None,
"ns2": None,
"auth_setup_data": {},
"permission_denied_code": 403}
auth_setup_fname = \
os.path.join(os.path.dirname(os.path.realpath(__file__)) + "/resource",
AUTH_PROVIDER.lower() + ".json")
def test_access_control_required_set_access_mode_required():
access_mode = "required"
validate_access_control_set_access_mode(access_mode)
def test_access_control_restricted_set_access_mode_required():
access_mode = "restricted"
validate_access_control_set_access_mode(access_mode)
def test_access_control_required_add_users_and_groups_to_cluster():
access_mode = "required"
validate_add_users_and_groups_to_cluster_or_project(
access_mode, add_users_to_cluster=True)
def test_access_control_restricted_add_users_and_groups_to_cluster():
access_mode = "restricted"
validate_add_users_and_groups_to_cluster_or_project(
access_mode, add_users_to_cluster=True)
def test_access_control_required_add_users_and_groups_to_project():
access_mode = "required"
validate_add_users_and_groups_to_cluster_or_project(
access_mode, add_users_to_cluster=False)
def test_access_control_restricted_add_users_and_groups_to_project():
access_mode = "restricted"
validate_add_users_and_groups_to_cluster_or_project(
access_mode, add_users_to_cluster=False)
def test_disable_and_enable_auth_set_access_control_required():
access_mode = "required"
validate_access_control_disable_and_enable_auth(access_mode)
def test_disable_and_enable_auth_set_access_control_restricted():
access_mode = "restricted"
validate_access_control_disable_and_enable_auth(access_mode)
# By default nestedgroup is disabled for ad and openldap, enabled for freeipa
def test_disable_and_enable_nestedgroups_set_access_control_required():
access_mode = "required"
validate_access_control_disable_and_enable_nestedgroups(access_mode)
def test_disable_and_enable_nestedgroup_set_access_control_restricted():
access_mode = "restricted"
validate_access_control_disable_and_enable_nestedgroups(access_mode)
def test_ad_service_account_login():
delete_project_users()
delete_cluster_users()
auth_setup_data = setup["auth_setup_data"]
admin_user = auth_setup_data["admin_user"]
# admin_user here is the AD admin user
if AUTH_PROVIDER == "activeDirectory":
admin_token = login(admin_user, AUTH_USER_PASSWORD)
disable_ad(admin_user, admin_token)
enable_ad(admin_user, admin_token)
login(SERVICE_ACCOUNT_NAME, SERVICE_ACCOUNT_PASSWORD)
def test_special_character_users_login_access_mode_required():
access_mode = "required"
special_character_users_login(access_mode)
def test_special_character_users_login_access_mode_restricted():
access_mode = "restricted"
special_character_users_login(access_mode)
def special_character_users_login(access_mode):
delete_project_users()
delete_cluster_users()
auth_setup_data = setup["auth_setup_data"]
admin_user = auth_setup_data["admin_user"]
admin_token = login(admin_user, PASSWORD)
allowed_principal_ids = []
if AUTH_PROVIDER == "activeDirectory":
disable_ad(admin_user, admin_token)
enable_ad(admin_user, admin_token)
if AUTH_PROVIDER == "openLdap":
disable_openldap(admin_user, admin_token)
enable_openldap(admin_user, admin_token)
if AUTH_PROVIDER == "freeIpa":
disable_freeipa(admin_user, admin_token)
enable_freeipa(admin_user, admin_token)
if AUTH_PROVIDER == "activeDirectory":
for user in auth_setup_data["specialchar_in_username"]:
allowed_principal_ids.append(principal_lookup(user, admin_token))
for user in auth_setup_data["specialchar_in_password"]:
allowed_principal_ids.append(principal_lookup(user, admin_token))
for user in auth_setup_data["specialchar_in_userdn"]:
allowed_principal_ids.append(principal_lookup(user, admin_token))
for group in auth_setup_data["specialchar_in_groupname"]:
allowed_principal_ids.append(principal_lookup(group, admin_token))
allowed_principal_ids.append(
principal_lookup(admin_user, admin_token))
add_users_to_site_access(
admin_token, access_mode, allowed_principal_ids)
for user in auth_setup_data["specialchar_in_username"]:
login(user, PASSWORD)
for user in auth_setup_data["specialchar_in_password"]:
login(user, AD_SPECIAL_CHAR_PASSWORD)
for user in auth_setup_data["specialchar_in_userdn"]:
login(user, PASSWORD)
for group in auth_setup_data["specialchar_in_groupname"]:
for user in auth_setup_data[group]:
login(user, PASSWORD)
if AUTH_PROVIDER == "openLdap":
for user in auth_setup_data["specialchar_in_user_cn_sn"]:
allowed_principal_ids.append(principal_lookup(user, admin_token))
for user in auth_setup_data["specialchar_in_uid"]:
allowed_principal_ids.append(principal_lookup(user, admin_token))
for user in auth_setup_data["specialchar_in_password"]:
allowed_principal_ids.append(principal_lookup(user, admin_token))
for group in auth_setup_data["specialchar_in_groupname"]:
allowed_principal_ids.append(principal_lookup(group, admin_token))
allowed_principal_ids.append(principal_lookup(admin_user, admin_token))
add_users_to_site_access(
admin_token, access_mode, allowed_principal_ids)
for user in auth_setup_data["specialchar_in_user_cn_sn"]:
login(user, PASSWORD)
for user in auth_setup_data["specialchar_in_uid"]:
login(user, PASSWORD)
for user in auth_setup_data["specialchar_in_password"]:
login(user, OPENLDAP_SPECIAL_CHAR_PASSWORD)
for group in auth_setup_data["specialchar_in_groupname"]:
for user in auth_setup_data[group]:
login(user, PASSWORD)
if AUTH_PROVIDER == "freeIpa":
for user in auth_setup_data["specialchar_in_users"]:
allowed_principal_ids.append(principal_lookup(user, admin_token))
for user in auth_setup_data["specialchar_in_password"]:
allowed_principal_ids.append(principal_lookup(user, admin_token))
for group in auth_setup_data["specialchar_in_groupname"]:
allowed_principal_ids.append(principal_lookup(group, admin_token))
allowed_principal_ids.append(
principal_lookup(admin_user, admin_token))
add_users_to_site_access(
admin_token, access_mode, allowed_principal_ids)
for user in auth_setup_data["specialchar_in_users"]:
login(user, PASSWORD)
for user in auth_setup_data["specialchar_in_password"]:
login(user, FREEIPA_SPECIAL_CHAR_PASSWORD)
for group in auth_setup_data["specialchar_in_groupname"]:
for user in auth_setup_data[group]:
login(user, PASSWORD)
def validate_access_control_set_access_mode(access_mode):
delete_cluster_users()
auth_setup_data = setup["auth_setup_data"]
admin_user = auth_setup_data["admin_user"]
token = login(admin_user, PASSWORD)
allowed_principal_ids = []
for user in auth_setup_data["allowed_users"]:
allowed_principal_ids.append(principal_lookup(user, token))
for group in auth_setup_data["allowed_groups"]:
allowed_principal_ids.append(principal_lookup(group, token))
allowed_principal_ids.append(principal_lookup(admin_user, token))
# Add users and groups in allowed list to access rancher-server
add_users_to_site_access(token, access_mode, allowed_principal_ids)
for user in auth_setup_data["allowed_users"]:
login(user, PASSWORD)
for group in auth_setup_data["allowed_groups"]:
for user in auth_setup_data[group]:
login(user, PASSWORD)
for user in auth_setup_data["dis_allowed_users"]:
login(user, PASSWORD,
expected_status=setup["permission_denied_code"])
for group in auth_setup_data["dis_allowed_groups"]:
for user in auth_setup_data[group]:
login(user, PASSWORD,
expected_status=setup["permission_denied_code"])
    # Add users and groups from the disallowed list to rancher-server access
for user in auth_setup_data["dis_allowed_users"]:
allowed_principal_ids.append(principal_lookup(user, token))
for group in auth_setup_data["dis_allowed_groups"]:
for user in auth_setup_data[group]:
allowed_principal_ids.append(principal_lookup(user, token))
add_users_to_site_access(token, access_mode, allowed_principal_ids)
for user in auth_setup_data["allowed_users"]:
login(user, PASSWORD)
for group in auth_setup_data["allowed_groups"]:
for user in auth_setup_data[group]:
login(user, PASSWORD)
for user in auth_setup_data["dis_allowed_users"]:
login(user, PASSWORD)
for group in auth_setup_data["dis_allowed_groups"]:
for user in auth_setup_data[group]:
login(user, PASSWORD)
    # Remove the originally allowed users and groups from site access
allowed_principal_ids = [principal_lookup(admin_user, token)]
for user in auth_setup_data["dis_allowed_users"]:
allowed_principal_ids.append(principal_lookup(user, token))
for group in auth_setup_data["dis_allowed_groups"]:
for user in auth_setup_data[group]:
allowed_principal_ids.append(principal_lookup(user, token))
add_users_to_site_access(token, access_mode, allowed_principal_ids)
for user in auth_setup_data["allowed_users"]:
login(user, PASSWORD,
expected_status=setup["permission_denied_code"])
for group in auth_setup_data["allowed_groups"]:
for user in auth_setup_data[group]:
login(user, PASSWORD,
expected_status=setup["permission_denied_code"])
for user in auth_setup_data["dis_allowed_users"]:
login(user, PASSWORD)
for group in auth_setup_data["dis_allowed_groups"]:
for user in auth_setup_data[group]:
login(user, PASSWORD)
def validate_add_users_and_groups_to_cluster_or_project(
access_mode, add_users_to_cluster=True):
delete_cluster_users()
client = get_admin_client()
for project in client.list_project():
delete_existing_users_in_project(client, project)
auth_setup_data = setup["auth_setup_data"]
admin_user = auth_setup_data["admin_user"]
token = login(admin_user, PASSWORD)
allowed_principal_ids = [principal_lookup(admin_user, token)]
# Add users and groups in allowed list to access rancher-server
add_users_to_site_access(token, access_mode, allowed_principal_ids)
if add_users_to_cluster:
groups_to_check = auth_setup_data["groups_added_to_cluster"]
users_to_check = auth_setup_data["users_added_to_cluster"]
else:
groups_to_check = auth_setup_data["groups_added_to_project"]
users_to_check = auth_setup_data["users_added_to_project"]
for group in groups_to_check:
for user in auth_setup_data[group]:
login(user, PASSWORD,
expected_status=setup["permission_denied_code"])
for user in users_to_check:
login(user, PASSWORD,
expected_status=setup["permission_denied_code"])
client = get_client_for_token(token)
for group in groups_to_check:
if add_users_to_cluster:
assign_user_to_cluster(client, principal_lookup(group, token),
setup["cluster1"], "cluster-owner")
else:
assign_user_to_project(client, principal_lookup(group, token),
setup["project2"], "project-owner")
for user in users_to_check:
if add_users_to_cluster:
assign_user_to_cluster(client, principal_lookup(user, token),
setup["cluster1"], "cluster-owner")
else:
assign_user_to_project(client, principal_lookup(user, token),
setup["project2"], "project-owner")
expected_status = setup["permission_denied_code"]
if access_mode == "required":
expected_status = setup["permission_denied_code"]
if access_mode == "restricted":
expected_status = 201
for group in groups_to_check:
for user in auth_setup_data[group]:
login(user, PASSWORD, expected_status)
for user in users_to_check:
login(user, PASSWORD, expected_status)
def validate_access_control_disable_and_enable_auth(access_mode):
delete_cluster_users()
delete_project_users()
auth_setup_data = setup["auth_setup_data"]
    # Log in as admin; disabling and then re-enabling auth should succeed.
admin_user = auth_setup_data["admin_user"]
admin_token = login(admin_user, PASSWORD)
if AUTH_PROVIDER == "activeDirectory":
disable_ad(admin_user, admin_token)
enable_ad(admin_user, admin_token)
if AUTH_PROVIDER == "openLdap":
disable_openldap(admin_user, admin_token)
enable_openldap(admin_user, admin_token)
if AUTH_PROVIDER == "freeIpa":
disable_freeipa(admin_user, admin_token)
enable_freeipa(admin_user, admin_token)
    # Log in as users in the allowed principal ID list; they should not be
    # able to disable or enable auth.
allowed_principal_ids = []
for user in auth_setup_data["allowed_users"]:
allowed_principal_ids.append(principal_lookup(user, admin_token))
allowed_principal_ids.append(principal_lookup(admin_user, admin_token))
# Add users in allowed list to access rancher-server
add_users_to_site_access(admin_token, access_mode, allowed_principal_ids)
for user in auth_setup_data["allowed_users"]:
token = login(user, PASSWORD)
if AUTH_PROVIDER == "activeDirectory":
disable_ad(user, token,
expected_status=setup["permission_denied_code"])
enable_ad(user, token,
expected_status=setup["permission_denied_code"])
if AUTH_PROVIDER == "openLdap":
disable_openldap(user, token,
expected_status=setup["permission_denied_code"])
enable_openldap(user, token,
expected_status=setup["permission_denied_code"])
if AUTH_PROVIDER == "freeIpa":
disable_freeipa(user, token,
expected_status=setup["permission_denied_code"])
enable_freeipa(user, token,
expected_status=setup["permission_denied_code"])
def validate_access_control_disable_and_enable_nestedgroups(access_mode):
delete_project_users()
delete_cluster_users()
auth_setup_data = setup["auth_setup_data"]
admin_user = auth_setup_data["admin_user"]
token = login(admin_user, PASSWORD)
if AUTH_PROVIDER == "activeDirectory":
enable_ad(admin_user, token)
if AUTH_PROVIDER == "openLdap":
enable_openldap(admin_user, token)
if AUTH_PROVIDER == "freeIpa":
enable_freeipa(admin_user, token)
allowed_principal_ids = []
for group in auth_setup_data["allowed_nestedgroups"]:
allowed_principal_ids.append(principal_lookup(group, token))
allowed_principal_ids.append(principal_lookup(admin_user, token))
# Add users in allowed list to access rancher-server
add_users_to_site_access(token, access_mode, allowed_principal_ids)
for group in auth_setup_data["allowed_nestedgroups"]:
for user in auth_setup_data[group]:
login(user, PASSWORD)
if AUTH_PROVIDER == "freeIpa":
for user in auth_setup_data["users_under_nestedgroups"]:
login(user, PASSWORD)
if AUTH_PROVIDER == "activeDirectory" or AUTH_PROVIDER == "openLdap":
for user in auth_setup_data["users_under_nestedgroups"]:
login(user, PASSWORD,
expected_status=setup["permission_denied_code"])
    # Enable the nested group feature so that users in nested groups can log
    # in successfully
if AUTH_PROVIDER == "activeDirectory":
enable_ad(admin_user, token, nested=True)
if AUTH_PROVIDER == "openLdap":
enable_openldap(admin_user, token, nested=True)
allowed_principal_ids = []
for group in auth_setup_data["allowed_nestedgroups"]:
allowed_principal_ids.append(principal_lookup(group, token))
allowed_principal_ids.append(principal_lookup(admin_user, token))
# Add users in allowed list to access rancher-server
add_users_to_site_access(token, access_mode, allowed_principal_ids)
for group in auth_setup_data["allowed_nestedgroups"]:
for user in auth_setup_data[group]:
login(user, PASSWORD)
for user in auth_setup_data["users_under_nestedgroups"]:
login(user, PASSWORD)
def login(username, password, expected_status=201):
token = ""
r = requests.post(CATTLE_AUTH_URL, json={
'username': username,
'password': password,
'responseType': 'json',
}, verify=False)
assert r.status_code == expected_status
print("Login request for " + username + " " + str(expected_status))
if expected_status == 201:
token = r.json()['token']
return token
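# Illustrative sketch (not invoked by tests): the token returned by login()
# is reused as a Bearer token by the helpers below, or exchanged for an API
# client via get_client_for_token(). The username here is a placeholder.
def _example_login_usage():
    token = login("example-user", PASSWORD)
    return {'Authorization': 'Bearer ' + token}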
def get_tls(certificate):
    # TLS is enabled whenever a CA certificate has been provided
    return len(certificate) != 0
def enable_openldap(username, token, enable_url=CATTLE_AUTH_ENABLE_URL,
password=PASSWORD, nested=False,
expected_status=200):
headers = {'Authorization': 'Bearer ' + token}
ldap_config = {
"accessMode": "unrestricted",
"connectionTimeout": CONNECTION_TIMEOUT,
"certificate": OPENLDAP_CA_CERTIFICATE,
"groupDNAttribute": "entryDN",
"groupMemberMappingAttribute": "member",
"groupMemberUserAttribute": "entryDN",
"groupNameAttribute": "cn",
"groupObjectClass": "groupOfNames",
"groupSearchAttribute": "cn",
"nestedGroupMembershipEnabled": nested,
"enabled": True,
"port": PORT,
"servers": [OPENLDAP_HOSTNAME_OR_IP_ADDRESS],
"serviceAccountDistinguishedName": OPENLDAP_SERVICE_ACCOUNT_NAME,
"tls": get_tls(OPENLDAP_CA_CERTIFICATE),
"userDisabledBitMask": 0,
"userLoginAttribute": "uid",
"userMemberAttribute": "memberOf",
"userNameAttribute": "cn",
"userObjectClass": "inetOrgPerson",
"userSearchAttribute": "uid|sn|givenName",
"userSearchBase": OPENLDAP_USER_SEARCH_BASE,
"serviceAccountPassword": OPENLDAP_SERVICE_ACCOUNT_PASSWORD
}
ca_cert = ldap_config["certificate"]
ldap_config["certificate"] = ca_cert.replace('\\n', '\n')
r = requests.post(enable_url,
json={
"ldapConfig": ldap_config,
"username": username,
"password": password},
verify=False, headers=headers)
nested_msg = " nested group " if nested else " "
print(f"Enable OpenLDAP{nested_msg}request for {username} "
f"{expected_status}")
assert r.status_code == expected_status
def disable_openldap(username, token, expected_status=200):
headers = {'Authorization': 'Bearer ' + token}
r = requests.post(CATTLE_AUTH_DISABLE_URL, json={
'username': username,
'password': PASSWORD
}, verify=False, headers=headers)
assert r.status_code == expected_status
print("Disable openLdap request for " +
username + " " + str(expected_status))
def enable_ad(username, token, enable_url=CATTLE_AUTH_ENABLE_URL,
password=AUTH_USER_PASSWORD, nested=False, expected_status=200):
headers = {'Authorization': 'Bearer ' + token}
active_directory_config = {
"accessMode": "unrestricted",
"certificate": CA_CERTIFICATE,
"connectionTimeout": CONNECTION_TIMEOUT,
"defaultLoginDomain": DEFAULT_LOGIN_DOMAIN,
"groupDNAttribute": "distinguishedName",
"groupMemberMappingAttribute": "member",
"groupMemberUserAttribute": "distinguishedName",
"groupNameAttribute": "name",
"groupObjectClass": "group",
"groupSearchAttribute": "sAMAccountName",
"nestedGroupMembershipEnabled": nested,
"port": PORT,
"servers": [HOSTNAME_OR_IP_ADDRESS],
"serviceAccountUsername": SERVICE_ACCOUNT_NAME,
"tls": get_tls(CA_CERTIFICATE),
"userDisabledBitMask": 2,
"userEnabledAttribute": "userAccountControl",
"userLoginAttribute": "sAMAccountName",
"userNameAttribute": "name",
"userObjectClass": "person",
"userSearchAttribute": "sAMAccountName|sn|givenName",
"userSearchBase": USER_SEARCH_BASE,
"serviceAccountPassword": SERVICE_ACCOUNT_PASSWORD
}
ca_cert = active_directory_config["certificate"]
active_directory_config["certificate"] = ca_cert.replace('\\n', '\n')
r = requests.post(enable_url,
json={"activeDirectoryConfig": active_directory_config,
"enabled": True,
"username": username,
"password": password},
verify=False, headers=headers)
nested_msg = " nested group " if nested else " "
print(f"Enable ActiveDirectory{nested_msg}request for {username} "
f"{expected_status}")
assert r.status_code == expected_status
def disable_ad(username, token, expected_status=200):
headers = {'Authorization': 'Bearer ' + token}
r = requests.post(CATTLE_AUTH_DISABLE_URL,
json={"enabled": False,
"username": username,
"password": AUTH_USER_PASSWORD
},
verify=False, headers=headers)
print("Disable ActiveDirectory request for " +
username + " " + str(expected_status))
assert r.status_code == expected_status
def enable_freeipa(username, token, enable_url=CATTLE_AUTH_ENABLE_URL,
password=PASSWORD, nested=False,
expected_status=200):
headers = {'Authorization': 'Bearer ' + token}
r = requests.post(enable_url, json={
"ldapConfig": {
"accessMode": "unrestricted",
"certificate": FREEIPA_CA_CERTIFICATE,
"connectionTimeout": CONNECTION_TIMEOUT,
"groupDNAttribute": "entrydn",
"groupMemberMappingAttribute": "member",
"groupMemberUserAttribute": "entrydn",
"groupNameAttribute": "cn",
"groupObjectClass": "groupofnames",
"groupSearchAttribute": "cn",
"groupSearchBase": FREEIPA_GROUP_SEARCH_BASE,
"enabled": True,
"nestedGroupMembershipEnabled": nested,
"port": PORT,
"servers": [FREEIPA_HOSTNAME_OR_IP_ADDRESS],
"serviceAccountDistinguishedName": FREEIPA_SERVICE_ACCOUNT_NAME,
"tls": get_tls(FREEIPA_CA_CERTIFICATE),
"userDisabledBitMask": 0,
"userLoginAttribute": "uid",
"userMemberAttribute": "memberOf",
"userNameAttribute": "givenName",
"userObjectClass": "inetorgperson",
"userSearchAttribute": "uid|sn|givenName",
"userSearchBase": FREEIPA_USER_SEARCH_BASE,
"serviceAccountPassword": FREEIPA_SERVICE_ACCOUNT_PASSWORD
},
"username": username,
"password": password
}, verify=False, headers=headers)
print("Enable freeIpa request for " +
username + " " + str(expected_status))
assert r.status_code == expected_status
def disable_freeipa(username, token, expected_status=200):
headers = {'Authorization': 'Bearer ' + token}
r = requests.post(CATTLE_AUTH_DISABLE_URL, json={
"enabled": False,
"username": username,
"password": AUTH_USER_PASSWORD
}, verify=False, headers=headers)
assert r.status_code == expected_status
print("Disable freeIpa request for " +
username + " " + str(expected_status))
def principal_lookup(name, token):
headers = {'Authorization': 'Bearer ' + token}
r = requests.post(CATTLE_AUTH_PRINCIPAL_URL,
json={'name': name, 'responseType': 'json'},
verify=False, headers=headers)
assert r.status_code == 200
principals = r.json()['data']
for principal in principals:
if principal['principalType'] == "user":
if principal['loginName'] == name:
return principal["id"]
if principal['principalType'] == "group":
if principal['name'] == name:
return principal["id"]
    assert False, "could not find principal for " + name
def add_users_to_site_access(token, access_mode, allowed_principal_ids):
headers = {'Authorization': 'Bearer ' + token}
r = requests.put(CATTLE_AUTH_PROVIDER_URL, json={
'allowedPrincipalIds': allowed_principal_ids,
'accessMode': access_mode,
'responseType': 'json',
}, verify=False, headers=headers)
print(r.json())
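# Minimal sketch of how the helpers above combine (not called by any test):
# look up principal IDs for the admin and one allowed user, then restrict
# site access to that list. "example-user" is a placeholder name.
def _example_restrict_site_access():
    auth_setup_data = setup["auth_setup_data"]
    admin_user = auth_setup_data["admin_user"]
    admin_token = login(admin_user, PASSWORD)
    allowed = [principal_lookup(admin_user, admin_token),
               principal_lookup("example-user", admin_token)]
    add_users_to_site_access(admin_token, "required", allowed)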
def assign_user_to_cluster(client, principal_id, cluster, role_template_id):
crtb = client.create_cluster_role_template_binding(
clusterId=cluster.id,
roleTemplateId=role_template_id,
userPrincipalId=principal_id)
return crtb
def assign_user_to_project(client, principal_id, project, role_template_id):
prtb = client.create_project_role_template_binding(
projectId=project.id,
roleTemplateId=role_template_id,
userPrincipalId=principal_id)
return prtb
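# Minimal sketch (not called by any test): granting a looked-up principal the
# built-in cluster-owner and project-owner role templates, as the tests in
# this module do against setup["cluster1"] and setup["project2"].
def _example_assign_roles(client, admin_token):
    principal_id = principal_lookup("example-user", admin_token)
    assign_user_to_cluster(client, principal_id,
                           setup["cluster1"], "cluster-owner")
    assign_user_to_project(client, principal_id,
                           setup["project2"], "project-owner")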
def delete_existing_users_in_cluster(client, cluster):
crtbs = client.list_cluster_role_template_binding(clusterId=cluster.id)
for crtb in crtbs:
client.delete(crtb)
def delete_existing_users_in_project(client, project):
prtbs = client.list_project_role_template_binding(projectId=project.id)
for prtb in prtbs:
client.delete(prtb)
@pytest.fixture(scope='module', autouse="True")
def create_project_client(request):
    '''
    This fixture enables auth on the Rancher server setup. Two clusters are
    required; if fewer than two are present, the missing clusters are created.
    '''
if AUTH_PROVIDER not in ("activeDirectory", "openLdap", "freeIpa"):
assert False, "Auth Provider set is not supported"
setup["auth_setup_data"] = load_setup_data()
client = get_admin_client()
configs = client.list_auth_config(name=AUTH_PROVIDER.lower())
if not configs.data[0].enabled:
auth_setup_data = setup["auth_setup_data"]
admin_user = auth_setup_data["admin_user"]
if AUTH_PROVIDER == 'activeDirectory':
enable_ad(admin_user, ADMIN_TOKEN)
elif AUTH_PROVIDER == 'openLdap':
enable_openldap(admin_user, ADMIN_TOKEN)
elif AUTH_PROVIDER == 'freeIpa':
enable_freeipa(admin_user, ADMIN_TOKEN)
cluster_total = len(client.list_cluster().data)
node_roles = [["controlplane", "etcd", "worker"]]
kargs = {'node_roles': node_roles,
'random_cluster_name': True,
'validate': False}
aws_nodes1 = cluster1 = None
aws_nodes2 = cluster2 = None
if cluster_total == 0:
cluster1, aws_nodes1 = create_and_validate_custom_host(**kargs)
cluster2, aws_nodes2 = create_and_validate_custom_host(**kargs)
if cluster_total == 1:
cluster2, aws_nodes2 = create_and_validate_custom_host(**kargs)
clusters = client.list_cluster().data
assert len(clusters) >= 2
cluster1 = cluster1 if cluster1 else clusters[0]
for project in client.list_project():
delete_existing_users_in_project(client, project)
p1, ns1 = create_project_and_ns(ADMIN_TOKEN, cluster1)
cluster2 = cluster2 if cluster2 else clusters[1]
p2, ns2 = create_project_and_ns(ADMIN_TOKEN, cluster2)
setup["cluster1"] = cluster1
setup["project1"] = p1
setup["ns1"] = ns1
setup["cluster2"] = cluster2
setup["project2"] = p2
setup["ns2"] = ns2
def fin():
client.delete(setup["project1"])
client.delete(setup["project2"])
delete_cluster_users()
if cluster_total == 0:
if aws_nodes1 and aws_nodes2:
cluster_cleanup(client, cluster1, aws_nodes1)
cluster_cleanup(client, cluster2, aws_nodes2)
if cluster_total == 1:
if aws_nodes2:
cluster_cleanup(client, cluster2, aws_nodes2)
request.addfinalizer(fin)
def delete_cluster_users():
delete_existing_users_in_cluster(get_admin_client(), setup["cluster1"])
delete_existing_users_in_cluster(get_admin_client(), setup["cluster2"])
def delete_project_users():
delete_existing_users_in_project(get_admin_client(), setup["project1"])
delete_existing_users_in_project(get_admin_client(), setup["project2"])
def load_setup_data():
    with open(auth_setup_fname) as auth_setup_file:
        auth_setup_data = json.loads(auth_setup_file.read())
    return auth_setup_data
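# The file behind auth_setup_fname is provider-specific JSON. An illustrative
# minimal shape (keys mirror how this module reads the data; the user and
# group names below are placeholders, not real directory entries):
#
#   {
#       "admin_user": "ad-admin",
#       "allowed_users": ["user1", "user2"],
#       "allowed_groups": ["group1"],
#       "group1": ["user3", "user4"],
#       "dis_allowed_users": ["user5"],
#       "dis_allowed_groups": ["group2"],
#       "group2": ["user6"]
#   }
#
# Group names double as top-level keys listing that group's members, which is
# how auth_setup_data[group] is resolved in the validations above.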
| 33,106 | 38.319477 | 79 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_node_annotation.py
|
import pytest
import time
from .common import create_kubeconfig
from .common import CLUSTER_MEMBER
from .common import CLUSTER_OWNER
from .common import PROJECT_MEMBER
from .common import PROJECT_OWNER
from .common import PROJECT_READ_ONLY
from .common import get_client_for_token
from .common import get_node_details
from .common import get_user_client_and_cluster
from .common import execute_kubectl_cmd
from .common import if_test_rbac
from .common import random_name
from .common import rbac_get_user_token_by_role
from rancher import ApiError
cluster_detail = {"cluster": None, "client": None}
roles = [CLUSTER_MEMBER, CLUSTER_OWNER, PROJECT_OWNER, PROJECT_MEMBER,
PROJECT_READ_ONLY]
def test_node_annotation_add():
annotation_key = random_name()
annotation_value = random_name()
# get node details
client, node = \
get_node_details(cluster_detail["cluster"], cluster_detail["client"])
# add annotation through API
node_annotations = node.annotations.data_dict()
node_annotations[annotation_key] = annotation_value
client.update(node, annotations=node_annotations)
time.sleep(2)
# annotation should be added
validate_annotation_set_on_node(client, node,
annotation_key,
annotation_value)
# delete annotation
del node_annotations[annotation_key]
client.update(node, annotations=node_annotations)
def test_node_annotation_add_multiple():
annotation_key_1 = random_name()
annotation_value_1 = random_name()
annotation_key_2 = random_name()
annotation_value_2 = random_name()
# get node details
client, node = \
get_node_details(cluster_detail["cluster"], cluster_detail["client"])
# add annotation through API
node_annotations = node.annotations.data_dict()
node_annotations[annotation_key_1] = annotation_value_1
node_annotations[annotation_key_2] = annotation_value_2
client.update(node, annotations=node_annotations)
time.sleep(2)
# annotation should be added
validate_annotation_set_on_node(client, node,
annotation_key_1,
annotation_value_1)
validate_annotation_set_on_node(client, node,
annotation_key_2,
annotation_value_2)
# delete annotation
del node_annotations[annotation_key_1]
del node_annotations[annotation_key_2]
client.update(node, annotations=node_annotations)
def test_node_annotation_edit():
annotation_key = random_name()
annotation_value = random_name()
# get node details
client, node = \
get_node_details(cluster_detail["cluster"], cluster_detail["client"])
# add annotation through API
node_annotations = node.annotations.data_dict()
node_annotations[annotation_key] = annotation_value
client.update(node, annotations=node_annotations)
time.sleep(2)
# annotation should be added
node = client.reload(node)
validate_annotation_set_on_node(client, node,
annotation_key,
annotation_value)
# edit annotation through API
node = client.reload(node)
node_annotations = node.annotations.data_dict()
new_value = random_name()
node_annotations[annotation_key] = new_value
client.update(node, annotations=node_annotations)
node = client.reload(node)
time.sleep(2)
validate_annotation_set_on_node(client, node,
annotation_key,
new_value)
# delete annotation
del node_annotations[annotation_key]
client.update(node, annotations=node_annotations)
def test_node_annotation_delete():
annotation_key = random_name()
annotation_value = random_name()
# get node details
client, node = \
get_node_details(cluster_detail["cluster"], cluster_detail["client"])
# add annotation on node
node_annotations = node.annotations.data_dict()
node_annotations[annotation_key] = annotation_value
client.update(node, annotations=node_annotations)
time.sleep(2)
# annotation should be added
validate_annotation_set_on_node(client, node,
annotation_key,
annotation_value)
# delete annotation
del node_annotations[annotation_key]
client.update(node, annotations=node_annotations)
time.sleep(2)
# annotation should be deleted
validate_annotation_deleted_on_node(client, node, annotation_key)
def test_node_annotation_delete_multiple():
annotation_key_1 = random_name()
annotation_value_1 = random_name()
annotation_key_2 = random_name()
annotation_value_2 = random_name()
# get node details
client, node = \
get_node_details(cluster_detail["cluster"], cluster_detail["client"])
# add annotation on node
node_annotations = node.annotations.data_dict()
node_annotations[annotation_key_1] = annotation_value_1
node_annotations[annotation_key_2] = annotation_value_2
client.update(node, annotations=node_annotations)
time.sleep(2)
# annotation should be added
validate_annotation_set_on_node(client, node,
annotation_key_1,
annotation_value_1)
validate_annotation_set_on_node(client, node,
annotation_key_2,
annotation_value_2)
# delete annotation
del node_annotations[annotation_key_1]
del node_annotations[annotation_key_2]
client.update(node, annotations=node_annotations)
time.sleep(2)
# annotation should be deleted
validate_annotation_deleted_on_node(client, node, annotation_key_1)
validate_annotation_deleted_on_node(client, node, annotation_key_2)
def test_node_annotation_kubectl_add():
annotation_key = random_name()
annotation_value = random_name()
# get node details
client, node = \
get_node_details(cluster_detail["cluster"], cluster_detail["client"])
node_name = node.nodeName
# add annotation on node
command = "annotate nodes " + node_name + " " + \
annotation_key + "=" + annotation_value
print(command)
execute_kubectl_cmd(command, False)
time.sleep(2)
# annotation should be added
node = client.reload(node)
validate_annotation_set_on_node(client, node,
annotation_key,
annotation_value)
# remove annotation
node = client.reload(node)
node_annotations = node.annotations.data_dict()
del node_annotations[annotation_key]
client.update(node, annotations=node_annotations)
def test_node_annotation_kubectl_edit():
annotation_key = random_name()
annotation_value = random_name()
# get node details
client, node = \
get_node_details(cluster_detail["cluster"], cluster_detail["client"])
node_name = node.nodeName
# add annotation on node
command = "annotate nodes " + node_name + " " + \
annotation_key + "=" + annotation_value
print(command)
execute_kubectl_cmd(command, False)
time.sleep(2)
# annotation should be added
node = client.reload(node)
validate_annotation_set_on_node(client, node,
annotation_key,
annotation_value)
# edit annotation through kubectl
new_value = random_name()
command = "annotate nodes " + node_name + " " + \
annotation_key + "=" + new_value + " --overwrite"
print(command)
execute_kubectl_cmd(command, False)
node = client.reload(node)
time.sleep(2)
    # Annotation should be updated with the new value
validate_annotation_set_on_node(client, node,
annotation_key,
annotation_value=new_value)
# remove annotation
node = client.reload(node)
node_annotations = node.annotations.data_dict()
del node_annotations[annotation_key]
client.update(node, annotations=node_annotations)
def test_node_annotation_kubectl_delete():
annotation_key = random_name()
annotation_value = random_name()
# get node details
client, node = \
get_node_details(cluster_detail["cluster"], cluster_detail["client"])
node_name = node.nodeName
# add annotation on node
command = "annotate nodes " + node_name + " " + \
annotation_key + "=" + annotation_value
print(command)
execute_kubectl_cmd(command, False)
time.sleep(2)
# annotation should be added
validate_annotation_set_on_node(client, node,
annotation_key,
annotation_value)
# remove annotation through kubectl
command = "annotate node " + node_name + " " + annotation_key + "-"
execute_kubectl_cmd(command, False)
time.sleep(2)
    # annotation should be deleted
validate_annotation_deleted_on_node(client, node, annotation_key)
def test_node_annotation_k_add_a_delete_k_add():
"""Add via kubectl, Delete via API, Add via kubectl"""
annotation_key = random_name()
annotation_value = random_name()
# get node details
client, node = \
get_node_details(cluster_detail["cluster"], cluster_detail["client"])
node_name = node.nodeName
command = "annotate nodes " + node_name + " " + \
annotation_key + "=" + annotation_value
print(command)
execute_kubectl_cmd(command, False)
time.sleep(2)
# annotation should be added
validate_annotation_set_on_node(client, node,
annotation_key,
annotation_value)
# delete annotation
node = client.reload(node)
node_annotations = node.annotations.data_dict()
del node_annotations[annotation_key]
client.update(node, annotations=node_annotations)
time.sleep(2)
# annotation should be deleted
validate_annotation_deleted_on_node(client, node, annotation_key)
# Add annotation via kubectl
execute_kubectl_cmd(command, False)
time.sleep(2)
# annotation should be added
validate_annotation_set_on_node(client, node,
annotation_key,
annotation_value)
# clean up annotation
node = client.reload(node)
node_annotations = node.annotations.data_dict()
del node_annotations[annotation_key]
client.update(node, annotations=node_annotations)
def test_node_annotation_k_add_a_edit_k_edit():
"""Add via kubectl, edit via API, edit via kubectl"""
annotation_key = random_name()
annotation_value = random_name()
# get node details
client, node = \
get_node_details(cluster_detail["cluster"], cluster_detail["client"])
node_name = node.nodeName
command = "annotate nodes " + node_name + " " + \
annotation_key + "=" + annotation_value
execute_kubectl_cmd(command, False)
time.sleep(2)
# annotation should be added
node = client.reload(node)
validate_annotation_set_on_node(client, node,
annotation_key,
annotation_value)
# edit annotation through API
node = client.reload(node)
node_annotations = node.annotations.data_dict()
new_value = random_name()
node_annotations[annotation_key] = new_value
client.update(node, annotations=node_annotations)
time.sleep(2)
# annotation should be added
node = client.reload(node)
validate_annotation_set_on_node(client, node,
annotation_key,
new_value)
# edit annotation through kubectl
new_value_2 = random_name()
command = "annotate nodes " + node_name + " " + \
annotation_key + "=" + new_value_2 + " --overwrite"
print(command)
execute_kubectl_cmd(command, False)
time.sleep(2)
    # Annotation should be updated with the new value
node = client.reload(node)
validate_annotation_set_on_node(client, node,
annotation_key,
new_value_2)
# remove annotation
node = client.reload(node)
node_annotations = node.annotations.data_dict()
del node_annotations[annotation_key]
client.update(node, annotations=node_annotations)
def test_node_annotations_a_add_k_delete_a_add():
"""Add via API, Delete via kubectl, Add via API"""
annotation_key = random_name()
annotation_value = random_name()
# get node details
client, node = \
get_node_details(cluster_detail["cluster"], cluster_detail["client"])
node_name = node.nodeName
node_annotations = node.annotations.data_dict()
node_annotations[annotation_key] = annotation_value
client.update(node, annotations=node_annotations)
time.sleep(2)
# annotation should be added
node = client.reload(node)
validate_annotation_set_on_node(client, node,
annotation_key,
annotation_value)
# delete annotation
command = " annotate node " + node_name + " " + annotation_key + "-"
execute_kubectl_cmd(command, False)
time.sleep(2)
# annotation should be deleted
node = client.reload(node)
validate_annotation_deleted_on_node(client, node, annotation_key)
# Add annotation via API
node = client.reload(node)
node_annotations = node.annotations.data_dict()
node_annotations[annotation_key] = annotation_value
client.update(node, annotations=node_annotations)
time.sleep(2)
# annotation should be added
validate_annotation_set_on_node(client, node,
annotation_key,
annotation_value)
# clean up annotation
node = client.reload(node)
node_annotations = node.annotations.data_dict()
del node_annotations[annotation_key]
client.update(node, annotations=node_annotations)
def test_node_annotation_a_add_k_edit_a_edit():
"""Add via API, Edit via kubectl, Edit via API"""
annotation_key = random_name()
annotation_value = random_name()
# get node details
client, node = \
get_node_details(cluster_detail["cluster"], cluster_detail["client"])
node_name = node.nodeName
node_annotations = node.annotations.data_dict()
node_annotations[annotation_key] = annotation_value
client.update(node, annotations=node_annotations)
time.sleep(2)
# annotation should be added
node = client.reload(node)
validate_annotation_set_on_node(client, node,
annotation_key,
annotation_value)
# edit annotation through kubectl
new_value = random_name()
command = "annotate nodes " + node_name + " " + \
annotation_key + "=" + new_value + " --overwrite"
print(command)
execute_kubectl_cmd(command, False)
node = client.reload(node)
time.sleep(2)
    # Annotation should be updated with the new value
validate_annotation_set_on_node(client, node,
annotation_key,
annotation_value=new_value)
# edit annotation through API
node = client.reload(node)
node_annotations = node.annotations.data_dict()
new_value_2 = random_name()
node_annotations[annotation_key] = new_value_2
client.update(node, annotations=node_annotations)
node = client.reload(node)
time.sleep(2)
# annotation should be added
validate_annotation_set_on_node(client, node,
annotation_key,
new_value_2)
# clean up annotation
node = client.reload(node)
node_annotations = node.annotations.data_dict()
del node_annotations[annotation_key]
client.update(node, annotations=node_annotations)
@if_test_rbac
@pytest.mark.parametrize("role", roles)
def test_rbac_node_annotation_add(role):
annotation_key = random_name()
annotation_value = random_name()
# get node details
client, node = \
get_node_details(cluster_detail["cluster"], cluster_detail["client"])
node_annotations = node.annotations.data_dict()
# get user token and client
token = rbac_get_user_token_by_role(role)
print("token: ", token)
user_client = get_client_for_token(token)
node_annotations[annotation_key] = annotation_value
if role == CLUSTER_OWNER:
user_client.update(node, annotations=node_annotations)
node = client.reload(node)
time.sleep(2)
# annotation should be added
validate_annotation_set_on_node(user_client, node,
annotation_key,
annotation_value)
# cleanup annotation
delete_node_annotation(annotation_key, node, client)
else:
with pytest.raises(ApiError) as e:
user_client.update(node, annotations=node_annotations)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
@pytest.mark.parametrize("role", roles)
def test_rbac_node_annotation_delete(role):
annotation_key = random_name()
annotation_value = random_name()
# get node details
client, node = \
get_node_details(cluster_detail["cluster"], cluster_detail["client"])
# add annotation on node
node_annotations = node.annotations.data_dict()
node_annotations[annotation_key] = annotation_value
client.update(node, annotations=node_annotations)
node = client.reload(node)
time.sleep(2)
# annotation should be added
validate_annotation_set_on_node(client, node,
annotation_key,
annotation_value)
# delete annotation
del node_annotations[annotation_key]
# get user token and client
token = rbac_get_user_token_by_role(role)
print("token: ", token)
user_client = get_client_for_token(token)
if role == CLUSTER_OWNER:
user_client.update(node, annotations=node_annotations)
node = client.reload(node)
time.sleep(2)
        # annotation should be deleted
validate_annotation_deleted_on_node(user_client, node, annotation_key)
else:
with pytest.raises(ApiError) as e:
user_client.update(node, annotations=node_annotations)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
if role != CLUSTER_OWNER:
# cleanup annotation
delete_node_annotation(annotation_key, node, client)
@if_test_rbac
@pytest.mark.parametrize("role", roles)
def test_rbac_node_annotation_edit(role):
annotation_key = random_name()
annotation_value = random_name()
annotation_value_new = random_name()
# get node details
client, node = \
get_node_details(cluster_detail["cluster"], cluster_detail["client"])
# add annotation on node
node_annotations = node.annotations.data_dict()
node_annotations[annotation_key] = annotation_value
client.update(node, annotations=node_annotations)
node = client.reload(node)
time.sleep(2)
# annotation should be added
validate_annotation_set_on_node(client, node,
annotation_key,
annotation_value)
# edit annotation
node_annotations[annotation_key] = annotation_value_new
# get user token and client
token = rbac_get_user_token_by_role(role)
print("token: ", token)
user_client = get_client_for_token(token)
if role == CLUSTER_OWNER:
user_client.update(node, annotations=node_annotations)
node = client.reload(node)
time.sleep(2)
        # annotation should be updated
validate_annotation_set_on_node(user_client, node,
annotation_key,
annotation_value_new)
else:
with pytest.raises(ApiError) as e:
user_client.update(node, annotations=node_annotations)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
delete_node_annotation(annotation_key, node, client)
@if_test_rbac
@pytest.mark.parametrize("role", roles)
def test_rbac_node_annotation_add_kubectl(role):
annotation_key = random_name()
annotation_value = random_name()
# get node details
client, node = \
get_node_details(cluster_detail["cluster"], cluster_detail["client"])
node_name = node.nodeName
# get user token and client
token = rbac_get_user_token_by_role(role)
user_client = get_client_for_token(token)
print(cluster_detail["cluster"]["id"])
print(cluster_detail["cluster"])
cluster = user_client.list_cluster(id=cluster_detail["cluster"]["id"]).data
print(cluster)
create_kubeconfig(cluster[0])
# add annotation on node
command = "annotate nodes " + node_name + " " + \
annotation_key + "=" + annotation_value
if role == CLUSTER_OWNER:
execute_kubectl_cmd(command, False)
node = client.reload(node)
time.sleep(2)
# annotation should be added
validate_annotation_set_on_node(user_client, node,
annotation_key,
annotation_value)
# cleanup annotation
delete_node_annotation(annotation_key, node, client)
elif role == CLUSTER_MEMBER:
result = execute_kubectl_cmd(command, False, stderr=True)
result = result.decode('ascii')
assert "cannot patch resource \"nodes\"" in result
assert "forbidden" in result
else:
result = execute_kubectl_cmd(command, False, stderr=True)
result = result.decode('ascii')
assert "cannot get resource \"nodes\"" in result
assert "forbidden" in result
@if_test_rbac
@pytest.mark.parametrize("role", roles)
def test_rbac_node_annotation_delete_kubectl(role):
annotation_key = random_name()
annotation_value = random_name()
# get node details
client, node = \
get_node_details(cluster_detail["cluster"], cluster_detail["client"])
node_name = node.nodeName
# add annotation on node
command = "annotate nodes " + node_name + " " + \
annotation_key + "=" + annotation_value
print(command)
execute_kubectl_cmd(command, False)
time.sleep(2)
# annotation should be added
node = client.reload(node)
# get user token and client
token = rbac_get_user_token_by_role(role)
user_client = get_client_for_token(token)
print(cluster_detail["cluster"]["id"])
print(cluster_detail["cluster"])
cluster = user_client.list_cluster(id=cluster_detail["cluster"]["id"]).data
print(cluster)
create_kubeconfig(cluster[0])
# remove annotation through kubectl
command = "annotate node " + node_name + " " + annotation_key + "-"
if role == CLUSTER_OWNER:
execute_kubectl_cmd(command, False)
time.sleep(2)
# annotation should be deleted
validate_annotation_deleted_on_node(user_client, node, annotation_key)
elif role == CLUSTER_MEMBER:
result = execute_kubectl_cmd(command, False, stderr=True)
result = result.decode('ascii')
assert "cannot patch resource \"nodes\"" in result
assert "forbidden" in result
# cleanup annotation
delete_node_annotation(annotation_key, node, client)
else:
result = execute_kubectl_cmd(command, False, stderr=True)
result = result.decode('ascii')
assert "cannot get resource \"nodes\"" in result
assert "forbidden" in result
# cleanup annotation
delete_node_annotation(annotation_key, node, client)
@if_test_rbac
@pytest.mark.parametrize("role", roles)
def test_rbac_node_annotation_edit_kubectl(role):
annotation_key = random_name()
annotation_value = random_name()
annotation_value_new = random_name()
# get node details
client, node = \
get_node_details(cluster_detail["cluster"], cluster_detail["client"])
node_name = node.nodeName
# add annotation on node
command = "annotate nodes " + node_name + " " + \
annotation_key + "=" + annotation_value
print(command)
execute_kubectl_cmd(command, False)
time.sleep(2)
# annotation should be added
node = client.reload(node)
# get user token and client
token = rbac_get_user_token_by_role(role)
user_client = get_client_for_token(token)
print(cluster_detail["cluster"]["id"])
print(cluster_detail["cluster"])
cluster = user_client.list_cluster(id=cluster_detail["cluster"]["id"]).data
print(cluster)
create_kubeconfig(cluster[0])
# edit annotation through kubectl
command = "annotate nodes " + node_name + " " + \
annotation_key + "=" + annotation_value_new + " --overwrite"
if role == CLUSTER_OWNER:
execute_kubectl_cmd(command, False)
time.sleep(2)
        # annotation should be updated with the new value
validate_annotation_set_on_node(user_client, node,
annotation_key,
annotation_value_new)
elif role == CLUSTER_MEMBER:
result = execute_kubectl_cmd(command, False, stderr=True)
result = result.decode('ascii')
assert "cannot patch resource \"nodes\"" in result
assert "forbidden" in result
# cleanup annotation
else:
result = execute_kubectl_cmd(command, False, stderr=True)
result = result.decode('ascii')
assert "cannot get resource \"nodes\"" in result
assert "forbidden" in result
# cleanup annotation
delete_node_annotation(annotation_key, node, client)
@pytest.fixture(scope='module', autouse="True")
def create_project_client(request):
cluster_detail["client"], cluster_detail["cluster"] = \
get_user_client_and_cluster()
def validate_annotation_set_on_node(client, node,
annotation_key,
annotation_value):
"""
This method checks if the annotation is
added on the node via API and kubectl
:param client: user client
:param node: node on which user has to validate if the annotation is added
:param annotation_key: annotation to be validated on the node
    :param annotation_value: annotation value to be checked
:return: None
"""
print("annotaton_value: ", annotation_value)
# check via API
node = client.reload(node)
node_annotations = node.annotations.data_dict()
assert node_annotations[annotation_key] == annotation_value
# check via kubectl
node_name = node.nodeName
command = " get nodes " + node_name
node_detail = execute_kubectl_cmd(command)
print(node_detail["metadata"]["annotations"])
assert annotation_key in node_detail["metadata"]["annotations"], \
"Annotation is not set in kubectl"
assert node_detail["metadata"]["annotations"][annotation_key] \
== annotation_value
def validate_annotation_deleted_on_node(client, node, annotation_key):
"""
This method checks if the annotation is deleted
on the node via API and kubectl
:param client: user client
:param node: node on which user has to validate if the
annotation is deleted
:param annotation_key: annotation to be validated on the node
:return: None
"""
# check via API
node = client.reload(node)
node_annotations = node.annotations.data_dict()
assert annotation_key not in node_annotations
# check via kubectl
node_name = node.nodeName
command = " get nodes " + node_name
print(command)
node_detail = execute_kubectl_cmd(command)
print(node_detail["metadata"]["annotations"])
assert annotation_key not in node_detail["metadata"]["annotations"]
def delete_node_annotation(annotation_key, node, client):
"""
:param annotation_key: annotation to be deleted on the node
:param node: node in cluster
:param client: client
:return:
"""
node = client.reload(node)
node_annotations = node.annotations.data_dict()
del node_annotations[annotation_key]
client.update(node, annotations=node_annotations)
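# Minimal end-to-end sketch of the helpers above (not collected as a test):
# set an annotation through the API, validate it via API and kubectl, then
# remove it again. The key and value are random placeholders.
def _example_annotation_lifecycle():
    client, node = \
        get_node_details(cluster_detail["cluster"], cluster_detail["client"])
    key, value = random_name(), random_name()
    node_annotations = node.annotations.data_dict()
    node_annotations[key] = value
    client.update(node, annotations=node_annotations)
    validate_annotation_set_on_node(client, node, key, value)
    delete_node_annotation(key, node, client)
    validate_annotation_deleted_on_node(client, node, key)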
| 28,779 | 33.716526 | 79 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_pipeline.py
|
import pytest
import time
import urllib
import re
from .common import CATTLE_TEST_URL
from .common import USER_TOKEN
from .common import DNS_REGEX
from .common import create_connection
from .common import create_kubeconfig
from .common import create_project_and_ns
from .common import get_cluster_client_for_token
from .common import get_global_admin_client_and_cluster
from .common import get_project_client_for_token
from .common import random_str
from .common import random_test_name
from .common import wait_for_condition
from .common import WebsocketLogParse
pipeline_details = {"p_client": None, "ns": None, "cluster": None,
"project": None, "pipeline": None, "pipeline_run": None}
PIPELINE_TIMEOUT = 600
PIPELINE_REPO_URL = "https://github.com/rancher/pipeline-example-go.git"
def test_pipeline():
pipeline = create_example_pipeline()
assert len(pipeline_details["p_client"].list_pipeline(
projectId=pipeline_details["project"].id).data) > 0
print("Created Pipeline, running example...")
pipeline_details["pipeline"] = pipeline
pipeline_details["pipeline_run"] = pipeline_details["p_client"].action(
obj=pipeline,
action_name='run',
branch="master")
wait_for_condition(
pipeline_details["p_client"], pipeline,
check_last_run_state("Success"), timeout=PIPELINE_TIMEOUT)
assert len(pipeline_view_logs()) > 1
print("Cleaning up...")
pipeline_details["p_client"].delete(pipeline)
assert len(pipeline_details["p_client"].list_pipeline(
projectId=pipeline_details["project"].id).data) == 0
print("\nDeleted Pipeline")
@pytest.fixture(scope='module', autouse="True")
def create_project_client(request):
client, cluster = get_global_admin_client_and_cluster()
create_kubeconfig(cluster)
p, ns = create_project_and_ns(
USER_TOKEN, cluster, random_test_name("testworkload"))
p_client = get_project_client_for_token(p, USER_TOKEN)
pipeline_details["p_client"] = p_client
pipeline_details["ns"] = ns
pipeline_details["cluster"] = cluster
pipeline_details["project"] = p
def fin():
        # project delete doesn't remove all namespaces; remove them manually
client = get_cluster_client_for_token(pipeline_details["cluster"],
USER_TOKEN)
for ns_x in client.list_namespace(
projectId=pipeline_details["project"].id):
client.delete(ns_x)
client.delete(pipeline_details["project"])
request.addfinalizer(fin)
def check_last_run_state(status):
def _find_condition(resource):
if not hasattr(resource, "lastRunState"):
return False
if resource.lastRunState is None:
return False
if resource.lastRunState == status:
return True
if resource.lastRunState == "Failed":
return False
return False
return _find_condition
def create_example_pipeline():
return pipeline_details["p_client"].create_pipeline(
name="test-" + random_str(),
repositoryUrl=PIPELINE_REPO_URL,
triggerWebhookPr=False,
triggerWebhookPush=False,
triggerWebhookTag=False)
def pipeline_view_logs():
# using a regex to get the dns from the CATTLE_TEST_URL
search_result = re.search(DNS_REGEX, CATTLE_TEST_URL)
dns = search_result.group(2)
url_base = 'wss://' + dns + \
'/v3/projects/' + pipeline_details["project"].id + \
'/pipelineExecutions/' + pipeline_details["pipeline_run"].id + \
'/log?'
params_dict = {
"stage": 1,
"step": 0
}
params = urllib.parse.urlencode(params_dict, doseq=True,
quote_via=urllib.parse.quote, safe='()')
url = url_base + "&" + params
ws = create_connection(url, None)
logparse = WebsocketLogParse()
logparse.start_thread(target=logparse.receiver, args=(ws, False, False))
# wait on thread to report any logs.
while len(logparse.last_message) < 1:
time.sleep(2)
logs = '\noutput:\n' + logparse.last_message + '\n'
print(logs)
    # every line of the pipeline log output is prefixed with '+'
assert '+' in logparse.last_message, \
"failed to view logs"
logparse.last_message = ''
ws.close()
return logs
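# For reference, the websocket URL assembled above has the shape below
# (IDs are placeholders); stage/step select which stage's log is tailed:
#
#   wss://<dns>/v3/projects/<project_id>/pipelineExecutions/<run_id>/log
#       ?stage=1&step=0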
| 4,525 | 35.208 | 79 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_import_k3s_cluster.py
|
from python_terraform import * # NOQA
from .common import * # NOQA
RANCHER_REGION = os.environ.get("AWS_REGION")
RANCHER_VPC_ID = os.environ.get("AWS_VPC")
RANCHER_SUBNETS = os.environ.get("AWS_SUBNET")
RANCHER_AWS_SG = os.environ.get("AWS_SECURITY_GROUPS")
RANCHER_AVAILABILITY_ZONE = os.environ.get("AWS_AVAILABILITY_ZONE")
RANCHER_AWS_AMI = os.environ.get("AWS_AMI", "")
RANCHER_AWS_USER = os.environ.get("AWS_USER", "ubuntu")
HOST_NAME = os.environ.get('RANCHER_HOST_NAME', "sa")
RANCHER_K3S_VERSION = os.environ.get("RANCHER_K3S_VERSION", "")
RANCHER_K3S_VERSIONS = os.environ.get('RANCHER_K3S_VERSIONS', "").split(",")
RANCHER_K3S_NO_OF_SERVER_NODES = \
os.environ.get("RANCHER_K3S_NO_OF_SERVER_NODES", 2)
RANCHER_K3S_NO_OF_WORKER_NODES = \
os.environ.get("RANCHER_K3S_NO_OF_WORKER_NODES", 0)
RANCHER_K3S_SERVER_FLAGS = os.environ.get("RANCHER_K3S_SERVER_FLAGS", "")
RANCHER_K3S_WORKER_FLAGS = os.environ.get("RANCHER_K3S_WORKER_FLAGS", "agent")
RANCHER_QA_SPACE = os.environ.get("RANCHER_QA_SPACE", "qa.rancher.space.")
RANCHER_EC2_INSTANCE_CLASS = os.environ.get("RANCHER_EC2_INSTANCE_CLASS",
"t3a.medium")
RANCHER_EXTERNAL_DB = os.environ.get("RANCHER_EXTERNAL_DB", "mysql")
RANCHER_EXTERNAL_DB_VERSION = os.environ.get("RANCHER_EXTERNAL_DB_VERSION")
RANCHER_DB_GROUP_NAME = os.environ.get("RANCHER_DB_GROUP_NAME")
RANCHER_DB_MAX_CONNECTIONS = os.environ.get("RANCHER_DB_MAX_CONNECTIONS", 80)
RANCHER_INSTANCE_CLASS = os.environ.get("RANCHER_INSTANCE_CLASS",
"db.t2.micro")
RANCHER_DB_USERNAME = os.environ.get("RANCHER_DB_USERNAME", "adminuser")
RANCHER_DB_PASSWORD = os.environ.get("RANCHER_DB_PASSWORD", "")
RANCHER_K3S_KUBECONFIG_PATH = DATA_SUBDIR + "/k3s_kubeconfig.yaml"
RANCHER_NODE_OS = os.environ.get("RANCHER_NODE_OS", "ubuntu")
RANCHER_INSTALL_MODE = os.environ.get("RANCHER_INSTALL_MODE", "INSTALL_K3S_VERSION")
RANCHER_RDS_ENVIRONMENT = os.environ.get("RANCHER_RDS_ENVIRONMENT", "dev")
RANCHER_RDS_ENGINE_MODE = os.environ.get("RANCHER_RDS_ENGINE_MODE", "provisioned")
RANCHER_CLUSTER_TYPE = os.environ.get("RANCHER_CLUSTER_TYPE", "external_db")
AWS_VOLUME_SIZE = os.environ.get("AWS_VOLUME_SIZE", "8")
RANCHER_RHEL_USERNAME = os.environ.get("RANCHER_RHEL_USERNAME")
RANCHER_RHEL_PASSWORD = os.environ.get("RANCHER_RHEL_PASSWORD")
def test_create_k3s_single_control_cluster():
aws_nodes, client, k3s_clusterfilepath = create_single_control_cluster()
def test_create_k3s_multiple_control_cluster():
k3s_clusterfilepath = create_multiple_control_cluster()
def test_import_k3s_single_control_cluster():
aws_nodes, client, k3s_clusterfilepath = create_single_control_cluster()
cluster = create_rancher_cluster(client, k3s_clusterfilepath)
cluster_cleanup(client, cluster, aws_nodes)
def test_import_k3s_multiple_control_cluster():
client = get_user_client()
k3s_clusterfilepath = create_multiple_control_cluster()
cluster = create_rancher_cluster(client, k3s_clusterfilepath)
def test_delete_k3s():
delete_resource_in_AWS_by_prefix(RANCHER_HOSTNAME_PREFIX)
def create_single_control_cluster():
# Get URL and User_Token
client = get_user_client()
# Create nodes in AWS
aws_nodes = create_nodes()
# Install k3s on master node
kubeconfig, node_token = install_k3s_master_node(aws_nodes[0])
# Join worker nodes
join_k3s_worker_nodes(aws_nodes[0], aws_nodes[1:], node_token)
# Verify cluster health
verify_cluster_health(aws_nodes[0])
# Update master node IP in kubeconfig file
localhost = "127.0.0.1"
kubeconfig = kubeconfig.replace(localhost, aws_nodes[0].public_ip_address)
k3s_kubeconfig_file = "k3s_kubeconfig.yaml"
k3s_clusterfilepath = create_kube_config_file(kubeconfig, k3s_kubeconfig_file)
print(k3s_clusterfilepath)
k3s_kubeconfig_file = "k3s_kubeconfig.yaml"
k3s_clusterfilepath = DATA_SUBDIR + "/" + k3s_kubeconfig_file
is_file = os.path.isfile(k3s_clusterfilepath)
assert is_file
with open(k3s_clusterfilepath, 'r') as f:
print(f.read())
cmd = "kubectl get nodes -o wide --kubeconfig=" + k3s_clusterfilepath
print(run_command(cmd))
cmd = "kubectl get pods -A -o wide --kubeconfig=" + k3s_clusterfilepath
print(run_command(cmd))
return aws_nodes, client, k3s_clusterfilepath
def create_multiple_control_cluster():
global RANCHER_EXTERNAL_DB_VERSION
global RANCHER_DB_GROUP_NAME
k3s_kubeconfig_file = "k3s_kubeconfig.yaml"
k3s_clusterfilepath = DATA_SUBDIR + "/" + k3s_kubeconfig_file
tf_dir = DATA_SUBDIR + "/" + "terraform/k3s/master"
keyPath = os.path.abspath('.') + '/.ssh/' + AWS_SSH_KEY_NAME
os.chmod(keyPath, 0o400)
no_of_servers = int(RANCHER_K3S_NO_OF_SERVER_NODES)
no_of_servers = no_of_servers - 1
if RANCHER_EXTERNAL_DB == "MariaDB":
RANCHER_EXTERNAL_DB_VERSION = "10.3.20" if not RANCHER_EXTERNAL_DB_VERSION \
else RANCHER_EXTERNAL_DB_VERSION
RANCHER_DB_GROUP_NAME = "mariadb10.3" if not RANCHER_DB_GROUP_NAME \
else RANCHER_DB_GROUP_NAME
elif RANCHER_EXTERNAL_DB == "postgres":
RANCHER_EXTERNAL_DB_VERSION = "11.5" if not RANCHER_EXTERNAL_DB_VERSION \
else RANCHER_EXTERNAL_DB_VERSION
RANCHER_DB_GROUP_NAME = "postgres11" if not RANCHER_DB_GROUP_NAME \
else RANCHER_DB_GROUP_NAME
elif RANCHER_EXTERNAL_DB == "aurora-mysql":
RANCHER_EXTERNAL_DB_VERSION = "5.7.mysql_aurora.2.09.0" if not RANCHER_EXTERNAL_DB_VERSION \
else RANCHER_EXTERNAL_DB_VERSION
RANCHER_DB_GROUP_NAME = "aurora-mysql5.7" if not RANCHER_DB_GROUP_NAME \
else RANCHER_DB_GROUP_NAME
else:
RANCHER_EXTERNAL_DB_VERSION = "5.7" if not RANCHER_EXTERNAL_DB_VERSION \
else RANCHER_EXTERNAL_DB_VERSION
RANCHER_DB_GROUP_NAME = "mysql5.7" if not RANCHER_DB_GROUP_NAME \
else RANCHER_DB_GROUP_NAME
tf = Terraform(working_dir=tf_dir,
variables={'region': RANCHER_REGION,
'vpc_id': RANCHER_VPC_ID,
'subnets': RANCHER_SUBNETS,
'sg_id': RANCHER_AWS_SG,
'availability_zone': RANCHER_AVAILABILITY_ZONE,
'aws_ami': RANCHER_AWS_AMI,
'aws_user': RANCHER_AWS_USER,
'resource_name': RANCHER_HOSTNAME_PREFIX,
'access_key': keyPath,
'external_db': RANCHER_EXTERNAL_DB,
'external_db_version': RANCHER_EXTERNAL_DB_VERSION,
'db_group_name': RANCHER_DB_GROUP_NAME,
'instance_class': RANCHER_INSTANCE_CLASS,
'max_connections': RANCHER_DB_MAX_CONNECTIONS,
'ec2_instance_class': RANCHER_EC2_INSTANCE_CLASS,
'db_username': RANCHER_DB_USERNAME,
'db_password': RANCHER_DB_PASSWORD,
'k3s_version': RANCHER_K3S_VERSION,
'no_of_server_nodes': no_of_servers,
'server_flags': RANCHER_K3S_SERVER_FLAGS,
'qa_space': RANCHER_QA_SPACE,
'node_os': RANCHER_NODE_OS,
'username': RANCHER_RHEL_USERNAME,
'password': RANCHER_RHEL_PASSWORD,
'install_mode': RANCHER_INSTALL_MODE,
'engine_mode': RANCHER_RDS_ENGINE_MODE,
'environment': RANCHER_RDS_ENVIRONMENT,
'cluster_type': RANCHER_CLUSTER_TYPE,
'volume_size': AWS_VOLUME_SIZE})
print("Creating cluster")
tf.init()
tf.plan(out="plan_server.out")
print(tf.apply("--auto-approve"))
if int(RANCHER_K3S_NO_OF_WORKER_NODES) > 0:
tf_dir = DATA_SUBDIR + "/" + "terraform/k3s/worker"
tf = Terraform(working_dir=tf_dir,
variables={'region': RANCHER_REGION,
'vpc_id': RANCHER_VPC_ID,
'subnets': RANCHER_SUBNETS,
'sg_id': RANCHER_AWS_SG,
'availability_zone': RANCHER_AVAILABILITY_ZONE,
'aws_ami': RANCHER_AWS_AMI,
'aws_user': RANCHER_AWS_USER,
'ec2_instance_class': RANCHER_EC2_INSTANCE_CLASS,
'resource_name': RANCHER_HOSTNAME_PREFIX,
'access_key': keyPath,
'k3s_version': RANCHER_K3S_VERSION,
'no_of_worker_nodes': int(RANCHER_K3S_NO_OF_WORKER_NODES),
'node_os': RANCHER_NODE_OS,
'username': RANCHER_RHEL_USERNAME,
'password': RANCHER_RHEL_PASSWORD,
'install_mode': RANCHER_INSTALL_MODE,
'volume_size': AWS_VOLUME_SIZE,
'worker_flags': RANCHER_K3S_WORKER_FLAGS})
print("Joining worker nodes")
tf.init()
tf.plan(out="plan_worker.out")
print(tf.apply("--auto-approve"))
cmd = "cp /tmp/" + RANCHER_HOSTNAME_PREFIX + "_kubeconfig " + k3s_clusterfilepath
os.system(cmd)
is_file = os.path.isfile(k3s_clusterfilepath)
assert is_file
print(k3s_clusterfilepath)
with open(k3s_clusterfilepath, 'r') as f:
print(f.read())
print("K3s Cluster Created")
cmd = "kubectl get nodes -o wide --kubeconfig=" + k3s_clusterfilepath
print(run_command(cmd))
cmd = "kubectl get pods -o wide -A --kubeconfig=" + k3s_clusterfilepath
print(run_command(cmd))
return k3s_clusterfilepath
def create_rancher_cluster(client, k3s_clusterfilepath):
if CLUSTER_NAME:
clustername = CLUSTER_NAME
else:
clustername = random_test_name("testcustom-k3s")
cluster = client.create_cluster(name=clustername)
cluster_token = create_custom_host_registration_token(client, cluster)
command = cluster_token.insecureCommand
finalimportcommand = command + " --kubeconfig " + k3s_clusterfilepath
result = run_command(finalimportcommand)
clusters = client.list_cluster(name=clustername).data
assert len(clusters) > 0
print("Cluster is")
print(clusters[0])
# Validate the cluster
cluster = validate_cluster(client, clusters[0],
check_intermediate_state=False)
return cluster
def create_nodes():
aws_nodes = \
AmazonWebServices().create_multiple_nodes(
int(RANCHER_K3S_NO_OF_WORKER_NODES),
random_test_name("testcustom-k3s"+"-"+HOST_NAME))
assert len(aws_nodes) == int(RANCHER_K3S_NO_OF_WORKER_NODES)
for aws_node in aws_nodes:
print("AWS NODE PUBLIC IP {}".format(aws_node.public_ip_address))
return aws_nodes
def install_k3s_master_node(master):
# Connect to the node and install k3s on master
print("K3s VERSION {}".format(RANCHER_K3S_VERSION))
cmd = "curl -sfL https://get.k3s.io | \
{} sh -s - server --node-external-ip {}".\
format("INSTALL_K3S_VERSION={}".format(RANCHER_K3S_VERSION) if RANCHER_K3S_VERSION \
else "", master.public_ip_address)
print("Master Install {}".format(cmd))
install_result = master.execute_command(cmd)
print(install_result)
# Get node token from master
cmd = "sudo cat /var/lib/rancher/k3s/server/node-token"
print(cmd)
node_token = master.execute_command(cmd)
print(node_token)
# Get kube_config from master
cmd = "sudo cat /etc/rancher/k3s/k3s.yaml"
kubeconfig = master.execute_command(cmd)
print(kubeconfig)
print("NO OF WORKER NODES: {}".format(RANCHER_K3S_NO_OF_WORKER_NODES))
print("NODE TOKEN: \n{}".format(node_token))
print("KUBECONFIG: \n{}".format(kubeconfig))
return kubeconfig[0].strip("\n"), node_token[0].strip("\n")
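# Note on the helper above: execute_command is assumed to return a sequence
# whose first element is the command's stdout (hence kubeconfig[0] and
# node_token[0]); the kubeconfig is the raw /etc/rancher/k3s/k3s.yaml content
# and the node token is the join secret consumed by join_k3s_worker_nodes below.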
def join_k3s_worker_nodes(master, workers, node_token):
for worker in workers:
cmd = "curl -sfL https://get.k3s.io | \
{} K3S_URL=https://{}:6443 K3S_TOKEN={} sh -s - ". \
format("INSTALL_K3S_VERSION={}".format(RANCHER_K3S_VERSION) \
if RANCHER_K3S_VERSION else "", master.public_ip_address, node_token)
cmd = cmd + " {} {}".format("--node-external-ip", worker.public_ip_address)
        print("Joining worker node to k3s master")
print(cmd)
install_result = worker.execute_command(cmd)
print(install_result)
def verify_cluster_health(master):
cmd = "sudo k3s kubectl get nodes"
install_result = master.execute_command(cmd)
print(install_result)
def create_kube_config_file(kubeconfig, k3s_kubeconfig_file):
k3s_clusterfilepath = DATA_SUBDIR + "/" + k3s_kubeconfig_file
    with open(k3s_clusterfilepath, "w") as f:
        f.write(kubeconfig)
return k3s_clusterfilepath
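# A minimal usage sketch, assuming nodes provisioned via create_nodes() above:
# it shows how install_k3s_master_node, join_k3s_worker_nodes,
# verify_cluster_health and create_kube_config_file fit together. The function
# name and the default kubeconfig file name below are illustrative only and are
# not used by any test in this file.
def _example_manual_k3s_flow(master, workers, kubeconfig_file="k3s_kubeconfig"):
    kubeconfig, node_token = install_k3s_master_node(master)
    join_k3s_worker_nodes(master, workers, node_token)
    verify_cluster_health(master)
    return create_kube_config_file(kubeconfig, kubeconfig_file)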
| 13,346 | 43.342193 | 100 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_upgrade.py
|
import base64
import pytest
from .common import * # NOQA
from .test_secrets import (
create_and_validate_workload_with_secret_as_env_variable,
create_and_validate_workload_with_secret_as_volume,
validate_workload_with_secret,
create_secret)
from .test_service_discovery import create_dns_record
cluster_name = CLUSTER_NAME
validate_prefix = os.environ.get('RANCHER_VALIDATE_RESOURCES_PREFIX', "step0")
create_prefix = os.environ.get('RANCHER_CREATE_RESOURCES_PREFIX', "step1")
namespace = {"p_client": None, "ns": None, "cluster": None, "project": None,
"testclient_pods": []}
upgrade_check_stage = os.environ.get('RANCHER_UPGRADE_CHECK', "preupgrade")
validate_ingress = \
ast.literal_eval(os.environ.get('RANCHER_INGRESS_CHECK', "True"))
sshUser = os.environ.get('RANCHER_SSH_USER', "ubuntu")
rancherVersion = os.environ.get('RANCHER_SERVER_VERSION', "master")
upgradeVersion = os.environ.get('RANCHER_SERVER_VERSION_UPGRADE', "master")
upgradeImage = os.environ.get('RANCHER_UPGRADE_IMAGE', "rancher/rancher")
CLUSTER_VERSION = os.environ.get('RANCHER_CLUSTER_UPGRADE_VERSION', "")
value = base64.b64encode(b"valueall")
keyvaluepair = {"testall": value.decode('utf-8')}
wl_name = "-testwl"
sd_name = "-testsd"
sd_wlname1 = "-testsd1"
sd_wlname2 = "-testsd2"
ingress_name1 = "-testingress1"
ingress_name2 = "-testingress2"
ingress_wlname1 = "-testingresswl1"
ingress_wlname2 = "-testingresswl2"
project_name = "-p1"
ns_name1 = "-ns1"
ns_name2 = "-ns2"
wl_name_create = create_prefix + wl_name
sd_name_create = create_prefix + sd_name
sd_wlname1_create = create_prefix + sd_wlname1
sd_wlname2_create = create_prefix + sd_wlname2
ingress_name1_create = create_prefix + ingress_name1
ingress_name2_create = create_prefix + ingress_name2
ingress_wlname1_create = create_prefix + ingress_wlname1
ingress_wlname2_create = create_prefix + ingress_wlname2
wl_name_validate = validate_prefix + wl_name
sd_name_validate = validate_prefix + sd_name
sd_wlname1_validate = validate_prefix + sd_wlname1
sd_wlname2_validate = validate_prefix + sd_wlname2
ingress_name1_validate = validate_prefix + ingress_name1
ingress_name2_validate = validate_prefix + ingress_name2
ingress_wlname1_validate = validate_prefix + ingress_wlname1
ingress_wlname2_validate = validate_prefix + ingress_wlname2
secret_name = create_prefix + "-testsecret"
secret_wl_name1_create = create_prefix + "-testwl1withsec"
secret_wl_name2_create = create_prefix + "-testwl2withsec"
secret_wl_name1_validate = validate_prefix + "-testwl1withsec"
secret_wl_name2_validate = validate_prefix + "-testwl2withsec"
app_ns = create_prefix + "-app-ns"
app_create_name = create_prefix + "-app"
app_validate_name = validate_prefix + "-app"
# the pre_upgrade_externalId is for launching an app
pre_upgrade_externalId = \
create_catalog_external_id("test-catalog", "mysql", "1.3.1")
# the post_upgrade_externalId is for upgrading the existing app
post_upgrade_externalId = \
create_catalog_external_id("test-catalog", "mysql", "1.3.2")
catalogUrl = "https://github.com/rancher/integration-test-charts.git"
catalogBranch = "validation-tests"
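# For reference, the external IDs built above are assumed to follow the usual
# Rancher catalog convention, roughly
# "catalog://?catalog=test-catalog&template=mysql&version=1.3.1"; the
# "&"-separated template/version parameters are what validate_catalog_app
# parses further down in this file.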
if_post_upgrade = pytest.mark.skipif(
upgrade_check_stage != "postupgrade",
reason='This test is not executed for PreUpgrade checks')
if_pre_upgrade = pytest.mark.skipif(
upgrade_check_stage != "preupgrade",
    reason='This test is not executed for PostUpgrade checks')
if_validate_ingress = pytest.mark.skipif(
validate_ingress is False,
reason='This test is not executed')
if_upgrade_rancher = pytest.mark.skipif(
upgrade_check_stage != "upgrade_rancher",
reason='This test is only for testing upgrading Rancher')
if_upgrade_cluster = pytest.mark.skipif(
upgrade_check_stage != "upgrade_cluster",
reason='This test is only for testing upgrading clusters')
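# How RANCHER_UPGRADE_CHECK gates this module, based on the markers above: the
# create tests (order 4-6) run in every stage; the @if_post_upgrade validations
# (order 1-3) run only when the stage is "postupgrade"; and test_rancher_upgrade
# / test_cluster_upgrade run only in the "upgrade_rancher" / "upgrade_cluster"
# stages. The ingress tests additionally require RANCHER_INGRESS_CHECK to be
# True.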
@if_post_upgrade
@pytest.mark.run(order=1)
def test_validate_existing_project_resources():
validate_existing_project_resources()
@if_post_upgrade
@pytest.mark.run(order=2)
def test_validate_existing_wl():
validate_wl(wl_name_validate)
@if_post_upgrade
@pytest.mark.run(order=2)
def test_validate_existing_service_discovery():
validate_service_discovery_upgrade(sd_name_validate,
[sd_wlname1_validate, sd_wlname2_validate])
@if_post_upgrade
@pytest.mark.run(order=2)
def test_validate_existing_wl_with_secret():
    validate_workloads_with_secret(
        secret_wl_name1_validate, secret_wl_name2_validate)
# It's hard to find an app that supports the Windows case for now.
# Could we make an app that supports both Windows and Linux?
@skip_test_windows_os
@skip_test_hardened
@if_post_upgrade
@pytest.mark.run(order=2)
def test_validate_existing_catalog_app():
validate_catalog_app(app_validate_name, pre_upgrade_externalId)
@if_post_upgrade
@if_validate_ingress
@pytest.mark.run(order=2)
def test_validate_existing_ingress_daemon():
validate_ingress_xip_io(ingress_name1_validate,
ingress_wlname1_validate)
@if_post_upgrade
@if_validate_ingress
@pytest.mark.run(order=2)
def test_validate_existing_ingress_wl():
validate_ingress_xip_io(ingress_name2_validate,
ingress_wlname2_validate)
@if_post_upgrade
@pytest.mark.run(order=3)
def test_modify_workload_validate_deployment():
modify_workload_validate_deployment()
@if_post_upgrade
@pytest.mark.run(order=3)
def test_modify_workload_validate_sd():
modify_workload_validate_sd()
@if_post_upgrade
@pytest.mark.run(order=3)
def test_modify_workload_validate_secret():
modify_workload_validate_secret()
# It's hard to find an app that supports the Windows case for now.
# Could we make an app that supports both Windows and Linux?
@skip_test_windows_os
@skip_test_hardened
@if_post_upgrade
@pytest.mark.run(order=3)
def test_modify_catalog_app():
modify_catalog_app()
@if_post_upgrade
@if_validate_ingress
@pytest.mark.run(order=3)
def test_modify_workload_validate_ingress():
modify_workload_validate_ingress()
@pytest.mark.run(order=4)
def test_create_project_resources():
create_project_resources()
@pytest.mark.run(order=5)
def test_create_and_validate_wl():
create_and_validate_wl()
@pytest.mark.run(order=5)
def test_create_and_validate_service_discovery():
create_and_validate_service_discovery()
@pytest.mark.run(order=5)
def test_create_validate_workloads_with_secret():
    create_validate_workloads_with_secret()
@if_validate_ingress
@pytest.mark.run(order=5)
def test_create_and_validate_ingress_xip_io_daemon():
create_and_validate_ingress_xip_io_daemon()
@if_validate_ingress
@pytest.mark.run(order=5)
def test_create_and_validate_ingress_xip_io_wl():
create_and_validate_ingress_xip_io_wl()
# It's hard to find an app that supports the Windows case for now.
# Could we make an app that supports both Windows and Linux?
@skip_test_hardened
@pytest.mark.run(order=5)
def test_create_and_validate_catalog_app():
create_and_validate_catalog_app()
@pytest.mark.run(order=6)
def test_create_and_validate_ip_address_pods():
create_and_validate_ip_address_pods()
# the flag if_upgrade_rancher is false all the time
# because we do not have this option for the variable RANCHER_UPGRADE_CHECK
# instead, we will have a new pipeline that calls this function directly
@if_upgrade_rancher
def test_rancher_upgrade():
upgrade_rancher_server(CATTLE_TEST_URL)
client = get_user_client()
version = client.list_setting(name="server-version").data[0].value
assert version == upgradeVersion
# the flag if_upgrade_cluster is false all the time
# because we do not have this option for the variable RANCHER_UPGRADE_CHECK
# instead, we will have a new pipeline that calls this function directly
@if_upgrade_cluster
def test_cluster_upgrade():
upgrade_cluster()
wait_for_ready_nodes()
def create_and_validate_wl():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
p_client.create_workload(name=wl_name_create, containers=con,
namespaceId=ns.id, scale=2)
validate_wl(wl_name_create)
def validate_wl(workload_name, pod_count=2):
p_client = namespace["p_client"]
ns = namespace["ns"]
workloads = p_client.list_workload(name=workload_name,
namespaceId=ns.id).data
assert len(workloads) == 1
workload = workloads[0]
validate_workload(
p_client, workload, "deployment", ns.name, pod_count=pod_count)
validate_service_discovery_upgrade(workload_name, [workload_name])
def create_and_validate_ingress_xip_io_daemon():
p_client = namespace["p_client"]
ns = namespace["ns"]
cluster = namespace["cluster"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
# Ingress with daemonSet target
workload = p_client.create_workload(name=ingress_wlname1_create,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
validate_workload(p_client, workload, "daemonSet", ns.name,
len(get_schedulable_nodes(cluster)))
path = "/name.html"
rule = {"host": "xip.io",
"paths":
[{"workloadIds": [workload.id], "targetPort": TEST_IMAGE_PORT,
"path": path}]}
p_client.create_ingress(name=ingress_name1_create,
namespaceId=ns.id,
rules=[rule])
validate_ingress_xip_io(ingress_name1_create, ingress_wlname1_create)
def create_and_validate_ingress_xip_io_wl():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
# Ingress with Deployment target
workload = p_client.create_workload(name=ingress_wlname2_create,
containers=con,
namespaceId=ns.id, scale=2)
validate_wl(ingress_wlname2_create, 2)
path = "/name.html"
rule = {"host": "xip.io",
"paths":
[{"workloadIds": [workload.id], "targetPort": TEST_IMAGE_PORT,
"path": path}]}
p_client.create_ingress(name=ingress_name2_create,
namespaceId=ns.id,
rules=[rule])
validate_ingress_xip_io(ingress_name2_create, ingress_wlname2_create)
def modify_workload_validate_deployment():
# This method increments the deployment scale and validates it
p_client = namespace["p_client"]
ns = namespace["ns"]
workload = p_client.list_workload(
        name=wl_name_validate, namespace=ns.id).data[0]
p_client.update(workload, scale=4, containers=workload.containers)
validate_wl(wl_name_validate, 4)
def modify_workload_validate_ingress():
# This method increments the workload scale and validates the ingress
# pointing to it
p_client = namespace["p_client"]
ns = namespace["ns"]
# Get workload and update
ing_workload = p_client.list_workload(
name=ingress_wlname2_validate, namespace=ns.id).data[0]
print(ing_workload)
# Increment workload
ing_workload = p_client.update(ing_workload, scale=4,
containers=ing_workload.containers)
wait_for_pods_in_workload(p_client, ing_workload, 4)
validate_wl(ing_workload.name, 4)
# Validate ingress after workload scale up
validate_ingress_xip_io(ingress_name2_validate, ingress_wlname2_validate)
def modify_workload_validate_sd():
# This method increments the workload scale and validates
# service discovery
p_client = namespace["p_client"]
ns = namespace["ns"]
# Get sd workloads and validate service discovery
sd_workload = p_client.list_workload(
name=sd_wlname2_validate, namespace=ns.id).data[0]
p_client.update(sd_workload, scale=3, containers=sd_workload.containers)
validate_wl(sd_wlname2_validate, 3)
validate_service_discovery_upgrade(sd_name_validate,
[sd_wlname1_validate,
sd_wlname2_validate])
def modify_workload_validate_secret():
    # This method increments the scale of the workloads with secrets and validates them
p_client = namespace["p_client"]
ns = namespace["ns"]
secret_workload1 = p_client.list_workload(
name=secret_wl_name1_validate, namespace=ns.id).data[0]
secret_workload1 = p_client.update(secret_workload1, scale=3,
containers=secret_workload1.containers)
wait_for_pods_in_workload(p_client, secret_workload1, 3)
validate_workload_with_secret(
p_client, secret_workload1, "deployment", ns.name,
keyvaluepair, workloadwithsecretasVolume=True, podcount=3)
secret_workload2 = p_client.list_workload(name=secret_wl_name2_validate,
namespace=ns.id).data[0]
secret_workload2 = p_client.update(secret_workload2, scale=3,
containers=secret_workload2.containers)
wait_for_pods_in_workload(p_client, secret_workload2, 3)
validate_workload_with_secret(
p_client, secret_workload2, "deployment", ns.name,
keyvaluepair, workloadwithsecretasenvvar=True, podcount=3)
def validate_ingress_xip_io(ing_name, workload_name):
p_client = namespace["p_client"]
ns = namespace["ns"]
workloads = p_client.list_workload(name=workload_name,
namespaceId=ns.id).data
assert len(workloads) == 1
workload = workloads[0]
ingresses = p_client.list_ingress(name=ing_name,
namespaceId=ns.id).data
assert len(ingresses) == 1
ingress = ingresses[0]
validate_ingress_using_endpoint(p_client, ingress, [workload])
def create_and_validate_service_discovery():
p_client = namespace["p_client"]
ns = namespace["ns"]
cluster = namespace["cluster"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
workload = p_client.create_workload(name=sd_wlname1_create,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
validate_workload(p_client, workload, "daemonSet", ns.name,
len(get_schedulable_nodes(cluster)))
additional_workload = p_client.create_workload(name=sd_wlname2_create,
containers=con,
namespaceId=ns.id,
scale=1)
wait_for_wl_to_active(p_client, additional_workload)
awl_pods = wait_for_pods_in_workload(p_client, additional_workload, 1)
wait_for_pod_to_running(p_client, awl_pods[0])
record = {"type": "dnsRecord",
"targetWorkloadIds": [workload["id"], additional_workload["id"]],
"name": sd_name_create,
"namespaceId": ns.id}
create_dns_record(record, p_client)
validate_service_discovery_upgrade(sd_name_create,
[sd_wlname1_create, sd_wlname2_create])
def validate_service_discovery_upgrade(sd_record_name, workload_names):
p_client = namespace["p_client"]
ns = namespace["ns"]
target_wls = []
for wl_name_create in workload_names:
workloads = p_client.list_workload(
name=wl_name_create, namespaceId=ns.id).data
assert len(workloads) == 1
workload = workloads[0]
target_wls.append(workload)
records = p_client.list_dns_record(
name=sd_record_name, namespaceId=ns.id).data
assert len(records) == 1
record = records[0]
testclient_pods = namespace["testclient_pods"]
expected_ips = []
for wl in target_wls:
pods = p_client.list_pod(workloadId=wl["id"]).data
for pod in pods:
expected_ips.append(pod["status"]["podIp"])
assert len(testclient_pods) > 0
for pod in testclient_pods:
validate_dns_record(pod, record, expected_ips)
def create_validate_workloads_with_secret():
p_client = namespace["p_client"]
ns = namespace["ns"]
secret_name = create_prefix + "-testsecret"
secret_wl_name_create1 = create_prefix + "-testwl1withsec"
secret_wl_name_create2 = create_prefix + "-testwl2withsec"
secret = create_secret(keyvaluepair, p_client=p_client, name=secret_name)
create_and_validate_workload_with_secret_as_volume(
p_client, secret, ns, keyvaluepair, name=secret_wl_name_create1)
create_and_validate_workload_with_secret_as_env_variable(
p_client, secret, ns, keyvaluepair, name=secret_wl_name_create2)
@pytest.fixture(scope='module', autouse="True")
def create_project_client(request):
client = get_user_client()
admin_client = get_admin_client()
clusters = client.list_cluster(name=cluster_name).data
assert len(clusters) == 1
cluster = clusters[0]
create_kubeconfig(cluster)
namespace["cluster"] = cluster
    if len(admin_client.list_catalog(name="test-catalog").data) == 0:
catalog = admin_client.create_catalog(
name="test-catalog",
baseType="catalog",
branch=catalogBranch,
kind="helm",
url=catalogUrl)
catalog = wait_for_catalog_active(admin_client, catalog)
def create_project_resources():
cluster = namespace["cluster"]
p, ns = create_project_and_ns(USER_TOKEN, cluster,
project_name=create_prefix + project_name,
ns_name=create_prefix + ns_name1)
p_client = get_project_client_for_token(p, USER_TOKEN)
namespace["p_client"] = p_client
namespace["ns"] = ns
namespace["project"] = p
namespace["testclient_pods"] = []
# Create pods in existing namespace and new namespace that will be used
# as test clients from which DNS resolution will be tested
wlname = create_prefix + "-testsdclient"
con = [{"name": "test1",
"image": TEST_IMAGE}]
workload = p_client.create_workload(name=wlname,
containers=con,
namespaceId=ns.id,
scale=1)
wait_for_wl_to_active(p_client, workload)
namespace["workload"] = workload
pods = wait_for_pods_in_workload(p_client, workload, 1)
pod = wait_for_pod_to_running(p_client, pods[0])
namespace["testclient_pods"].append(pod)
new_ns = create_ns(get_cluster_client_for_token(cluster, USER_TOKEN),
cluster, p, ns_name=create_prefix + ns_name2)
workload = p_client.create_workload(name=wlname,
containers=con,
namespaceId=new_ns.id,
scale=1)
wait_for_wl_to_active(p_client, workload)
pods = wait_for_pods_in_workload(p_client, workload, 1)
pod = wait_for_pod_to_running(p_client, pods[0])
namespace["testclient_pods"].append(pod)
assert len(namespace["testclient_pods"]) == 2
def validate_existing_project_resources():
cluster = namespace["cluster"]
p_name = validate_prefix + project_name
ns_name = validate_prefix + ns_name1
ns2_name = validate_prefix + ns_name2
# Get existing project
client = get_user_client()
projects = client.list_project(name=p_name,
clusterId=cluster.id).data
assert len(projects) == 1
project = projects[0]
c_client = get_cluster_client_for_token(cluster, USER_TOKEN)
p_client = get_project_client_for_token(project, USER_TOKEN)
# Get existing namespace
nss = c_client.list_namespace(name=ns_name).data
assert len(nss) == 1
ns = nss[0]
# 2nd namespace
nss = c_client.list_namespace(name=ns2_name).data
assert len(nss) == 1
ns2 = nss[0]
# Get existing SD client pods
workload_name = validate_prefix + "-testsdclient"
workloads = p_client.list_workload(name=workload_name,
namespaceId=ns.id).data
assert len(workloads) == 1
wl1_pods = p_client.list_pod(workloadId=workloads[0].id).data
assert len(wl1_pods) == 1
workload_name = validate_prefix + "-testsdclient"
workloads = p_client.list_workload(name=workload_name,
namespaceId=ns2.id).data
assert len(workloads) == 1
wl2_pods = p_client.list_pod(workloadId=workloads[0].id).data
assert len(wl2_pods) == 1
namespace["p_client"] = p_client
namespace["ns"] = ns
namespace["project"] = project
namespace["testclient_pods"] = [wl1_pods[0], wl2_pods[0]]
def validate_workloads_with_secret(workload_name1, workload_name2):
p_client = namespace["p_client"]
ns = namespace["ns"]
wk1 = p_client.list_workload(name=workload_name1, namespace=ns.id).data[0]
wk2 = p_client.list_workload(name=workload_name2, namespace=ns.id).data[0]
validate_workload_with_secret(
p_client, wk1, "deployment", ns.name, keyvaluepair,
workloadwithsecretasVolume=True)
validate_workload_with_secret(
p_client, wk2, "deployment", ns.name, keyvaluepair,
workloadwithsecretasenvvar=True)
def upgrade_rancher_server(serverIp,
sshKeyPath=".ssh/jenkins-rke-validation.pem",
containerName="rancher-server"):
if serverIp.startswith('https://'):
serverIp = serverIp[8:]
stopCommand = "docker stop " + containerName
print(exec_shell_command(serverIp, 22, stopCommand, "",
sshUser, sshKeyPath))
createVolumeCommand = "docker create --volumes-from " + containerName + \
" --name rancher-data rancher/rancher:" + \
rancherVersion
print(exec_shell_command(serverIp, 22, createVolumeCommand, "",
sshUser, sshKeyPath))
removeCommand = "docker rm " + containerName
print(exec_shell_command(serverIp, 22, removeCommand, "",
sshUser, sshKeyPath))
runCommand = "docker run -d --volumes-from rancher-data " \
"--restart=unless-stopped " \
"-p 80:80 -p 443:443 " + upgradeImage + ":" + upgradeVersion + \
" --trace"
print(exec_shell_command(serverIp, 22, runCommand, "",
sshUser, sshKeyPath))
wait_until_active(CATTLE_TEST_URL)
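# To summarize the helper above, it follows the standard single-node Rancher
# upgrade sequence: stop the running rancher-server container, create a
# "rancher-data" container from its volumes, remove the old container, start
# the new image with --volumes-from rancher-data, then wait for the Rancher API
# at CATTLE_TEST_URL to come back up.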
def upgrade_cluster():
print("Upgrading cluster {} to version {}".format(
CLUSTER_NAME, CLUSTER_VERSION))
client, cluster = get_user_client_and_cluster()
if "k3sConfig" in cluster:
k3s_config = cluster.k3sConfig
k3s_updated_config = k3s_config.copy()
k3s_updated_config["kubernetesVersion"] = CLUSTER_VERSION
client.update(cluster, name=cluster.name, k3sConfig=k3s_updated_config)
cluster = get_cluster_by_name(client, CLUSTER_NAME)
assert cluster.k3sConfig["kubernetesVersion"] == CLUSTER_VERSION
elif "rke2Config" in cluster:
rke2_config = cluster.rke2Config
rke2_updated_config = rke2_config.copy()
rke2_updated_config["kubernetesVersion"] = CLUSTER_VERSION
client.update(cluster, name=cluster.name,
rke2Config=rke2_updated_config)
cluster = get_cluster_by_name(client, CLUSTER_NAME)
assert cluster.rke2Config["kubernetesVersion"] == CLUSTER_VERSION
def wait_for_ready_nodes():
client, cluster = get_user_client_and_cluster()
start = time.time()
nodes = client.list_node(clusterId=cluster.id).data
unready_nodes = []
for node in nodes:
unready_nodes.append(node.id)
while unready_nodes and time.time() - start < MACHINE_TIMEOUT:
nodes = client.list_node(clusterId=cluster.id).data
for node in nodes:
if node.info.kubernetes.kubeletVersion == CLUSTER_VERSION:
time.sleep(5)
wait_for_node_status(client, node, "active")
if node.id in unready_nodes:
unready_nodes.remove(node.id)
assert not unready_nodes, "Nodes did not successfully upgrade " \
"within the timeout"
def create_and_validate_catalog_app():
cluster = namespace["cluster"]
p_client = namespace['p_client']
ns = create_ns(get_cluster_client_for_token(cluster, USER_TOKEN),
cluster, namespace["project"], ns_name=app_ns)
print(pre_upgrade_externalId)
app = p_client.create_app(
answers=get_defaut_question_answers(get_user_client(),
pre_upgrade_externalId),
externalId=pre_upgrade_externalId,
name=app_create_name,
projectId=namespace["project"].id,
prune=False,
targetNamespace=ns.id
)
validate_catalog_app(app.name, pre_upgrade_externalId)
def modify_catalog_app():
p_client = namespace["p_client"]
app = wait_for_app_to_active(p_client, app_validate_name)
# upgrade the catalog app to a newer version
p_client.action(obj=app, action_name="upgrade",
answers=get_defaut_question_answers(
get_user_client(),
post_upgrade_externalId),
externalId=post_upgrade_externalId)
validate_catalog_app(app.name, post_upgrade_externalId)
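# A worked example of the external ID parsing done in validate_catalog_app
# below, assuming the catalog external ID format noted earlier: for
# "catalog://?catalog=test-catalog&template=mysql&version=1.3.2", splitting on
# "&" yields ["catalog://?catalog=test-catalog", "template=mysql",
# "version=1.3.2"], so the expected chart label becomes "mysql-1.3.2".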
def validate_catalog_app(app_name, external_id):
p_client = namespace["p_client"]
app = wait_for_app_to_active(p_client, app_name)
assert app.externalId == external_id, \
"the version of the app is not correct"
# check if associated workloads are active
ns = app.targetNamespace
    parameters = external_id.split('&')
    chart = parameters[1].split("=")[1] + "-" + parameters[2].split("=")[1]
workloads = p_client.list_workload(namespaceId=ns).data
assert len(workloads) == 1, "expected only 1 workload in the namespace"
for wl in workloads:
assert wl.state == "active"
assert wl.workloadLabels.chart == chart, \
"the chart version is wrong"
def create_and_validate_ip_address_pods():
get_pods = "get pods --all-namespaces -o wide | grep ' 172.17'"
pods_result = execute_kubectl_cmd(get_pods, json_out=False, stderr=True)
print(pods_result.decode('ascii'))
    assert pods_result.decode('ascii') == '', "Pods have 172.17 IP addresses"
| 26,680 | 35.153117 | 81 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_monitoring.py
|
import pytest
import copy
from .common import * # NOQA
namespace = {
"cluster": None,
"project": None,
"system_project": None,
"system_project_client": None
}
cluster_query_template = {
"obj": None,
"action_name": "query",
"filters": {},
"metricParams": {},
"interval": "5s",
"isDetails": True,
"from": "now-5s",
"to": "now"
}
cluster_graph_list = [
"cluster-network-packet",
"cluster-network-io",
"cluster-disk-io",
"cluster-cpu-load",
"cluster-cpu-usage",
"cluster-fs-usage-percent",
"cluster-memory-usage",
]
etcd_graph_list = [
"etcd-grpc-client",
"etcd-stream",
"etcd-raft-proposals",
"etcd-server-leader-sum",
"etcd-db-bytes-sum",
"etcd-sync-duration",
"etcd-server-failed-proposal",
"etcd-leader-change",
"etcd-rpc-rate",
'etcd-peer-traffic'
]
kube_component_graph_list = [
"scheduler-total-preemption-attempts",
"ingresscontroller-nginx-connection",
"apiserver-request-count",
"controllermanager-queue-depth",
"scheduler-e-2-e-scheduling-latency-seconds-quantile",
"scheduler-pod-unscheduler",
"apiserver-request-latency",
]
node_graph_list = [
"node-network-packet",
"node-network-io",
"node-fs-usage-percent",
"node-cpu-load",
"node-disk-io",
"node-memory-usage",
"node-cpu-usage",
]
rancher_component_graph_list = [
"fluentd-buffer-queue-length",
"fluentd-input-record-number",
"fluentd-output-errors",
"fluentd-output-record-number",
]
workload_graph_list = [
"workload-network-packet",
"workload-memory-usage-bytes-sum",
"workload-cpu-usage",
"workload-network-io",
"workload-disk-io",
]
name_mapping = {
"cluster": cluster_graph_list,
"etcd": etcd_graph_list,
"kube-component": kube_component_graph_list,
"rancher-component": rancher_component_graph_list,
"workload": workload_graph_list,
"node": node_graph_list,
}
STORAGE_CLASS = "longhorn"
ENABLE_STORAGE = os.environ.get('RANCHER_ENABLE_STORAGE_FOR_MONITORING',
"false")
ENABLE_STORAGE = ENABLE_STORAGE.lower()
if ENABLE_STORAGE == "false":
STORAGE_CLASS = "default"
# Longhorn is provided as the persistence storage class
C_MONITORING_ANSWERS = {"operator-init.enabled": "true",
"exporter-node.enabled": "true",
"exporter-node.ports.metrics.port": "9796",
"exporter-kubelets.https": "true",
"exporter-node.resources.limits.cpu": "200m",
"exporter-node.resources.limits.memory": "200Mi",
"operator.resources.limits.memory": "500Mi",
"prometheus.retention": "12h",
"grafana.persistence.enabled": ENABLE_STORAGE,
"prometheus.persistence.enabled": ENABLE_STORAGE,
"prometheus.persistence.storageClass": STORAGE_CLASS,
"grafana.persistence.storageClass": STORAGE_CLASS,
"grafana.persistence.size": "10Gi",
"prometheus.persistence.size": "10Gi",
"prometheus.resources.core.requests.cpu": "750m",
"prometheus.resources.core.limits.cpu": "1000m",
"prometheus.resources.core.requests.memory": "750Mi",
"prometheus.resources.core.limits.memory": "1000Mi",
"prometheus.persistent.useReleaseName": "true"}
P_MONITORING_ANSWER = {"prometheus.retention": "12h",
"grafana.persistence.enabled": "false",
"prometheus.persistence.enabled": "false",
"prometheus.persistence.storageClass": "default",
"grafana.persistence.storageClass": "default",
"grafana.persistence.size": "10Gi",
"prometheus.persistence.size": "10Gi",
"prometheus.resources.core.requests.cpu": "750m",
"prometheus.resources.core.limits.cpu": "1000m",
"prometheus.resources.core.requests.memory": "750Mi",
"prometheus.resources.core.limits.memory": "1000Mi",
"prometheus.persistent.useReleaseName": "true"}
MONITORING_VERSION = os.environ.get('RANCHER_MONITORING_VERSION', "")
MONITORING_TEMPLATE_ID = "cattle-global-data:system-library-rancher-monitoring"
CLUSTER_MONITORING_APP = "cluster-monitoring"
MONITORING_OPERATOR_APP = "monitoring-operator"
PROJECT_MONITORING_APP = "project-monitoring"
GRAFANA_PROJECT_MONITORING = "grafana-project-monitoring"
PROMETHEUS_PROJECT_MONITORING = "prometheus-project-monitoring"
LONGHORN_APP_VERSION = os.environ.get('RANCHER_LONGHORN_VERSION', "1.0.2")
def test_monitoring_cluster_graph():
rancher_client, cluster = get_user_client_and_cluster()
cluster_monitoring_obj = rancher_client.list_clusterMonitorGraph()
# generate the request payload
query1 = copy.deepcopy(cluster_query_template)
query1["obj"] = cluster_monitoring_obj
query1["filters"]["clusterId"] = cluster.id
query1["filters"]["resourceType"] = "cluster"
validate_cluster_graph(query1, "cluster")
def test_monitoring_etcd_graph():
rancher_client, cluster = get_user_client_and_cluster()
cluster_monitoring_obj = rancher_client.list_clusterMonitorGraph()
# generate the request payload
query1 = copy.deepcopy(cluster_query_template)
query1["obj"] = cluster_monitoring_obj
query1["filters"]["clusterId"] = cluster.id
query1["filters"]["resourceType"] = "etcd"
validate_cluster_graph(query1, "etcd")
def test_monitoring_kube_component_graph():
rancher_client, cluster = get_user_client_and_cluster()
cluster_monitoring_obj = rancher_client.list_clusterMonitorGraph()
# generate the request payload
query1 = copy.deepcopy(cluster_query_template)
query1["obj"] = cluster_monitoring_obj
query1["filters"]["clusterId"] = cluster.id
query1["filters"]["displayResourceType"] = "kube-component"
validate_cluster_graph(query1, "kube-component")
# rancher component graphs are from the fluentd app for cluster logging
def test_monitoring_rancher_component_graph():
rancher_client, cluster = get_user_client_and_cluster()
    # check if the cluster logging is enabled, assuming fluentd is used
if cluster.enableClusterAlerting is False:
print("cluster logging is not enabled, skip the test")
return
else:
cluster_monitoring_obj = rancher_client.list_clusterMonitorGraph()
# generate the request payload
query1 = copy.deepcopy(cluster_query_template)
query1["obj"] = cluster_monitoring_obj
query1["filters"]["clusterId"] = cluster.id
query1["filters"]["displayResourceType"] = "rancher-component"
validate_cluster_graph(query1, "rancher-component")
def test_monitoring_node_graph():
rancher_client, cluster = get_user_client_and_cluster()
node_list_raw = rancher_client.list_node(clusterId=cluster.id).data
for node in node_list_raw:
cluster_monitoring_obj = rancher_client.list_clusterMonitorGraph()
# generate the request payload
query1 = copy.deepcopy(cluster_query_template)
query1["obj"] = cluster_monitoring_obj
query1["filters"]["clusterId"] = cluster.id
query1["filters"]["resourceType"] = "node"
query1["metricParams"]["instance"] = node.id
validate_cluster_graph(query1, "node")
def test_monitoring_workload_graph():
rancher_client, cluster = get_user_client_and_cluster()
system_project = rancher_client.list_project(clusterId=cluster.id,
name="System").data[0]
project_monitoring_obj = rancher_client.list_projectMonitorGraph()
# generate the request payload
query1 = copy.deepcopy(cluster_query_template)
query1["obj"] = project_monitoring_obj
query1["filters"]["projectId"] = system_project.id
query1["filters"]["resourceType"] = "workload"
query1["metricParams"]["workloadName"] = \
"deployment:cattle-prometheus:grafana-cluster-monitoring"
validate_cluster_graph(query1, "workload")
def test_monitoring_project_monitoring():
validate_project_monitoring(namespace["project"], USER_TOKEN)
# ------------------ RBAC for Project Monitoring ------------------
@if_test_rbac
def test_rbac_cluster_owner_control_project_monitoring():
# cluster owner can enable and disable monitoring in any project
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
user_client = get_client_for_token(user_token)
project = user_client.reload(rbac_get_project())
if project["enableProjectMonitoring"] is True:
assert "disableMonitoring" in project.actions.keys()
disable_project_monitoring(project, user_token)
validate_project_monitoring(project, user_token)
@if_test_rbac
def test_rbac_cluster_member_control_project_monitoring(remove_resource):
# cluster member can enable and disable monitoring in his project
user_token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
user_client = get_client_for_token(user_token)
# create a new project
project = create_project(user_client, namespace["cluster"])
validate_project_monitoring(project, user_token)
remove_resource(project)
@if_test_rbac
def test_rbac_project_owner_control_project_monitoring():
# project owner can enable and disable monitoring in his project
user_token = rbac_get_user_token_by_role(PROJECT_OWNER)
user_client = get_client_for_token(user_token)
project = user_client.reload(rbac_get_project())
if project["enableProjectMonitoring"] is True:
assert "disableMonitoring" in project.actions.keys()
disable_project_monitoring(project, user_token)
validate_project_monitoring(project, user_token)
@if_test_rbac
def test_rbac_project_member_control_project_monitoring():
# project member can NOT enable and disable monitoring in his project
token = rbac_get_user_token_by_role(PROJECT_MEMBER)
validate_no_permission_project_monitoring(token)
@if_test_rbac
def test_rbac_project_read_only_control_project_monitoring():
# project read-only can NOT enable and disable monitoring in his project
token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
validate_no_permission_project_monitoring(token)
@if_test_rbac
def test_rbac_project_owner_project_graph_1():
# project owner can see graphs in his project
project = rbac_get_project()
wl = rbac_get_workload()
token = rbac_get_user_token_by_role(PROJECT_OWNER)
check_permission_project_graph(project, wl, token, True)
@if_test_rbac
def test_rbac_project_owner_project_graph_2():
# project owner can NOT see graphs in others' project
project = rbac_get_unshared_project()
wl = rbac_get_unshared_workload()
token = rbac_get_user_token_by_role(PROJECT_OWNER)
check_permission_project_graph(project, wl, token, False)
@if_test_rbac
def test_rbac_project_member_project_graph_1():
# project member can see graphs in his project
project = rbac_get_project()
wl = rbac_get_workload()
token = rbac_get_user_token_by_role(PROJECT_MEMBER)
check_permission_project_graph(project, wl, token, True)
@if_test_rbac
def test_rbac_project_member_project_graph_2():
# project member can NOT see graphs in others' project
project = rbac_get_unshared_project()
wl = rbac_get_unshared_workload()
token = rbac_get_user_token_by_role(PROJECT_MEMBER)
check_permission_project_graph(project, wl, token, False)
@if_test_rbac
def test_rbac_project_read_only_project_graph_1():
# project read-only can see graphs in his project
project = rbac_get_project()
wl = rbac_get_workload()
token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
check_permission_project_graph(project, wl, token, True)
@if_test_rbac
def test_rbac_project_read_only_project_graph_2():
# project read-only can NOT see graphs in other's project
project = rbac_get_unshared_project()
wl = rbac_get_unshared_workload()
token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
check_permission_project_graph(project, wl, token, False)
@if_test_rbac
def test_rbac_cluster_owner_project_graph():
# cluster owner can see graphs in all projects
token = rbac_get_user_token_by_role(CLUSTER_OWNER)
project1 = rbac_get_project()
wl1 = rbac_get_workload()
check_permission_project_graph(project1, wl1, token, True)
project2 = rbac_get_unshared_project()
wl2 = rbac_get_unshared_workload()
check_permission_project_graph(project2, wl2, token, True)
@if_test_rbac
def test_rbac_cluster_member_project_graph_1(remove_resource):
# cluster member can see graphs in his project only
token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
project, ns = create_project_and_ns(token,
namespace["cluster"],
random_test_name("cluster-member"))
p_client = get_project_client_for_token(project, token)
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id)
wait_for_wl_to_active(p_client, workload)
remove_resource(project)
check_permission_project_graph(project, workload, token, True)
@if_test_rbac
def test_rbac_cluster_member_project_graph_2():
# cluster member can NOT see graphs in other's project
token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
project = rbac_get_project()
wl = rbac_get_workload()
check_permission_project_graph(project, wl, token, False)
# ------------------ RBAC for Cluster Monitoring ------------------
@if_test_rbac
def test_rbac_project_owner_cluster_graphs():
# project owner can NOT see cluster graphs
token = rbac_get_user_token_by_role(PROJECT_OWNER)
cluster = namespace["cluster"]
check_permission_cluster_graph(cluster, token, False)
@if_test_rbac
def test_rbac_project_member_cluster_graphs():
# project member can NOT see cluster graphs
token = rbac_get_user_token_by_role(PROJECT_MEMBER)
cluster = namespace["cluster"]
check_permission_cluster_graph(cluster, token, False)
@if_test_rbac
def test_rbac_project_read_only_cluster_graphs():
# project read-only can NOT see cluster graphs
token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
cluster = namespace["cluster"]
check_permission_cluster_graph(cluster, token, False)
@if_test_rbac
def test_rbac_cluster_owner_cluster_graphs():
# cluster owner can see cluster graph
token = rbac_get_user_token_by_role(CLUSTER_OWNER)
cluster = namespace["cluster"]
check_permission_cluster_graph(cluster, token, True)
@if_test_rbac
def test_rbac_cluster_member_cluster_graphs():
# cluster member can see cluster graphs
token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
cluster = namespace["cluster"]
check_permission_cluster_graph(cluster, token, True)
@if_test_rbac
def test_rbac_cluster_member_control_cluster_monitoring():
# cluster member can NOT enable or disable the cluster monitoring
token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
validate_no_permission_cluster_monitoring(token)
@if_test_rbac
def test_rbac_project_owner_control_cluster_monitoring():
# project owner can NOT enable or disable the cluster monitoring
token = rbac_get_user_token_by_role(PROJECT_OWNER)
validate_no_permission_cluster_monitoring(token)
@if_test_rbac
def test_rbac_project_member_control_cluster_monitoring():
# project member can NOT enable or disable the cluster monitoring
token = rbac_get_user_token_by_role(PROJECT_MEMBER)
validate_no_permission_cluster_monitoring(token)
@if_test_rbac
def test_rbac_project_read_only_control_cluster_monitoring():
# project read-only can NOT enable or disable the cluster monitoring
token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
validate_no_permission_cluster_monitoring(token)
@pytest.mark.last
@if_test_rbac
def test_rbac_cluster_owner_control_cluster_monitoring():
# cluster owner can enable and disable the cluster monitoring
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
client = get_client_for_token(user_token)
user_client, cluster = get_user_client_and_cluster(client)
if cluster["enableClusterMonitoring"] is True:
assert "disableMonitoring" in cluster.actions.keys()
user_client.action(cluster, "disableMonitoring")
validate_app_deletion(namespace["system_project_client"],
MONITORING_OPERATOR_APP)
validate_app_deletion(namespace["system_project_client"],
CLUSTER_MONITORING_APP)
cluster = user_client.reload(cluster)
assert "enableMonitoring" in cluster.actions.keys()
user_client.action(cluster, "enableMonitoring",
answers=C_MONITORING_ANSWERS,
version=MONITORING_VERSION)
validate_cluster_monitoring_apps()
@pytest.fixture(scope="module", autouse="True")
def setup_monitoring(request):
"""
    Initialize projects and clients, optionally install the longhorn app, and
    enable cluster monitoring with the persistence storageClass set to longhorn
"""
global MONITORING_VERSION
rancher_client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
project = create_project(rancher_client, cluster,
random_test_name("p-monitoring"))
system_project = rancher_client.list_project(clusterId=cluster.id,
name="System").data[0]
sys_proj_client = get_project_client_for_token(system_project, USER_TOKEN)
cluster_client = get_cluster_client_for_token(cluster, USER_TOKEN)
namespace["cluster"] = cluster
namespace["project"] = project
namespace["system_project"] = system_project
namespace["system_project_client"] = sys_proj_client
namespace["cluster_client"] = cluster_client
if ENABLE_STORAGE == "true":
# Deploy Longhorn app from the library catalog
app_name = "longhorn"
ns = create_ns(cluster_client, cluster, project, "longhorn-system")
app_ext_id = create_catalog_external_id('library', app_name,
LONGHORN_APP_VERSION)
answer = get_defaut_question_answers(rancher_client, app_ext_id)
project_client = get_project_client_for_token(project, USER_TOKEN)
try:
app = project_client.create_app(
externalId=app_ext_id,
targetNamespace=ns.name,
projectId=ns.projectId,
answers=answer)
print(app)
validate_catalog_app(project_client, app, app_ext_id, answer)
except (AssertionError, RuntimeError):
assert False, "App {} deployment/Validation failed."\
.format(app_name)
monitoring_template = rancher_client.list_template(
id=MONITORING_TEMPLATE_ID).data[0]
if MONITORING_VERSION == "":
MONITORING_VERSION = monitoring_template.defaultVersion
print("MONITORING_VERSION=" + MONITORING_VERSION)
# Enable cluster monitoring
if cluster["enableClusterMonitoring"] is False:
rancher_client.action(cluster, "enableMonitoring",
answers=C_MONITORING_ANSWERS,
version=MONITORING_VERSION)
validate_cluster_monitoring_apps()
# Wait 5 minutes for all graphs to be available
time.sleep(60 * 5)
def fin():
if ENABLE_STORAGE == "true":
# make sure the longhorn app is deleted properly
# otherwise the namespace longhorn-system will be stuck in removing
project_client.delete(app)
validate_app_deletion(project_client, app.id)
print("uninstalled the longhorn app")
rancher_client.delete(project)
# Disable monitoring
cluster = rancher_client.reload(namespace["cluster"])
if cluster["enableClusterMonitoring"] is True:
rancher_client.action(cluster, "disableMonitoring")
print("disabled the cluster monitoring")
request.addfinalizer(fin)
def check_data(source, target_list):
""" check if any graph is missing or any new graph is introduced"""
if not hasattr(source, "data"):
return False
data = source.get("data")
if len(data) == 0:
print("no graph is received")
return False
target_copy = copy.deepcopy(target_list)
res = []
extra = []
for item in data:
if not hasattr(item, "series"):
return False
if len(item.series) == 0:
print("no data point")
return False
name = item.get("graphID").split(":")[1]
res.append(name)
if name in target_list:
target_copy.remove(name)
else:
extra.append(name)
target_list.sort()
res.sort()
target_copy.sort()
extra.sort()
if len(target_copy) != 0 or len(extra) != 0:
print("target graphs : {}".format(target_list))
print("actual graphs : {}".format(res))
print("missing graphs: {}".format(target_copy))
print("extra graphs : {}".format(extra))
return False
return True
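# A small example of what check_data expects, assuming graphID values of the
# form "<cluster or project id>:<graph name>" (the id shown here is
# illustrative): an item with graphID "c-abc12:cluster-cpu-usage" and a
# non-empty "series" contributes the name "cluster-cpu-usage", which is then
# matched against the expected graph list for that resource type.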
def validate_cluster_graph(action_query, resource_type, timeout=10):
target_graph_list = copy.deepcopy(name_mapping.get(resource_type))
rancher_client, cluster = get_user_client_and_cluster()
# handle the special case that if the graph etcd-peer-traffic is
# is not available if there is only one etcd node in the cluster
if resource_type == "etcd":
nodes = get_etcd_nodes(cluster, rancher_client)
if len(nodes) == 1:
target_graph_list.remove("etcd-peer-traffic")
start = time.time()
if resource_type == "kube-component":
cluster = namespace["cluster"]
k8s_version = cluster.appliedSpec["rancherKubernetesEngineConfig"][
"kubernetesVersion"]
# the following two graphs are available only in k8s 1.15 and 1.16
if not k8s_version[0:5] in ["v1.15", "v1.16"]:
target_graph_list.remove("apiserver-request-latency")
target_graph_list.remove(
"scheduler-e-2-e-scheduling-latency-seconds-quantile")
while True:
res = rancher_client.action(**action_query)
if check_data(res, target_graph_list):
return
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for all graphs to be available")
time.sleep(2)
def wait_for_target_up(token, cluster, project, job):
"""wait for a job's state to be up in Prometheus"""
project_client = get_project_client_for_token(project, token)
app = project_client.list_app(name=PROJECT_MONITORING_APP).data[0]
url = CATTLE_TEST_URL + '/k8s/clusters/' + cluster.id \
+ '/api/v1/namespaces/' + app.targetNamespace \
+ '/services/http:access-prometheus:80/proxy/api/v1/targets'
headers1 = {'Authorization': 'Bearer ' + token}
start = time.time()
while True:
t = requests.get(headers=headers1, url=url, verify=False).json()
if "data" in t.keys():
for item in t["data"]["activeTargets"]:
if "job" in item["labels"].keys():
if item["labels"]["job"] == job and item["health"] == "up":
return
if time.time() - start > DEFAULT_MONITORING_TIMEOUT:
raise AssertionError(
"Timed out waiting for target to be up")
time.sleep(5)
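# For context, the proxied Prometheus /api/v1/targets response polled above is
# assumed to have the usual Prometheus shape, roughly
# {"data": {"activeTargets": [{"labels": {"job": "expose-grafana-metrics", ...},
# "health": "up", ...}, ...]}}; the loop returns once an active target carrying
# the requested job label reports health "up".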
def validate_cluster_monitoring_apps(client=None):
if client is None:
sys_project_client = namespace["system_project_client"]
else:
sys_project_client = client
wait_for_app_to_active(sys_project_client, CLUSTER_MONITORING_APP)
wait_for_app_to_active(sys_project_client, MONITORING_OPERATOR_APP)
def validate_no_permission_cluster_monitoring(user_token):
client = get_client_for_token(user_token)
_, cluster = get_user_client_and_cluster(client)
actions = cluster.actions.keys()
assert "enableMonitoring" not in actions
assert "disableMonitoring" not in actions
assert "editMonitoring" not in actions
def validate_no_permission_project_monitoring(user_token):
user_client = get_client_for_token(user_token)
project = user_client.reload(rbac_get_project())
actions = project.actions.keys()
assert "enableMonitoring" not in actions
assert "disableMonitoring" not in actions
assert "editMonitoring" not in actions
def enable_project_monitoring(project, token):
client = get_client_for_token(token)
user_client, cluster = get_user_client_and_cluster(client)
system_project_client = namespace["system_project_client"]
project = user_client.reload(project)
project_client = get_project_client_for_token(project, token)
# enable the project monitoring
if project["enableProjectMonitoring"] is False:
assert "enableMonitoring" in project.actions.keys()
user_client.action(project, "enableMonitoring",
answers=P_MONITORING_ANSWER,
version=MONITORING_VERSION)
wait_for_app_to_active(project_client, PROJECT_MONITORING_APP)
wait_for_app_to_active(system_project_client, MONITORING_OPERATOR_APP)
# wait for targets to be up
wait_for_target_up(token, cluster, project, "expose-prometheus-metrics")
wait_for_target_up(token, cluster, project, "expose-grafana-metrics")
def disable_project_monitoring(project, token):
user_client = get_client_for_token(token)
project = user_client.reload(project)
p_client = get_project_client_for_token(project, token)
# disable the project monitoring
assert "disableMonitoring" in project.actions.keys()
user_client.action(project, "disableMonitoring")
start = time.time()
while True:
if time.time() - start > 30:
            raise AssertionError(
                "Timed out waiting for project monitoring to be disabled")
apps = p_client.list_app(name=PROJECT_MONITORING_APP)
wl1 = p_client.list_workload(name=PROMETHEUS_PROJECT_MONITORING)
wl2 = p_client.list_workload(name=GRAFANA_PROJECT_MONITORING)
if len(apps.data) == 0 and len(wl1.data) == 0 and len(wl2.data) == 0:
break
def validate_project_prometheus(project, token):
"""
    This function deploys a workload which exposes a metric
    in the target project, and validates that the metric is scraped
    by the project Prometheus.
"""
cluster = namespace["cluster"]
project_client = get_project_client_for_token(project, token)
# deploy a workload to test project monitoring
cluster_client = get_cluster_client_for_token(cluster, token)
ns = create_ns(cluster_client, cluster, project, random_name())
port = {"containerPort": 8080,
"type": "containerPort",
"kind": "NodePort",
"protocol": "TCP"}
metrics = [{"path": "/metrics",
"port": 8080,
"schema": "HTTP"}]
con = [{"name": "test-web",
"image": "loganhz/web",
"ports": [port]}]
wl_name = random_name()
workload = project_client.create_workload(name=wl_name,
containers=con,
namespaceId=ns.id,
workloadMetrics=metrics)
wait_for_wl_to_active(project_client, workload)
app = project_client.list_app(name=PROJECT_MONITORING_APP).data[0]
url = CATTLE_TEST_URL + '/k8s/clusters/' + cluster.id \
+ '/api/v1/namespaces/' + app.targetNamespace \
+ '/services/http:access-prometheus:80/proxy/api/v1/' \
+ 'query?query=web_app_online_user_count'
headers1 = {'Authorization': 'Bearer ' + USER_TOKEN}
start = time.time()
while True:
result = requests.get(headers=headers1, url=url, verify=False).json()
if len(result["data"]["result"]) > 0:
project_client.delete(workload)
return
if time.time() - start > DEFAULT_MONITORING_TIMEOUT:
project_client.delete(workload)
            raise AssertionError(
                "Timed out waiting for the graph data to be available "
                "in Prometheus")
time.sleep(5)
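# The query response checked above is assumed to follow the standard Prometheus
# HTTP API shape, roughly
# {"data": {"result": [{"metric": {...}, "value": [<timestamp>, "<value>"]}]}};
# a non-empty "result" list means the web_app_online_user_count metric exposed
# by the test workload has been scraped by the project-level Prometheus.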
def check_permission_project_graph(project, workload, token, permission=True):
"""
check if the user has the permission to see graphs in the project
:param project: the target project where graphs are from
:param workload: the target workload in the project
:param token: the user's token
:param permission: the user can see graphs if permission is True
:return: None
"""
p_id = project["id"]
client = get_client_for_token(token)
project_monitoring_obj = client.list_project_monitor_graph(projectId=p_id)
graphs_list = project_monitoring_obj.get("data")
if permission:
assert len(graphs_list) > 0
else:
assert len(graphs_list) == 0
query1 = copy.deepcopy(cluster_query_template)
query1["obj"] = project_monitoring_obj
query1["filters"]["projectId"] = p_id
query1["filters"]["resourceType"] = "workload"
query1["metricParams"]["workloadName"] = workload.get("id")
res = client.action(**query1)
if permission:
start_time = time.time()
while time.time() - start_time < DEFAULT_TIMEOUT \
and "data" not in res.keys():
time.sleep(10)
res = client.action(**query1)
assert "data" in res.keys()
assert len(res.get("data")) > 0
else:
assert "data" not in res.keys()
def check_permission_cluster_graph(cluster, token, permission=True):
"""
check if the user has the permission to see graphs in the cluster
:param cluster: the target cluster where graphs are from
:param token: the user's token
:param permission: the user can see graphs if permission is True
:return: None
"""
c_id = cluster["id"]
client = get_client_for_token(token)
cluster_monitoring_obj = client.list_cluster_monitor_graph(clusterId=c_id)
graphs_list = cluster_monitoring_obj.get("data")
if permission:
assert len(graphs_list) > 0
else:
assert len(graphs_list) == 0
query1 = copy.deepcopy(cluster_query_template)
query1["obj"] = cluster_monitoring_obj
query1["filters"]["clusterId"] = cluster.id
query1["filters"]["resourceType"] = "cluster"
res = client.action(**query1)
if permission:
start_time = time.time()
while time.time() - start_time < DEFAULT_TIMEOUT \
and "data" not in res.keys():
time.sleep(10)
res = client.action(**query1)
assert "data" in res.keys()
assert len(res.get("data")) > 0
else:
assert "data" not in res.keys()
def validate_project_monitoring(project, token):
enable_project_monitoring(project, token)
validate_project_prometheus(project, token)
disable_project_monitoring(project, token)
| 31,604 | 37.636919 | 79 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_run_sonobuoy.py
|
import base64
import os
import time
from .common import *
RANCHER_SONOBUOY_VERSION = os.environ.get("RANCHER_SONOBUOY_VERSION", "0.18.2")
RANCHER_K8S_VERSION = os.environ.get("RANCHER_K8S_VERSION", "v1.18.2")
RANCHER_SONOBUOY_MODE = os.environ.get("RANCHER_SONOBUOY_MODE",
"certified-conformance")
RANCHER_KUBECONFIG = os.environ.get("RANCHER_KUBECONFIG")
RANCHER_FAILED_TEST = os.environ.get("RANCHER_FAILED_TEST")
DATA_SUBDIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'resource')
def test_sonobuoy_results():
config = base64.b64decode(RANCHER_KUBECONFIG).decode("utf-8")
kubeconfig = DATA_SUBDIR + "/config"
print(kubeconfig)
with open(kubeconfig, 'w') as f:
f.write(config)
run_sonobuoy_test(kubeconfig)
get_sonobuoy_results(kubeconfig)
delete_sonobuoy_from_cluster(kubeconfig)
def run_sonobuoy_test(kubeconfig):
if not RANCHER_FAILED_TEST:
cmd = "sonobuoy run --mode={0} --kube-conformance-image-version={1} --kubeconfig={2}".format(RANCHER_SONOBUOY_MODE, RANCHER_K8S_VERSION, kubeconfig)
else:
cmd = "sonobuoy run {0} --kube-conformance-image-version={1} --kubeconfig={2}".format(RANCHER_FAILED_TEST, RANCHER_K8S_VERSION, kubeconfig)
status = run_command(cmd)
time.sleep(60)
def get_sonobuoy_results(kubeconfig):
cmd = "sonobuoy status --kubeconfig={0}".format(kubeconfig)
status = run_command(cmd)
print(status)
while "running" in status or "Pending" in status:
status = run_command(cmd, log_out=False)
time.sleep(120)
cmd = "sonobuoy status --kubeconfig={0}".format(kubeconfig)
status = run_command(cmd)
print(status)
cmd = "sonobuoy retrieve --kubeconfig={0}".format(kubeconfig)
result = run_command(cmd, log_out=False)
cmd = "tar xzf {0}".format(result)
status = run_command(cmd, log_out=False)
filepath = "./plugins/e2e/results/global/e2e.log"
is_file = os.path.isfile(filepath)
assert is_file
cmd = "sonobuoy results {0}".format(result)
result = run_command(cmd)
print(result)
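# The polling above keys off the status strings that "sonobuoy status" prints
# (plugins report e.g. "running" or "Pending" while the conformance run is in
# progress); the exact output layout depends on the sonobuoy version, so only
# the presence of those substrings is checked before retrieving the results.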
def delete_sonobuoy_from_cluster(kubeconfig):
cmd = "sonobuoy delete --all --wait --kubeconfig={0}".format(kubeconfig)
result = run_command(cmd)
print(result)
| 2,309 | 32.970588 | 156 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_helm_v3.py
|
from .common import * # NOQA
import pytest
CATALOG_NAME = random_test_name("test-v3")
CATALOG_URL = "https://github.com/rancher/integration-test-charts.git"
BRANCH = "validation-tests"
MYSQL_EXTERNALID_131 = \
create_catalog_external_id(CATALOG_NAME, "mysql", "1.3.1")
MYSQL_EXTERNALID_132 = \
create_catalog_external_id(CATALOG_NAME, "mysql", "1.3.2")
cluster_detail = {"cluster1": {"project1": None, "namespace1": None,
"cluster": None}}
def test_helm_v3_app_deploy():
client = get_user_client()
answer = get_defaut_question_answers(client, MYSQL_EXTERNALID_131)
proj_client, ns, project = get_project_details("cluster1")
app = create_and_validate_app(
proj_client, MYSQL_EXTERNALID_131, ns, project, answer)
proj_client.delete(app)
def test_helm_v3_app_delete():
client = get_user_client()
answer = get_defaut_question_answers(client, MYSQL_EXTERNALID_131)
proj_client, ns, project = get_project_details("cluster1")
app = create_and_validate_app(
proj_client, MYSQL_EXTERNALID_131, ns, project, answer)
app = proj_client.delete(app)
validate_app_deletion(proj_client, app.id)
def test_helm_v3_app_upgrade_version():
client = get_user_client()
answer = get_defaut_question_answers(client, MYSQL_EXTERNALID_131)
proj_client, ns, project = get_project_details("cluster1")
# deploy app
app = create_and_validate_app(
proj_client, MYSQL_EXTERNALID_131, ns, project, answer)
new_answers = get_defaut_question_answers(client, MYSQL_EXTERNALID_132)
# update app
app = update_and_validate_app(
app, proj_client, MYSQL_EXTERNALID_132, ns, project, new_answers)
proj_client.delete(app)
def test_helm_v3_app_rollback():
client = get_user_client()
answer = get_defaut_question_answers(client, MYSQL_EXTERNALID_131)
proj_client, ns, project = get_project_details("cluster1")
# deploy app
app = create_and_validate_app(
proj_client, MYSQL_EXTERNALID_131, ns, project, answer)
rev_id = app.appRevisionId
new_answer = get_defaut_question_answers(client, MYSQL_EXTERNALID_132)
# update app
app = update_and_validate_app(
app, proj_client, MYSQL_EXTERNALID_132, ns, project, new_answer)
proj_client.action(obj=app,
action_name='rollback',
revisionId=rev_id)
app = proj_client.reload(app)
app = validate_catalog_app(proj_client, app, MYSQL_EXTERNALID_131)
proj_client.delete(app)
def test_helm_v3_app_answer_override():
client = get_user_client()
answer = get_defaut_question_answers(client, MYSQL_EXTERNALID_131)
proj_client, ns, project = get_project_details("cluster1")
# deploy app
app = create_and_validate_app(
proj_client, MYSQL_EXTERNALID_131, ns, project, answer
)
answer["mysqlUser"] = "admin1234"
# update app
app = update_and_validate_app(
app, proj_client, MYSQL_EXTERNALID_132, ns, project, answer
)
    assert app["answers"].mysqlUser == "admin1234", \
        "answers were not overridden correctly"
proj_client.delete(app)
@pytest.fixture(scope='module', autouse="True")
def create_project_client(request):
client, cluster_existing = get_user_client_and_cluster()
cluster_detail["cluster1"]["cluster"] = cluster_existing
cluster_detail["cluster1"]["project1"], \
cluster_detail["cluster1"]["namespace1"] =\
create_project_and_ns(
USER_TOKEN,
cluster_existing,
random_test_name("test-helmv3")
)
# add catalog
admin_client = get_admin_client()
v3_catalog = admin_client.create_catalog(
name=CATALOG_NAME,
baseType="catalog",
branch=BRANCH,
kind="helm",
url=CATALOG_URL,
helmVersion="helm_v3")
assert v3_catalog["helmVersion"] == \
"helm_v3", "Helm version is not helm_v3"
time.sleep(5)
def fin():
admin_client.delete(v3_catalog)
admin_client.delete(cluster_detail["cluster1"]["namespace1"])
admin_client.delete(cluster_detail["cluster1"]["project1"])
request.addfinalizer(fin)
def create_and_validate_app(proj_client, externalid, ns, project, answer):
"""
:param proj_client: Project client of the project
where the app will be deployed
:param externalid: App's external ID
:param ns: namespace
:param project: project
:param answer: answers for the app with external_id: externalid
:return: app
"""
app = proj_client.create_app(
name=random_test_name(),
externalId=externalid,
targetNamespace=ns,
projectId=project,
answers=answer)
app = validate_catalog_app(proj_client, app, externalid)
assert app["helmVersion"] == "helm_v3", "Helm version is not helm_v3"
return app
def update_and_validate_app(app, proj_client, externalid, ns, project, answer):
"""
:param app: app object to be updated
:param proj_client: Project client of the project
where the app will be deployed
:param externalid: App's external ID
:param ns: namespace
:param project: project
:param answer: answers for the app with external_id: externalid
:return: app
"""
app = proj_client.update(
obj=app,
externalId=externalid,
targetNamespace=ns,
projectId=project,
answers=answer)
app = validate_catalog_app(proj_client, app, externalid, answer)
assert app["helmVersion"] == "helm_v3", "Helm version is not helm_v3"
return app
def get_project_details(cluster):
"""
:param cluster: cluster is a "key" in the
cluster_detail pointing to the cluster
:return: proj_client, ns, project
"""
proj_client = get_project_client_for_token(
cluster_detail[cluster]["project1"],
USER_TOKEN
)
ns = cluster_detail["cluster1"]["namespace1"].name
project = cluster_detail["cluster1"]["project1"].id
return proj_client, ns, project
| 6,041 | 33.525714 | 79 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_bkp_restore_s3_with_iam.py
|
import pytest
from .common import * # NOQA
from .test_rke_cluster_provisioning import rke_config, engine_install_url, \
validate_rke_dm_host_2
namespace = {"p_client": None, "ns": None, "cluster": None, "project": None,
"nodes": []}
backup_info = {"backupname": None, "backup_id": None, "workload": None,
"backupfilename": None, "etcdbackupdata": None}
@if_test_all_snapshot
def test_bkp_restore_s3_with_iam_create():
validate_backup_create(namespace, backup_info, "s3")
@if_test_all_snapshot
def test_bkp_restore_s3_with_iam_restore():
ns, binfo = validate_backup_create(namespace, backup_info, "s3")
validate_backup_restore(ns, binfo)
@if_test_all_snapshot
def test_bkp_restore_s3_with_iam_delete():
ns, binfo = validate_backup_create(namespace, backup_info, "s3")
ns, binfo = validate_backup_restore(ns, binfo)
validate_backup_delete(ns, binfo, "s3")
@pytest.fixture(scope='module', autouse="True")
def create_project_client_and_cluster_s3_with_iam(request):
rke_config["services"]["etcd"]["backupConfig"] = {
"enabled": "true",
"intervalHours": 12,
"retention": 6,
"type": "backupConfig",
"s3BackupConfig": {
"type": "s3BackupConfig",
"accessKey": "",
"secretKey": "",
"bucketName": AWS_S3_BUCKET_NAME,
"folder": AWS_S3_BUCKET_FOLDER_NAME,
"region": AWS_REGION,
"endpoint": "s3.amazonaws.com"
}
}
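    # accessKey/secretKey are intentionally left empty: the nodes created from
    # node_template_ec2_iam() carry an IAM instance profile, so the etcd S3
    # backups are expected to authenticate through the instance role instead
    # of static credentials.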
cluster_name = random_name()
validate_rke_dm_host_2(node_template_ec2_iam(),
rke_config, False, cluster_name)
client = get_user_client()
cluster = get_cluster_by_name(client, cluster_name)
p, ns = create_project_and_ns(USER_TOKEN, cluster, "testiam")
p_client = get_project_client_for_token(p, USER_TOKEN)
c_client = get_cluster_client_for_token(cluster, USER_TOKEN)
namespace["p_client"] = p_client
namespace["ns"] = ns
namespace["cluster"] = cluster
namespace["project"] = p
namespace["c_client"] = c_client
def fin():
client = get_user_client()
cluster_cleanup(client, cluster)
request.addfinalizer(fin)
def node_template_ec2_iam():
client = get_user_client()
ec2_cloud_credential_config = {"accessKey": AWS_ACCESS_KEY_ID,
"secretKey": AWS_SECRET_ACCESS_KEY}
ec2_cloud_credential = client.create_cloud_credential(
amazonec2credentialConfig=ec2_cloud_credential_config
)
amazonec2Config = {
"iamInstanceProfile": AWS_IAM_PROFILE,
"instanceType": "t3a.medium",
"region": AWS_REGION,
"rootSize": "16",
"securityGroup": [AWS_SG],
"sshUser": "ubuntu",
"subnetId": AWS_SUBNET,
"usePrivateAddress": False,
"volumeType": "gp2",
"vpcId": AWS_VPC,
"zone": AWS_ZONE
}
node_template = client.create_node_template(
amazonec2Config=amazonec2Config,
name=random_name(),
useInternalIpAddress=True,
driver="amazonec2",
engineInstallURL=engine_install_url,
cloudCredentialId=ec2_cloud_credential.id
)
node_template = client.wait_success(node_template)
return node_template
| 3,289 | 31.9 | 76 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_vmwarevsphere_driver.py
|
import copy
import pytest
from .common import * # NOQA
RANCHER_VSPHERE_USERNAME = os.environ.get("RANCHER_VSPHERE_USERNAME", "")
RANCHER_VSPHERE_PASSWORD = os.environ.get("RANCHER_VSPHERE_PASSWORD", "")
RANCHER_VSPHERE_VCENTER = os.environ.get("RANCHER_VSPHERE_VCENTER", "")
RANCHER_VSPHERE_VCENTER_PORT = \
os.environ.get("RANCHER_VSPHERE_VCENTER_PORT", 443)
RANCHER_CLEANUP_CLUSTER = \
ast.literal_eval(os.environ.get('RANCHER_CLEANUP_CLUSTER', "True"))
CLUSTER_NAME = os.environ.get("RANCHER_CLUSTER_NAME",
random_name() + "-cluster")
ENGINE_INSTALL_URL = os.environ.get("RANCHER_ENGINE_INSTALL_URL",
"https://get.docker.com/")
CLONE_FROM = \
os.environ.get("RANCHER_CLONE_FROM",
"/RNCH-HE-FMT/vm/ubuntu-bionic-18.04-cloudimg")
RESOURCE_POOL = \
os.environ.get("RANCHER_RESOURCE_POOL",
"/RNCH-HE-FMT/host/FMT2.R620.1/Resources/validation-tests")
DATASTORE = \
os.environ.get("RANCHER_DATASTORE",
"/RNCH-HE-FMT/datastore/ranch01-silo01-vm01")
DATASTORE_CLUSTER = \
os.environ.get("RANCHER_DATASTORE_CLUSTER",
"/RNCH-HE-FMT/datastore/ds_cluster")
CLOUD_CONFIG = \
os.environ.get("RANCHER_CLOUD_CONFIG",
"#cloud-config\r\npackages:\r\n - redis-server")
rke_config = {
"addonJobTimeout": 30,
"authentication":
{"strategy": "x509",
"type": "authnConfig"},
"ignoreDockerVersion": True,
"ingress":
{"provider": "nginx",
"type": "ingressConfig"},
"monitoring":
{"provider": "metrics-server",
"type": "monitoringConfig"},
"network":
{"plugin": "canal",
"type": "networkConfig",
"options": {"flannelBackendType": "vxlan"}},
"services": {
"etcd": {
"extraArgs":
{"heartbeat-interval": 500,
"election-timeout": 5000},
"snapshot": False,
"backupConfig":
{"intervalHours": 12, "retention": 6, "type": "backupConfig"},
"creation": "12h",
"retention": "72h",
"type": "etcdService"},
"kubeApi": {
"alwaysPullImages": False,
"podSecurityPolicy": False,
"serviceNodePortRange": "30000-32767",
"type": "kubeAPIService"}},
"sshAgentAuth": False}
vsphereConfig = {
"cfgparam": ["disk.enableUUID=TRUE"],
"cloneFrom": CLONE_FROM,
"cloudinit": "",
"contentLibrary": "",
"cpuCount": "4",
"creationType": "vm",
"customAttribute": ["203=CustomA", "204=CustomB"],
"datacenter": "/RNCH-HE-FMT",
"datastore": "",
"datastoreCluster": "",
"diskSize": "20000",
"folder": "/",
"hostsystem": "",
"memorySize": "16000",
"network": ["/RNCH-HE-FMT/network/Private Range 172.16.128.1-21"],
"password": "",
"pool": RESOURCE_POOL,
"sshPassword": "tcuser",
"sshPort": "22",
"sshUser": "docker",
"sshUserGroup": "staff",
"tag": [
"urn:vmomi:InventoryServiceTag:04ffafd0-d7de-440c-a32c-5cd98761f812:GLOBAL",
"urn:vmomi:InventoryServiceTag:d00f1cf2-6822-46a0-9602-679ea56efd57:GLOBAL"
],
"type": "vmwarevsphereConfig",
"username": "",
"vappIpallocationpolicy": "",
"vappIpprotocol": "",
"vappProperty": "",
"vappTransport": "",
"vcenter": "",
"vcenterPort": "443",
}
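# Fields left empty above (username, password, vcenter, datastore,
# datastoreCluster, cloudinit, ...) are filled in at runtime: the vCenter
# credentials come from the cloud credential created in
# create_vsphere_credential(), while datastore, datastore cluster and cloud
# config are injected per node template in create_vsphere_nodetemplate().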
if_vsphere_var_present = pytest.mark.skipif(
RANCHER_VSPHERE_USERNAME == '' or
RANCHER_VSPHERE_PASSWORD == '' or
RANCHER_VSPHERE_VCENTER == '',
reason='required env variables are not present')
@if_vsphere_var_present
@pytest.mark.usefixtures("create_cluster")
def test_vsphere_provisioning():
client = get_client_for_token(USER_TOKEN)
cluster = get_cluster_by_name(client=client, name=CLUSTER_NAME)
nodes = client.list_node(clusterId=cluster.id).data
assert 4 == len(nodes)
validate_cluster(client, cluster, skipIngresscheck=False)
@pytest.fixture(scope='module', autouse="True")
def create_cluster(request):
client = get_client_for_token(USER_TOKEN)
cloud_cred = create_vsphere_credential(client)
nt = create_vsphere_nodetemplate(
client, cloud_cred, datastore=DATASTORE)
ntcc = create_vsphere_nodetemplate(
client, cloud_cred, datastore=DATASTORE, cloud_config=CLOUD_CONFIG)
ntdsc = create_vsphere_nodetemplate(
client, cloud_cred, datastore_cluster=DATASTORE_CLUSTER)
cluster = client.create_cluster(
name=CLUSTER_NAME,
rancherKubernetesEngineConfig=rke_config)
    # Allow some time for the "cluster_owner" CRTB to take effect
time.sleep(5)
request.addfinalizer(cluster_cleanup)
master_pool = client.create_node_pool({
"type": "nodetemplate",
"clusterId": cluster.id,
"controlPlane": True,
"etcd": True,
"hostnamePrefix": CLUSTER_NAME + "-master",
"nodeTemplateId": nt.id,
"quantity": 1,
"worker": False,
})
worker_pool1 = client.create_node_pool({
"type": "nodetemplate",
"clusterId": cluster.id,
"controlPlane": False,
"etcd": False,
"hostnamePrefix": CLUSTER_NAME + "-worker",
"nodeTemplateId": nt.id,
"quantity": 1,
"worker": True,
})
worker_pool2 = client.create_node_pool({
"type": "nodetemplate",
"clusterId": cluster.id,
"controlPlane": False,
"etcd": False,
"hostnamePrefix": CLUSTER_NAME + "-worker-cc",
"nodeTemplateId": ntcc.id,
"quantity": 1,
"worker": True,
})
worker_pool3 = client.create_node_pool({
"type": "nodetemplate",
"clusterId": cluster.id,
"controlPlane": False,
"etcd": False,
"hostnamePrefix": CLUSTER_NAME + "-worker-dsc",
"nodeTemplateId": ntdsc.id,
"quantity": 1,
"worker": True,
})
client.wait_success(master_pool)
client.wait_success(worker_pool1)
client.wait_success(worker_pool2)
client.wait_success(worker_pool3)
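    # 4 nodes in total: one control plane/etcd node plus three workers, one
    # per node template variant (plain, cloud-config, datastore cluster)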
wait_for_cluster_node_count(client, cluster, 4, timeout=900)
def create_vsphere_credential(client):
return client.create_cloud_credential(
name=random_name(),
vmwarevspherecredentialConfig={
"username": RANCHER_VSPHERE_USERNAME,
"password": RANCHER_VSPHERE_PASSWORD,
"vcenter": RANCHER_VSPHERE_VCENTER,
"vcenterPort": RANCHER_VSPHERE_VCENTER_PORT,
}
)
def cluster_cleanup():
if not RANCHER_CLEANUP_CLUSTER:
return
client = get_client_for_token(USER_TOKEN)
cluster = get_cluster_by_name(client=client, name=CLUSTER_NAME)
nodes = get_schedulable_nodes(cluster)
delete_cluster(client, cluster)
for node in nodes:
wait_for_node_to_be_deleted(client, node)
def create_vsphere_nodetemplate(
client, cloud_cred, cloud_config="", datastore="",
datastore_cluster=""):
vc = copy.copy(vsphereConfig)
if cloud_config != "":
vc["cloudConfig"] = cloud_config
if datastore != "":
vc["datastore"] = datastore
if datastore_cluster != "":
vc["datastoreCluster"] = datastore_cluster
return client.create_node_template({
"vmwarevsphereConfig": vc,
"name": random_name(),
"namespaceId": "fixme",
"useInternalIpAddress": True,
"driver": "vmwarevsphere",
"engineInstallURL": ENGINE_INSTALL_URL,
"cloudCredentialId": cloud_cred.id,
})
| 7,582 | 31.82684 | 84 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_secrets.py
|
import base64
from rancher import ApiError
import pytest
from .common import * # NOQA
CLUSTER_NAME = os.environ.get("CLUSTER_NAME", "")
namespace = {"p_client": None, "ns": None, "cluster": None, "project": None}
def test_secret_create_all_ns():
"""
Verify creation of secrets is functional
"""
p_client = namespace["p_client"]
ns = namespace["ns"]
# Value is base64 encoded
value = base64.b64encode(b"valueall")
keyvaluepair = {"testall": value.decode('utf-8')}
cluster = namespace["cluster"]
project = namespace["project"]
c_client = namespace["c_client"]
new_ns = create_ns(c_client, cluster, project)
namespacelist = [ns, new_ns]
secret = create_secret(keyvaluepair)
# Create workloads with secret in existing namespaces
for ns in namespacelist:
create_and_validate_workload_with_secret_as_volume(p_client, secret,
ns,
keyvaluepair)
create_and_validate_workload_with_secret_as_env_variable(p_client,
secret,
ns,
keyvaluepair)
# Create a new namespace and workload in the new namespace using the secret
new_ns1 = create_ns(c_client, cluster, project)
create_and_validate_workload_with_secret_as_volume(p_client,
secret,
new_ns1,
keyvaluepair)
create_and_validate_workload_with_secret_as_env_variable(p_client,
secret,
new_ns1,
keyvaluepair)
c_client.delete(new_ns)
def test_secret_create_single_ns():
"""
    Verify creation of secrets in a single namespace is functional
"""
p_client = namespace["p_client"]
ns = namespace["ns"]
# Value is base64 encoded
value = base64.b64encode(b"valueall")
keyvaluepair = {"testall": value.decode('utf-8')}
secret = create_secret(keyvaluepair, singlenamespace=True)
# Create workloads with secret in existing namespace
create_and_validate_workload_with_secret_as_volume(p_client, secret, ns,
keyvaluepair)
create_and_validate_workload_with_secret_as_env_variable(p_client, secret,
ns, keyvaluepair)
def test_secret_delete_all_ns():
"""
Verify Deletion of secrets is functional
"""
p_client = namespace["p_client"]
ns = namespace["ns"]
# Value is base64 encoded
value = base64.b64encode(b"valuealldelete")
keyvaluepair = {"testalldelete": value.decode('utf-8')}
secret = create_secret(keyvaluepair)
delete_secret(p_client, secret, ns, keyvaluepair)
def test_secret_delete_single_ns():
p_client = namespace["p_client"]
ns = namespace["ns"]
# Value is base64 encoded
value = base64.b64encode(b"valuealldelete")
keyvaluepair = {"testalldelete": value.decode('utf-8')}
secret = create_secret(keyvaluepair, singlenamespace=True)
delete_secret(p_client, secret, ns, keyvaluepair)
def test_secret_edit_all_ns():
p_client = namespace["p_client"]
name = random_test_name("default")
# Value is base64 encoded
value = base64.b64encode(b"valueall")
keyvaluepair = {"testall": value.decode('utf-8')}
cluster = namespace["cluster"]
project = namespace["project"]
c_client = namespace["c_client"]
# Create a namespace
new_ns = create_ns(c_client, cluster, project)
secret = create_secret(keyvaluepair)
# Value is base64 encoded
value1 = base64.b64encode(b"valueall")
value2 = base64.b64encode(b"valueallnew")
updated_dict = {"testall": value1.decode(
'utf-8'), "testallnew": value2.decode('utf-8')}
updated_secret = p_client.update(secret, name=name, namespaceId='NULL',
data=updated_dict)
assert updated_secret['baseType'] == "secret"
updatedsecretdata = updated_secret['data']
print("UPDATED SECRET DATA")
print(updatedsecretdata)
assert updatedsecretdata.data_dict() == updated_dict
# Create workloads using updated secret in the existing namespace
create_and_validate_workload_with_secret_as_volume(p_client, secret,
new_ns,
updatedsecretdata)
create_and_validate_workload_with_secret_as_env_variable(
p_client, secret, new_ns, updatedsecretdata)
# Create a new namespace and workloads in the new namespace using secret
new_ns1 = create_ns(c_client, cluster, project)
create_and_validate_workload_with_secret_as_volume(p_client, secret,
new_ns1,
updatedsecretdata)
create_and_validate_workload_with_secret_as_env_variable(
p_client, secret, new_ns1, updatedsecretdata)
c_client.delete(new_ns)
def test_secret_edit_single_ns():
p_client = namespace["p_client"]
ns = namespace["ns"]
name = random_test_name("default")
# Value is base64 encoded
value = base64.b64encode(b"valueall")
keyvaluepair = {"testall": value.decode('utf-8')}
secret = create_secret(keyvaluepair, singlenamespace=True)
value1 = base64.b64encode(b"valueall")
value2 = base64.b64encode(b"valueallnew")
updated_dict = {"testall": value1.decode(
'utf-8'), "testallnew": value2.decode('utf-8')}
updated_secret = p_client.update(secret, name=name,
namespaceId=ns['name'],
data=updated_dict)
assert updated_secret['baseType'] == "namespacedSecret"
updatedsecretdata = updated_secret['data']
print("UPDATED SECRET DATA")
print(updatedsecretdata)
assert updatedsecretdata.data_dict() == updated_dict
# Create a workload with the updated secret in the existing namespace
create_and_validate_workload_with_secret_as_volume(p_client, secret,
ns,
updatedsecretdata)
create_and_validate_workload_with_secret_as_env_variable(
p_client, secret, ns, updatedsecretdata)
rbac_role_list = [
(CLUSTER_OWNER),
(PROJECT_OWNER),
(PROJECT_MEMBER),
]
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_secret_create(role):
"""
Verify creation of secrets for Cluster owner, project owner and project
member
"""
token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
p_client = get_project_client_for_token(project, token)
rbac_secret_create(p_client, ns)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_secret_edit(role):
"""
Verify editing of secrets for Cluster owner, project owner and project
member
"""
token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
p_client = get_project_client_for_token(project, token)
rbac_secret_edit(p_client, ns, project=project)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_secret_delete(role):
"""
Verify deletion of secrets for Cluster owner, project owner and project
member
"""
user_token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
p_client = get_project_client_for_token(project, user_token)
rbac_secret_delete(p_client, ns)
@if_test_rbac
def test_rbac_secret_create_cluster_member(remove_resource):
"""
Verify cluster member can create secret and deploy workload using secret
in the project he created
"""
user_token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
project, ns = \
create_project_and_ns(user_token, namespace["cluster"],
random_test_name("rbac-cluster-mem"),
ns_name=random_test_name("ns-cluster-mem"))
p_client = get_project_client_for_token(project, user_token)
rbac_secret_create(p_client, ns)
# Create a project as cluster owner and verify the cluster member cannot
# create secret in this project
keyvaluepair = {"testall": "valueall"}
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
ownerproject, ns = \
create_project_and_ns(cluster_owner_token,
namespace["cluster"],
random_test_name("rbac-cluster-owner"))
cluster_member_client = get_project_client_for_token(ownerproject,
user_token)
remove_resource(project)
remove_resource(ownerproject)
with pytest.raises(ApiError) as e:
create_secret(keyvaluepair, singlenamespace=False,
p_client=cluster_member_client)
assert e.value.error.status == 403
assert e.value.error.code == 'PermissionDenied'
@if_test_rbac
def test_rbac_secret_edit_cluster_member(remove_resource):
"""
Verify cluster member can create secret and edit secret in the project he
created
"""
user_token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
project, ns = \
create_project_and_ns(user_token, namespace["cluster"],
random_test_name("rbac-cluster-mem"),
ns_name=random_test_name("ns-cluster-mem"))
p_client = get_project_client_for_token(project, user_token)
rbac_secret_edit(p_client, ns, project=project)
# Create a project as cluster owner and verify the cluster member cannot
# edit secret in this project
keyvaluepair = {"testall": "valueall"}
value1 = ("valueall")
value2 = ("valueallnew")
updated_dict = {"testall": value1, "testallnew": value2}
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
ownerproject, ns = create_project_and_ns(
cluster_owner_token,
namespace["cluster"],
random_test_name("rbac-cluster-owner"))
cluster_owner_client = get_project_client_for_token(ownerproject,
cluster_owner_token)
cluster_member_client = get_project_client_for_token(ownerproject,
user_token)
ownersecret = create_secret(keyvaluepair, singlenamespace=False,
p_client=cluster_owner_client)
remove_resource(project)
remove_resource(ownerproject)
with pytest.raises(ApiError) as e:
cluster_member_client.update(ownersecret, namespaceId='NULL',
data=updated_dict)
assert e.value.error.status == 404
assert e.value.error.code == 'NotFound'
@if_test_rbac
def test_rbac_secret_delete_cluster_member(remove_resource):
"""
Verify cluster member can create secret and delete secret in the project he
created
"""
keyvaluepair = {"testall": "valueall"}
user_token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
project, ns = \
create_project_and_ns(user_token, namespace["cluster"],
random_test_name("rbac-cluster-mem"),
ns_name=random_test_name("ns-cluster-mem"))
p_client = get_project_client_for_token(project, user_token)
rbac_secret_delete(p_client, ns)
# Create a project as cluster owner and verify the cluster member cannot
# delete secret in this project
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
ownerproject, ns = create_project_and_ns(
cluster_owner_token,
namespace["cluster"],
random_test_name("rbac-cluster-owner"))
cluster_owner_client = get_project_client_for_token(ownerproject,
cluster_owner_token)
cluster_member_client = get_project_client_for_token(ownerproject,
user_token)
ownersecret = create_secret(keyvaluepair, singlenamespace=False,
p_client=cluster_owner_client)
remove_resource(project)
remove_resource(ownerproject)
with pytest.raises(ApiError) as e:
delete_secret(cluster_member_client, ownersecret, ns, keyvaluepair)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
def test_rbac_secret_create_project_readonly():
"""
Verify read-only user cannot create secret
"""
project = rbac_get_project()
user_token1 = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
readonly_user_client = get_project_client_for_token(project, user_token1)
keyvaluepair = {"testall": "valueall"}
# Read Only member cannot create secrets
with pytest.raises(ApiError) as e:
create_secret(keyvaluepair, singlenamespace=False,
p_client=readonly_user_client)
assert e.value.error.status == 403
assert e.value.error.code == 'PermissionDenied'
@if_test_rbac
def test_rbac_secret_edit_project_readonly_member(remove_resource):
"""
Verify read-only user cannot edit secret
"""
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
project = rbac_get_project()
ns = rbac_get_namespace()
user_token1 = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
readonly_user_client = get_project_client_for_token(project, user_token1)
keyvaluepair = {"testall": "valueall"}
cluster_owner_p_client = get_project_client_for_token(project,
cluster_owner_token)
# As a cluster owner, create a secret
secret = create_secret(keyvaluepair, p_client=cluster_owner_p_client,
ns=ns)
# Readonly member cannot edit secret
value1 = ("valueall")
value2 = ("valueallnew")
updated_dict = {"testall": value1, "testallnew": value2}
remove_resource(secret)
with pytest.raises(ApiError) as e:
readonly_user_client.update(secret,
namespaceId=ns['name'],
data=updated_dict)
assert e.value.error.status == 404
assert e.value.error.code == 'NotFound'
@if_test_rbac
def test_rbac_secret_delete_project_readonly(remove_resource):
"""
Verify read-only user cannot delete secret
"""
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
project = rbac_get_project()
ns = rbac_get_namespace()
user_token1 = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
readonly_user_client = get_project_client_for_token(project, user_token1)
keyvaluepair = {"testall": "valueall"}
cluster_owner_p_client = get_project_client_for_token(project,
cluster_owner_token)
# As a cluster owner, create a secret
secret = create_secret(keyvaluepair, p_client=cluster_owner_p_client,
ns=ns)
remove_resource(secret)
# Assert read-only user cannot delete the secret
with pytest.raises(ApiError) as e:
delete_secret(readonly_user_client, secret, ns, keyvaluepair)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_secret_list(remove_resource, role):
user_token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
p_client = get_project_client_for_token(project, user_token)
rbac_secret_list(p_client)
@if_test_rbac
def test_rbac_secret_list_cluster_member(remove_resource):
"""
Verify cluster member can list secret in the project he created
"""
keyvaluepair = {"testall": "valueall"}
user_token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
project, ns = \
create_project_and_ns(user_token, namespace["cluster"],
random_test_name("rbac-cluster-mem"),
ns_name=random_test_name("ns-cluster-mem"))
p_client = get_project_client_for_token(project, user_token)
rbac_secret_list(p_client)
# Create a project as cluster owner and verify the cluster member cannot
# list secret in this project
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
ownerproject, ns = create_project_and_ns(
cluster_owner_token,
namespace["cluster"],
random_test_name("rbac-cluster-owner"))
cluster_owner_client = get_project_client_for_token(ownerproject,
cluster_owner_token)
cluster_member_client = get_project_client_for_token(ownerproject,
user_token)
ownersecret = create_secret(keyvaluepair, singlenamespace=False,
p_client=cluster_owner_client)
secretdict = cluster_member_client.list_secret(name=ownersecret.name)
secretdata = secretdict.get('data')
assert len(secretdata) == 0
cluster_owner_client.delete(ownersecret)
remove_resource(project)
remove_resource(ownerproject)
@if_test_rbac
def test_rbac_secret_list_project_readonly():
"""
Verify read-only user cannot list secret
"""
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
project = rbac_get_project()
readonly_user_token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
readonly_user_client = get_project_client_for_token(project,
readonly_user_token)
keyvaluepair = {"testall": "valueall"}
cluster_owner_p_client = get_project_client_for_token(project,
cluster_owner_token)
# As a cluster owner, create a secret
secret = create_secret(keyvaluepair, p_client=cluster_owner_p_client)
# Verify Read-Only user cannot list the secret
secretdict = readonly_user_client.list_secret(name=secret.name)
secretdata = secretdict.get('data')
assert len(secretdata) == 0
cluster_owner_p_client.delete(secret)
def rbac_secret_create(p_client, ns):
"""
Verify creating secret is functional.
The p_client passed as the parameter would be as per the role assigned
"""
keyvaluepair = {"testall": "valueall"}
secret = create_secret(keyvaluepair, singlenamespace=False,
p_client=p_client)
# Create workloads with secret in existing namespace
create_and_validate_workload_with_secret_as_volume(p_client, secret,
ns, keyvaluepair)
def rbac_secret_edit(p_client, ns, project=None):
"""
Verify creating, editing secret is functional.
The p_client passed as the parameter would be as per the role assigned
"""
value = base64.b64encode(b"valueall")
keyvaluepair = {"testall": value.decode('utf-8')}
cluster = namespace["cluster"]
c_client = namespace["c_client"]
# Create a namespace
secret = create_secret(keyvaluepair, singlenamespace=False,
p_client=p_client)
# Value is base64 encoded
value1 = base64.b64encode(b"valueall")
value2 = base64.b64encode(b"valueallnew")
updated_dict = {"testall": value1.decode(
'utf-8'), "testallnew": value2.decode('utf-8')}
updated_secret = p_client.update(secret, namespaceId='NULL',
data=updated_dict)
assert updated_secret['baseType'] == "secret"
updatedsecretdata = updated_secret['data']
print("UPDATED SECRET DATA")
print(updatedsecretdata)
assert updatedsecretdata.data_dict() == updated_dict
# Create workloads using updated secret in the existing namespace
create_and_validate_workload_with_secret_as_volume(p_client, secret,
ns,
updatedsecretdata)
create_and_validate_workload_with_secret_as_env_variable(
p_client, secret, ns, updatedsecretdata)
# Create a new namespace and workloads in the new namespace using secret
new_ns1 = create_ns(c_client, cluster, project)
create_and_validate_workload_with_secret_as_volume(p_client, secret,
new_ns1,
updatedsecretdata)
create_and_validate_workload_with_secret_as_env_variable(
p_client, secret, new_ns1, updatedsecretdata)
def rbac_secret_delete(p_client, ns):
"""
Verify creating, deleting secret is functional.
The p_client passed as the parameter would be as per the role assigned
"""
keyvaluepair = {"testall": "valueall"}
secret = create_secret(keyvaluepair, singlenamespace=False,
p_client=p_client)
# Verify deletion of secret
delete_secret(p_client, secret, ns, keyvaluepair)
def rbac_secret_list(p_client):
'''
Create a secret and list the secret
'''
keyvaluepair = {"testall": "valueall"}
secret = create_secret(keyvaluepair, singlenamespace=False,
p_client=p_client)
secretname = secret.name
secretdict = p_client.list_secret(name=secretname)
secretlist = secretdict.get('data')
testsecret = secretlist[0]
testsecret_data = testsecret['data']
assert len(secretlist) == 1
assert testsecret.type == "secret"
assert testsecret.name == secretname
assert testsecret_data.data_dict() == keyvaluepair
p_client.delete(testsecret)
@pytest.fixture(scope='module', autouse="True")
def create_project_client(request):
client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
p, ns = create_project_and_ns(USER_TOKEN, cluster, "testsecret")
p_client = get_project_client_for_token(p, USER_TOKEN)
c_client = get_cluster_client_for_token(cluster, USER_TOKEN)
namespace["p_client"] = p_client
namespace["ns"] = ns
namespace["cluster"] = cluster
namespace["project"] = p
namespace["c_client"] = c_client
def fin():
client = get_user_client()
client.delete(namespace["project"])
request.addfinalizer(fin)
def validate_workload_with_secret(p_client, workload,
type, ns_name, keyvaluepair,
workloadwithsecretasVolume=False,
workloadwithsecretasenvvar=False,
podcount=1):
validate_workload(p_client, workload, type, ns_name, pod_count=podcount)
pod_list = p_client.list_pod(workloadId=workload.id).data
mountpath = "/test"
for i in range(0, len(keyvaluepair)):
key = list(keyvaluepair.keys())[i]
if workloadwithsecretasVolume:
key_file_in_pod = mountpath + "/" + key
command = "cat " + key_file_in_pod + ''
if is_windows():
command = 'powershell -NoLogo -NonInteractive -Command "& {{ cat {0} }}"'.format(key_file_in_pod)
result = kubectl_pod_exec(pod_list[0], command)
assert result.rstrip() == base64.b64decode(list(keyvaluepair.values())[i])
elif workloadwithsecretasenvvar:
command = 'env'
if is_windows():
                command = 'powershell -NoLogo -NonInteractive -Command ' \
                          '\'& { (Get-Item -Path Env:).Name | ' \
                          '% { "$_=$((Get-Item -Path Env:\\$_).Value)" } }\''
            result = kubectl_pod_exec(pod_list[0], command)
            # the decoded secret value should show up in the pod's environment
            assert base64.b64decode(list(keyvaluepair.values())[i]) in result
def delete_secret(client, secret, ns, keyvaluepair):
key = list(keyvaluepair.keys())[0]
secretname = secret.name
print("Delete Secret")
client.delete(secret)
# Sleep to allow for the secret to be deleted
time.sleep(5)
timeout = 30
print("Secret list after deleting secret")
secretdict = client.list_secret(name=secretname)
print(secretdict)
print(secretdict.get('data'))
start = time.time()
if len(secretdict.get('data')) > 0:
testdata = secretdict.get('data')
print("TESTDATA")
print(testdata[0]['data'])
        while len(testdata) > 0 and key in testdata[0]['data']:
if time.time() - start > timeout:
raise AssertionError("Timed out waiting for deletion")
time.sleep(.5)
secretdict = client.list_secret(name=secretname)
testdata = secretdict.get('data')
assert True
if len(secretdict.get('data')) == 0:
assert True
# Verify secret is deleted by "kubectl get secret" command
command = " get secret " + secret['name'] + " --namespace=" + ns.name
print("Command to obtain the secret")
print(command)
result = execute_kubectl_cmd(command, json_out=False, stderr=True)
print(result)
print("Verify that the secret does not exist "
"and the error code returned is non zero ")
if result != 0:
assert True
def create_and_validate_workload_with_secret_as_volume(p_client, secret, ns,
keyvaluepair,
name=None):
if name is None:
name = random_test_name("test")
# Create Workload with secret as volume
mountpath = "/test"
volumeMounts = [{"readOnly": False, "type": "volumeMount",
"mountPath": mountpath, "name": "vol1"}]
con = [{"name": "test1",
"image": TEST_IMAGE,
"volumeMounts": volumeMounts}]
secretName = secret['name']
volumes = [{"type": "volume", "name": "vol1",
"secret": {"type": "secretVolumeSource", "defaultMode": 256,
"secretName": secretName,
"optional": False, "items": "NULL"}}]
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id, volumes=volumes)
validate_workload_with_secret(p_client, workload, "deployment",
ns.name, keyvaluepair,
workloadwithsecretasVolume=True)
def create_and_validate_workload_with_secret_as_env_variable(p_client, secret,
ns, keyvaluepair,
name=None):
if name is None:
name = random_test_name("test")
# Create Workload with secret as env variable
secretName = secret['name']
environmentdata = [{
"source": "secret",
"sourceKey": None,
"sourceName": secretName
}]
con = [{"name": "test",
"image": TEST_IMAGE,
"environmentFrom": environmentdata}]
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id)
validate_workload_with_secret(p_client, workload, "deployment",
ns.name, keyvaluepair,
workloadwithsecretasenvvar=True)
def create_secret(keyvaluepair, singlenamespace=False,
p_client=None, ns=None, name=None):
if p_client is None:
p_client = namespace["p_client"]
if name is None:
name = random_test_name("default")
if ns is None:
ns = namespace["ns"]
if not singlenamespace:
secret = p_client.create_secret(name=name, data=keyvaluepair)
assert secret['baseType'] == "secret"
else:
secret = p_client.create_namespaced_secret(name=name,
namespaceId=ns['name'],
data=keyvaluepair)
assert secret['baseType'] == "namespacedSecret"
print(secret)
secretdata = secret['data']
print("SECRET DATA")
print(secretdata)
assert secretdata.data_dict() == keyvaluepair
return secret
| 28,809 | 35.700637 | 113 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_network_policy.py
|
import pytest
from .common import * # NOQA
namespace = {"p_client": None, "ns": None, "cluster": None, "project": None}
random_password = random_test_name("pass")
PROJECT_ISOLATION = os.environ.get('RANCHER_PROJECT_ISOLATION', "disabled")
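# With project isolation enabled, Rancher is expected to apply network
# policies that block traffic between pods in different projects, which is
# why allow_connectivity is flipped in test_connectivity_between_pods below.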
def test_connectivity_between_pods():
p_client = namespace["p_client"]
ns = namespace["ns"]
cluster = namespace["cluster"]
con = [{"name": "test1",
"image": TEST_IMAGE,
}]
name = random_test_name("default")
schedulable_node_count = len(get_schedulable_nodes(cluster))
# Check connectivity between pods in the same namespace
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
validate_workload(p_client, workload, "daemonSet", ns.name,
schedulable_node_count)
check_connectivity_between_workload_pods(p_client, workload)
# Create another namespace in the same project
# Deploy workloads in this namespace
# Check that pods belonging to different namespace within the
# same project can communicate
c_client = get_cluster_client_for_token(cluster, USER_TOKEN)
ns1 = create_ns(c_client, cluster, namespace["project"])
workload1 = p_client.create_workload(name=name,
containers=con,
namespaceId=ns1.id,
daemonSetConfig={})
validate_workload(p_client, workload1, "daemonSet", ns1.name,
schedulable_node_count)
check_connectivity_between_workload_pods(p_client, workload1)
check_connectivity_between_workloads(p_client, workload, p_client,
workload1)
# Create new project in the same cluster
# Create namespace and deploy workloads
# Check communication between pods belonging to different namespace across
# different projects
p2, ns2 = create_project_and_ns(USER_TOKEN, cluster)
p2_client = get_project_client_for_token(p2, USER_TOKEN)
workload2 = p2_client.create_workload(name=name,
containers=con,
namespaceId=ns2.id,
daemonSetConfig={})
validate_workload(p2_client, workload2, "daemonSet", ns2.name,
schedulable_node_count)
check_connectivity_between_workload_pods(p2_client, workload2)
allow_connectivity = True
if PROJECT_ISOLATION == "enabled":
allow_connectivity = False
check_connectivity_between_workloads(
p_client, workload, p2_client, workload2,
allow_connectivity=allow_connectivity)
@pytest.fixture(scope='module', autouse="True")
def create_project_client(request):
client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
p, ns = create_project_and_ns(USER_TOKEN, cluster, "testnp")
p_client = get_project_client_for_token(p, USER_TOKEN)
namespace["p_client"] = p_client
namespace["ns"] = ns
namespace["cluster"] = cluster
namespace["project"] = p
def fin():
client = get_user_client()
client.delete(namespace["project"])
request.addfinalizer(fin)
| 3,394 | 38.022989 | 78 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_service_discovery.py
|
"""
This file contains tests for service discovery.
This file also has RBAC tests based on different roles.
Test requirements:
The following environment variables need to be set:
CATTLE_TEST_URL - URL of the Rancher server
ADMIN_TOKEN - Admin token from Rancher
USER_TOKEN - User token from Rancher
RANCHER_CLUSTER_NAME - Name of the cluster to run the tests on
RANCHER_TEST_RBAC - Boolean (optional), set to run the role based tests
"""
from .common import ApiError
from .common import ast
from .common import CLUSTER_MEMBER
from .common import CLUSTER_OWNER
from .common import create_kubeconfig
from .common import create_ns
from .common import create_project_and_ns
from .common import get_cluster_client_for_token
from .common import get_project_client_for_token
from .common import get_user_client
from .common import get_user_client_and_cluster
from .common import if_test_rbac
from .common import os
from .common import PROJECT_MEMBER
from .common import PROJECT_OWNER
from .common import PROJECT_READ_ONLY
from .common import pytest
from .common import random_test_name
from .common import rbac_get_namespace
from .common import rbac_get_project
from .common import rbac_get_user_token_by_role
from .common import rbac_get_workload
from .common import skip_test_windows_os
from .common import TEST_IMAGE
from .common import TEST_IMAGE_NGINX
from .common import time
from .common import USER_TOKEN
from .common import validate_dns_record
from .common import validate_dns_record_deleted
from .common import validate_service_discovery
from .common import validate_workload
from .common import validate_workload_image
from .common import wait_for_condition
from .common import wait_for_pod_images
from .common import wait_for_pods_in_workload
from .common import wait_for_pod_to_running
from .common import wait_for_wl_to_active
RANCHER_CLEANUP_PROJECT = os.environ.get("RANCHER_CLEANUP_PROJECT", "True")
namespace = {"p_client": None, "ns": None, "cluster": None,
"project": None, "testclient_pods": [], "workload": None}
rbac_role_list = [
CLUSTER_OWNER,
CLUSTER_MEMBER,
PROJECT_OWNER,
PROJECT_MEMBER,
PROJECT_READ_ONLY
]
DNS_RESOLUTION_DEFAULT_SECONDS = \
    int(os.environ.get("RANCHER_DNS_RESOLUTION_SECONDS", 30))
SKIP_PING_CHECK_TEST = \
ast.literal_eval(os.environ.get('RANCHER_SKIP_PING_CHECK_TEST', "False"))
if_skip_ping_check_test = pytest.mark.skipif(
SKIP_PING_CHECK_TEST,
reason='For skipping tests in clusters that ' \
'are deployed with security groups that will not allow ping')
def create_and_validate_wl(name, con, scale, type, p_client=None, ns=None):
if p_client is None:
p_client = namespace["p_client"]
if ns is None:
ns = namespace["ns"]
workload = p_client.create_workload(name=name, containers=con,
namespaceId=ns.id, scale=scale)
wait_for_pods_in_workload(p_client, workload, scale)
validate_workload(p_client, workload, type, ns.id, pod_count=scale)
return workload
def update_and_validate_workload(workload, con, scale, p_client=None, ns=None):
if p_client is None:
p_client = namespace["p_client"]
if ns is None:
ns = namespace["ns"]
p_client.update(workload, containers=con, scale=scale)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pod_images(p_client, workload, ns.name, con[0]["image"], scale)
wait_for_pods_in_workload(p_client, workload, scale)
validate_workload(p_client, workload, "deployment", ns.name, scale)
validate_workload_image(p_client, workload, con[0]["image"], ns)
def validate_dns_record_for_workload(workload, scale, record,
p_client=None, testclient_pods=None):
if p_client is None:
p_client = namespace["p_client"]
if testclient_pods is None:
testclient_pods = namespace["testclient_pods"]
expected_ips = []
pods = p_client.list_pod(workloadId=workload["id"]).data
assert len(pods) == scale
for pod in pods:
expected_ips.append(pod["status"]["podIp"])
for pod in testclient_pods:
validate_dns_record(pod, record, expected_ips)
def test_service_discovery_when_workload_scale_up():
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("test-sd-up")
type = "deployment"
# deploy a workload
scale = 2
workload = create_and_validate_wl(name, con, scale, type)
# test service discovery
validate_service_discovery(workload, scale, namespace["p_client"],
namespace["ns"], namespace["testclient_pods"])
# workload scales up to 3 pods
scale = 3
update_and_validate_workload(workload, con, scale)
# test service discovery
time.sleep(DNS_RESOLUTION_DEFAULT_SECONDS)
validate_service_discovery(workload, scale, namespace["p_client"],
namespace["ns"], namespace["testclient_pods"])
def test_service_discovery_when_workload_scale_down():
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("test-sd-dw")
type = "deployment"
# deploy a workload
scale = 3
workload = create_and_validate_wl(name, con, scale, type)
# test service discovery
validate_service_discovery(workload, scale, namespace["p_client"],
namespace["ns"], namespace["testclient_pods"])
# workload scale down to 2 pods
scale = 2
update_and_validate_workload(workload, con, scale)
# test service discovery
time.sleep(DNS_RESOLUTION_DEFAULT_SECONDS)
validate_service_discovery(workload, scale, namespace["p_client"],
namespace["ns"], namespace["testclient_pods"])
def test_service_discovery_when_workload_upgrade():
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("test-sd-upgrade")
type = "deployment"
scale = 2
# deploy a workload
workload = create_and_validate_wl(name, con, scale, type)
# test service discovery
validate_service_discovery(workload, scale, namespace["p_client"],
namespace["ns"], namespace["testclient_pods"])
# upgrade
con = [{"name": "test1",
"image": TEST_IMAGE_NGINX}]
update_and_validate_workload(workload, con, scale)
# test service discovery
time.sleep(DNS_RESOLUTION_DEFAULT_SECONDS)
validate_service_discovery(workload, scale, namespace["p_client"],
namespace["ns"], namespace["testclient_pods"])
# upgrade again
con = [{"name": "test1",
"image": TEST_IMAGE}]
update_and_validate_workload(workload, con, scale)
# test service discovery
time.sleep(DNS_RESOLUTION_DEFAULT_SECONDS)
validate_service_discovery(workload, scale, namespace["p_client"],
namespace["ns"], namespace["testclient_pods"])
def test_dns_record_type_workload_when_workload_scale_up():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("test-dns-up")
type = "deployment"
# deploy a workload
scale = 2
workload = create_and_validate_wl(name, con, scale, type)
record = {"type": "dnsRecord", "targetWorkloadIds": [workload["id"]],
"name": random_test_name("record"), "namespaceId": ns.id}
create_dns_record(record, p_client)
# test dns record for the workload
validate_dns_record_for_workload(workload, scale, record)
# workload scale up to 3 pods
scale = 3
update_and_validate_workload(workload, con, scale)
# test service discovery
time.sleep(DNS_RESOLUTION_DEFAULT_SECONDS)
validate_dns_record_for_workload(workload, scale, record)
def test_dns_record_type_workload_when_workload_scale_down():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("test-dns-dw")
type = "deployment"
# deploy a workload
scale = 3
workload = create_and_validate_wl(name, con, scale, type)
record = {"type": "dnsRecord",
"targetWorkloadIds": [workload["id"]],
"name": random_test_name("record"),
"namespaceId": ns.id}
create_dns_record(record, p_client)
# test service discovery
validate_dns_record_for_workload(workload, scale, record)
# workload scale down to 2 pods
scale = 2
update_and_validate_workload(workload, con, scale)
# test service discovery
time.sleep(DNS_RESOLUTION_DEFAULT_SECONDS)
validate_dns_record_for_workload(workload, scale, record)
def test_dns_record_type_workload_when_workload_upgrade():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("test-dns-upgrade")
scale = 2
type = "deployment"
# deploy a workload
workload = create_and_validate_wl(name, con, scale, type)
record = {"type": "dnsRecord", "targetWorkloadIds": [workload["id"]],
"name": random_test_name("record"), "namespaceId": ns.id}
create_dns_record(record, p_client)
# test service discovery
validate_dns_record_for_workload(workload, scale, record)
# upgrade the workload
con = [{"name": "test1",
"image": TEST_IMAGE_NGINX}]
update_and_validate_workload(workload, con, scale)
# test service discovery
time.sleep(DNS_RESOLUTION_DEFAULT_SECONDS)
validate_dns_record_for_workload(workload, scale, record)
# upgrade the workload again
con = [{"name": "test1",
"image": TEST_IMAGE}]
update_and_validate_workload(workload, con, scale)
# test service discovery
time.sleep(DNS_RESOLUTION_DEFAULT_SECONDS)
validate_dns_record_for_workload(workload, scale, record)
# Windows cannot forward remote ICMP packets. Since TCP/UDP packets can
# still be forwarded, one can substitute ping <destination> with
# curl <destination> to debug connectivity to the outside.
@skip_test_windows_os
@if_skip_ping_check_test
def test_dns_record_type_external_ip():
ns = namespace["ns"]
record = {"type": "dnsRecord", "ipAddresses": ["8.8.8.8"],
"name": random_test_name("record"), "namespaceId": ns.id}
expected = record["ipAddresses"]
create_and_validate_dns_record(record, expected)
# Windows cannot forward remote ICMP packets. Since TCP/UDP packets can
# still be forwarded, one can substitute ping <destination> with
# curl <destination> to debug connectivity to the outside.
@skip_test_windows_os
@if_skip_ping_check_test
def test_dns_record_type_multiple_external_ips():
ns = namespace["ns"]
record = {"type": "dnsRecord", "ipAddresses": ["8.8.8.8", "8.8.4.4"],
"name": random_test_name("record"), "namespaceId": ns.id}
expected = record["ipAddresses"]
create_and_validate_dns_record(record, expected)
# Windows cannot forward remote ICMP packets. Since TCP/UDP packets can
# still be forwarded, one can substitute ping <destination> with
# curl <destination> to debug connectivity to the outside.
@skip_test_windows_os
@if_skip_ping_check_test
def test_dns_record_type_hostname():
ns = namespace["ns"]
record = {"type": "dnsRecord", "hostname": "google.com",
"name": random_test_name("record"), "namespaceId": ns.id}
expected = [record["hostname"]]
create_and_validate_dns_record(record, expected)
# Windows cannot forward remote ICMP packets. Since TCP/UDP packets can
# still be forwarded, one can substitute ping <destination> with
# curl <destination> to debug connectivity to the outside.
@skip_test_windows_os
@if_skip_ping_check_test
def test_dns_record_type_alias():
ns = namespace["ns"]
first_record = {"type": "dnsRecord", "hostname": "google.com",
"name": random_test_name("record"), "namespaceId": ns.id}
target_record = create_dns_record(first_record)
record = {"type": "dnsRecord", "targetDnsRecordIds": [target_record["id"]],
"name": random_test_name("record"), "namespaceId": ns.id}
expected = [first_record["hostname"]]
create_and_validate_dns_record(record, expected)
def test_dns_record_type_workload():
ns = namespace["ns"]
workload = namespace["workload"]
p_client = namespace["p_client"]
record = {"type": "dnsRecord", "targetWorkloadIds": [workload["id"]],
"name": random_test_name("record"), "namespaceId": ns.id}
expected_ips = []
pods = p_client.list_pod(workloadId=workload["id"]).data
for pod in pods:
expected_ips.append(pod["status"]["podIp"])
create_and_validate_dns_record(record, expected_ips)
def test_dns_record_type_multiple_workloads():
ns = namespace["ns"]
workload = namespace["workload"]
p_client = namespace["p_client"]
wlname = random_test_name("default")
con = [{"name": "test1",
"image": TEST_IMAGE}]
additional_workload = p_client.create_workload(name=wlname,
containers=con,
namespaceId=ns.id,
scale=1)
wait_for_wl_to_active(p_client, additional_workload)
awl_pods = wait_for_pods_in_workload(p_client, additional_workload, 1)
wait_for_pod_to_running(p_client, awl_pods[0])
record = {"type": "dnsRecord",
"targetWorkloadIds": [workload["id"], additional_workload["id"]],
"name": random_test_name("record"),
"namespaceId": ns.id}
workloads = [workload, additional_workload]
expected_ips = []
for wl in workloads:
pods = p_client.list_pod(workloadId=wl["id"]).data
for pod in pods:
expected_ips.append(pod["status"]["podIp"])
create_and_validate_dns_record(record, expected_ips)
def test_dns_record_type_selector():
ns = namespace["ns"]
workload = namespace["workload"]
p_client = namespace["p_client"]
selector = \
workload["labels"]["workload.user.cattle.io/workloadselector"]
record = {"type": "dnsRecord",
"selector":
{"workload.user.cattle.io/workloadselector": selector},
"name": random_test_name("record"), "namespaceId": ns.id}
expected_ips = []
pods = p_client.list_pod(workloadId=workload["id"]).data
for pod in pods:
expected_ips.append(pod["status"]["podIp"])
create_and_validate_dns_record(record, expected_ips)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_service_discovery_create(role):
"""
    Creates a dns record and validates it for each role passed in as a parameter
@param role: User role in rancher eg. project owner, project member etc
"""
token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
workload = rbac_get_workload()
p_client = get_project_client_for_token(project, token)
record = {"type": "dnsRecord", "targetWorkloadIds": [workload["id"]],
"name": random_test_name("record"), "namespaceId": ns.id}
if role in (CLUSTER_MEMBER, PROJECT_READ_ONLY):
with pytest.raises(ApiError) as e:
dns_record = create_dns_record(record, p_client)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
else:
dns_record = create_dns_record(record, p_client)
# test dns record for the workload
validate_dns_record_for_workload(workload, 1,
record, p_client=p_client)
p_client.delete(dns_record)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_service_discovery_edit(role):
"""
    Creates a dns record as the cluster owner and edits it with different roles
@param role: User role in rancher eg. project owner, project member etc
"""
c_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
workload_1 = rbac_get_workload()
p_client = get_project_client_for_token(project, token)
p_client_for_c_owner = get_project_client_for_token(project, c_owner_token)
wlname = random_test_name("default")
con = [{"name": "test1",
"image": TEST_IMAGE}]
workload_2 = p_client_for_c_owner.create_workload(name=wlname,
containers=con,
namespaceId=ns.id,
scale=1)
wait_for_wl_to_active(p_client_for_c_owner, workload_2)
record_1 = {"type": "dnsRecord", "targetWorkloadIds": [workload_1["id"]],
"name": random_test_name("record"), "namespaceId": ns.id}
dns_record = create_dns_record(record_1, p_client_for_c_owner)
validate_dns_record_for_workload(workload_1, 1,
record_1, p_client=p_client_for_c_owner)
if role in (CLUSTER_MEMBER, PROJECT_READ_ONLY):
with pytest.raises(ApiError) as e:
p_client.update(dns_record, targetWorkloadIds=workload_2["id"])
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
else:
p_client.update(dns_record, type="dnsRecord",
targetWorkloadIds=[workload_2["id"]])
p_client.reload(dns_record)
p_client_for_c_owner.delete(dns_record)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_service_discovery_delete(role):
"""
    Creates a dns record as the cluster owner and deletes it with different roles.
@param role: User role in rancher eg. project owner, project member etc
"""
c_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
workload = rbac_get_workload()
p_client = get_project_client_for_token(project, token)
p_client_for_c_owner = get_project_client_for_token(project, c_owner_token)
record = {"type": "dnsRecord", "targetWorkloadIds": [workload["id"]],
"name": random_test_name("record"), "namespaceId": ns.id}
dns_record = create_dns_record(record, p_client_for_c_owner)
validate_dns_record_for_workload(workload, 1,
record, p_client=p_client_for_c_owner)
if role in (CLUSTER_MEMBER, PROJECT_READ_ONLY):
with pytest.raises(ApiError) as e:
p_client.delete(dns_record)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
else:
p_client.delete(dns_record)
validate_dns_record_deleted(p_client, dns_record)
def create_and_validate_dns_record(record, expected, p_client=None,
testclient_pods=None):
if testclient_pods is None:
testclient_pods = namespace["testclient_pods"]
create_dns_record(record, p_client)
assert len(testclient_pods) > 0
for pod in testclient_pods:
validate_dns_record(pod, record, expected)
def create_dns_record(record, p_client=None):
if p_client is None:
p_client = namespace["p_client"]
created_record = p_client.create_dns_record(record)
wait_for_condition(
p_client, created_record,
lambda x: x.state == "active",
lambda x: 'State is: ' + x.state)
return created_record
@pytest.fixture(scope='module', autouse="True")
def setup(request):
client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
p, ns = create_project_and_ns(USER_TOKEN, cluster, "testsd")
p_client = get_project_client_for_token(p, USER_TOKEN)
c_client = get_cluster_client_for_token(cluster, USER_TOKEN)
new_ns = create_ns(c_client, cluster, p)
namespace["p_client"] = p_client
namespace["ns"] = ns
namespace["cluster"] = cluster
namespace["project"] = p
wlname = random_test_name("default")
con = [{"name": "test1",
"image": TEST_IMAGE}]
workload = p_client.create_workload(name=wlname,
containers=con,
namespaceId=ns.id,
scale=2)
wait_for_wl_to_active(p_client, workload)
namespace["workload"] = workload
pods = wait_for_pods_in_workload(p_client, workload, 2)
pod = wait_for_pod_to_running(p_client, pods[0])
namespace["testclient_pods"].append(pod)
workload = p_client.create_workload(name=wlname,
containers=con,
namespaceId=new_ns.id,
scale=1)
wait_for_wl_to_active(p_client, workload)
pods = wait_for_pods_in_workload(p_client, workload, 1)
pod = wait_for_pod_to_running(p_client, pods[0])
namespace["testclient_pods"].append(pod)
assert len(namespace["testclient_pods"]) == 2
def fin():
client = get_user_client()
client.delete(namespace["project"])
if RANCHER_CLEANUP_PROJECT == "True":
request.addfinalizer(fin)
| 21,594 | 35.91453 | 79 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_rke_cluster_provisioning.py
|
from threading import Thread
import pytest
from .common import * # NOQA
from rancher import ApiError
K8S_VERSION = os.environ.get('RANCHER_K8S_VERSION', "")
K8S_VERSION_UPGRADE = os.environ.get('RANCHER_K8S_VERSION_UPGRADE', "")
POD_SECURITY_POLICY_TEMPLATE = \
os.environ.get('RANCHER_POD_SECURITY_POLICY_TEMPLATE',
"restricted")
DO_ACCESSKEY = os.environ.get('DO_ACCESSKEY', "None")
AZURE_SUBSCRIPTION_ID = os.environ.get("AZURE_SUBSCRIPTION_ID")
AZURE_CLIENT_ID = os.environ.get("AZURE_CLIENT_ID")
AZURE_CLIENT_SECRET = os.environ.get("AZURE_CLIENT_SECRET")
AZURE_TENANT_ID = os.environ.get("AZURE_TENANT_ID")
worker_count = int(os.environ.get('RANCHER_STRESS_TEST_WORKER_COUNT', 1))
HOST_NAME = os.environ.get('RANCHER_HOST_NAME', "testcustom")
engine_install_url = "https://releases.rancher.com/install-docker/20.10.sh"
rke_config = {
"addonJobTimeout": 30,
"authentication":
{"strategy": "x509",
"type": "authnConfig"},
"ignoreDockerVersion": True,
"ingress":
{"provider": "nginx",
"type": "ingressConfig"},
"monitoring":
{"provider": "metrics-server",
"type": "monitoringConfig"},
"network":
{"plugin": "canal",
"type": "networkConfig",
"options": {"flannel_backend_type": "vxlan"}},
"services": {
"etcd": {
"extraArgs":
{"heartbeat-interval": 500,
"election-timeout": 5000},
"snapshot": False,
"backupConfig":
{"intervalHours": 12, "retention": 6, "type": "backupConfig"},
"creation": "12h",
"retention": "72h",
"type": "etcdService"},
"kubeApi": {
"alwaysPullImages": False,
"podSecurityPolicy": False,
"serviceNodePortRange": "30000-32767",
"type": "kubeAPIService"}},
"sshAgentAuth": False}
rke_config_windows = {
"addonJobTimeout": 30,
"authentication":
{"strategy": "x509",
"type": "authnConfig"},
"ignoreDockerVersion": True,
"ingress":
{"provider": "nginx",
"type": "ingressConfig"},
"monitoring":
{"provider": "metrics-server",
"type": "monitoringConfig"},
"network": {
"mtu": 0,
"plugin": "flannel",
"type": "networkConfig",
"options": {
"flannel_backend_type": "vxlan",
"flannel_backend_port": "4789",
"flannel_backend_vni": "4096"
}
},
"services": {
"etcd": {
"extraArgs":
{"heartbeat-interval": 500,
"election-timeout": 5000},
"snapshot": False,
"backupConfig":
{"intervalHours": 12, "retention": 6, "type": "backupConfig"},
"creation": "12h",
"retention": "72h",
"type": "etcdService"},
"kubeApi": {
"alwaysPullImages": False,
"podSecurityPolicy": False,
"serviceNodePortRange": "30000-32767",
"type": "kubeAPIService"}},
"sshAgentAuth": False}
rke_config_windows_host_gw = {
"addonJobTimeout": 30,
"authentication":
{"strategy": "x509",
"type": "authnConfig"},
"ignoreDockerVersion": True,
"ingress":
{"provider": "nginx",
"type": "ingressConfig"},
"monitoring":
{"provider": "metrics-server",
"type": "monitoringConfig"},
"network": {
"mtu": 0,
"plugin": "flannel",
"type": "networkConfig",
"options": {
"flannel_backend_type": "host-gw"
}
},
"services": {
"etcd": {
"extraArgs":
{"heartbeat-interval": 500,
"election-timeout": 5000},
"snapshot": False,
"backupConfig":
{"intervalHours": 12, "retention": 6, "type": "backupConfig"},
"creation": "12h",
"retention": "72h",
"type": "etcdService"},
"kubeApi": {
"alwaysPullImages": False,
"podSecurityPolicy": False,
"serviceNodePortRange": "30000-32767",
"type": "kubeAPIService"}},
"sshAgentAuth": False}
rke_config_cis_1_4 = {
"addonJobTimeout": 30,
"authentication":
{"strategy": "x509",
"type": "authnConfig"},
"ignoreDockerVersion": True,
"ingress":
{"provider": "nginx",
"type": "ingressConfig"},
"monitoring":
{"provider": "metrics-server",
"type": "monitoringConfig"},
"network":
{"plugin": "canal",
"type": "networkConfig",
"options": {"flannel_backend_type": "vxlan"}},
"services": {
"etcd": {
"extraArgs":
{"heartbeat-interval": 500,
"election-timeout": 5000},
"snapshot": False,
"backupConfig":
{"intervalHours": 12, "retention": 6, "type": "backupConfig"},
"creation": "12h",
"retention": "72h",
"type": "etcdService",
"gid": 1001,
"uid": 1001},
"kubeApi": {
"alwaysPullImages": True,
"auditLog":
{"enabled": True},
"eventRateLimit":
{"enabled": True},
"extraArgs":
{"anonymous-auth": False,
"enable-admission-plugins": "ServiceAccount,"
"NamespaceLifecycle,"
"LimitRanger,"
"PersistentVolumeLabel,"
"DefaultStorageClass,"
"ResourceQuota,"
"DefaultTolerationSeconds,"
"AlwaysPullImages,"
"DenyEscalatingExec,"
"NodeRestriction,"
"PodSecurityPolicy,"
"MutatingAdmissionWebhook,"
"ValidatingAdmissionWebhook,"
"Priority,"
"TaintNodesByCondition,"
"PersistentVolumeClaimResize,"
"EventRateLimit",
"profiling": False,
"service-account-lookup": True,
"tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_"
"128_GCM_SHA256,"
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,"
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,"
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,"
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,"
"TLS_ECDHE_ECDSA_WITH_AES_"
"256_GCM_SHA384,"
"TLS_RSA_WITH_AES_256_GCM_SHA384,"
"TLS_RSA_WITH_AES_128_GCM_SHA256"},
"extraBinds": ["/opt/kubernetes:/opt/kubernetes"],
"podSecurityPolicy": True,
"secretsEncryptionConfig":
{"enabled": True},
"serviceNodePortRange": "30000-32767",
"type": "kubeAPIService"},
"kubeController": {
"extraArgs": {
"address": "127.0.0.1",
"feature-gates": "RotateKubeletServerCertificate=true",
"profiling": "false",
"terminated-pod-gc-threshold": "1000"
},
},
"kubelet": {
"extraArgs": {
"protect-kernel-defaults": True,
"feature-gates": "RotateKubeletServerCertificate=true"
},
"generateServingCertificate": True
},
"scheduler": {
"extraArgs": {
"address": "127.0.0.1",
"profiling": False
}
}},
"sshAgentAuth": False}
rke_config_cis_1_5 = {
"addonJobTimeout": 30,
"ignoreDockerVersion": True,
"services": {
"etcd": {
"gid": 52034,
"uid": 52034,
"type": "etcdService"},
"kubeApi": {
"podSecurityPolicy": True,
"secretsEncryptionConfig":
{"enabled": True},
"auditLog":
{"enabled": True},
"eventRateLimit":
{"enabled": True},
"type": "kubeAPIService"},
"kubeController": {
"extraArgs": {
"feature-gates": "RotateKubeletServerCertificate=true",
},
},
"scheduler": {
"image": "",
"extraArgs": {},
"extraBinds": [],
"extraEnv": []
},
"kubelet": {
"generateServingCertificate": True,
"extraArgs": {
"feature-gates": "RotateKubeletServerCertificate=true",
"protect-kernel-defaults": True,
"tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_"
"128_GCM_SHA256,"
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,"
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,"
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,"
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,"
"TLS_ECDHE_ECDSA_WITH_AES_"
"256_GCM_SHA384,"
"TLS_RSA_WITH_AES_256_GCM_SHA384,"
"TLS_RSA_WITH_AES_128_GCM_SHA256"
},
"extraBinds": [],
"extraEnv": [],
"clusterDomain": "",
"infraContainerImage": "",
"clusterDnsServer": "",
"failSwapOn": False
},
},
"network":
{"plugin": "",
"options": {},
"mtu": 0,
"nodeSelector": {}},
"authentication": {
"strategy": "",
"sans": [],
"webhook": None,
},
"sshAgentAuth": False,
"windowsPreferredCluster": False
}
if K8S_VERSION != "":
rke_config["kubernetesVersion"] = K8S_VERSION
rke_config_cis_1_4["kubernetesVersion"] = K8S_VERSION
rke_config_cis_1_5["kubernetesVersion"] = K8S_VERSION
rke_config_windows_host_gw_aws_provider = rke_config_windows_host_gw.copy()
rke_config_windows_host_gw_aws_provider["cloudProvider"] = {"name": "aws",
"type": "cloudProvider",
"awsCloudProvider":
{"type": "awsCloudProvider"}}
rke_config_aws_provider = rke_config.copy()
rke_config_aws_provider["cloudProvider"] = {"name": "aws",
"type": "cloudProvider",
"awsCloudProvider":
{"type": "awsCloudProvider"}}
rke_config_aws_provider_2 = rke_config.copy()
rke_config_aws_provider_2["cloudProvider"] = {"name": "aws",
"type": "cloudProvider"}
rke_config_azure_provider = rke_config.copy()
rke_config_azure_provider["cloudProvider"] = {
"name": "azure",
"azureCloudProvider": {
"aadClientId": AZURE_CLIENT_ID,
"aadClientSecret": AZURE_CLIENT_SECRET,
"subscriptionId": AZURE_SUBSCRIPTION_ID,
"tenantId": AZURE_TENANT_ID}}
if_stress_enabled = pytest.mark.skipif(
not os.environ.get('RANCHER_STRESS_TEST_WORKER_COUNT'),
reason='Stress test not enabled')
if_test_edit_cluster = pytest.mark.skipif(
CLUSTER_NAME == "",
reason='Edit cluster tests not enabled')
def test_cis_complaint():
# rke_config_cis
node_roles = [
["controlplane"], ["controlplane"],
["etcd"], ["etcd"], ["etcd"],
["worker"], ["worker"], ["worker"]
]
aws_nodes = \
AmazonWebServices().create_multiple_nodes(
len(node_roles), random_test_name(HOST_NAME))
rke_config_cis = get_cis_rke_config()
client = get_admin_client()
cluster = client.create_cluster(
name=evaluate_clustername(),
driver="rancherKubernetesEngine",
rancherKubernetesEngineConfig=rke_config_cis,
defaultPodSecurityPolicyTemplateId=POD_SECURITY_POLICY_TEMPLATE)
assert cluster.state == "provisioning"
configure_cis_requirements(aws_nodes,
CIS_SCAN_PROFILE,
node_roles,
client,
cluster
)
cluster_cleanup(client, cluster, aws_nodes)
def test_rke_az_host_1(node_template_az):
validate_rke_dm_host_1(node_template_az, rke_config)
def test_rke_az_host_2(node_template_az):
validate_rke_dm_host_2(node_template_az, rke_config)
def test_rke_az_host_3(node_template_az):
validate_rke_dm_host_3(node_template_az, rke_config)
def test_rke_az_host_4(node_template_az):
validate_rke_dm_host_4(node_template_az, rke_config)
def test_rke_az_host_with_provider_1(node_template_az):
validate_rke_dm_host_1(node_template_az, rke_config_azure_provider)
def test_rke_az_host_with_provider_2(node_template_az):
validate_rke_dm_host_2(node_template_az, rke_config_azure_provider)
def test_rke_do_host_1(node_template_do):
validate_rke_dm_host_1(node_template_do, rke_config)
def test_rke_do_host_2(node_template_do):
validate_rke_dm_host_2(node_template_do, rke_config)
def test_rke_do_host_3(node_template_do):
validate_rke_dm_host_3(node_template_do, rke_config)
def test_rke_do_host_4(node_template_do):
validate_rke_dm_host_4(node_template_do, rke_config)
def test_rke_linode_host_1(node_template_linode):
validate_rke_dm_host_1(node_template_linode, rke_config)
def test_rke_linode_host_2(node_template_linode):
validate_rke_dm_host_2(node_template_linode, rke_config)
def test_rke_linode_host_3(node_template_linode):
validate_rke_dm_host_3(node_template_linode, rke_config)
def test_rke_ec2_host_1(node_template_ec2):
validate_rke_dm_host_1(node_template_ec2, rke_config)
def test_rke_ec2_host_2(node_template_ec2):
validate_rke_dm_host_2(node_template_ec2, rke_config)
def test_rke_ec2_host_3(node_template_ec2):
validate_rke_dm_host_3(node_template_ec2, rke_config)
def test_rke_ec2_host_with_aws_provider_1(node_template_ec2_with_provider):
validate_rke_dm_host_1(node_template_ec2_with_provider,
rke_config_aws_provider)
def test_rke_ec2_host_with_aws_provider_2(node_template_ec2_with_provider):
validate_rke_dm_host_2(node_template_ec2_with_provider,
rke_config_aws_provider)
def test_rke_ec2_host_with_aws_provider_3(node_template_ec2_with_provider):
validate_rke_dm_host_1(node_template_ec2_with_provider,
rke_config_aws_provider_2)
def test_rke_ec2_host_4(node_template_ec2):
validate_rke_dm_host_4(node_template_ec2, rke_config)
def test_rke_custom_host_1():
node_roles = [["worker", "controlplane", "etcd"]]
cluster, aws_nodes = create_and_validate_custom_host(node_roles)
cluster_cleanup(get_user_client(), cluster, aws_nodes)
def test_rke_custom_host_2():
node_roles = [["controlplane"], ["etcd"],
["worker"], ["worker"], ["worker"]]
cluster, aws_nodes = create_and_validate_custom_host(node_roles)
cluster_cleanup(get_user_client(), cluster, aws_nodes)
def test_rke_custom_host_3():
node_roles = [
["controlplane"], ["controlplane"],
["etcd"], ["etcd"], ["etcd"],
["worker"], ["worker"], ["worker"]
]
cluster, aws_nodes = create_and_validate_custom_host(node_roles)
cluster_cleanup(get_user_client(), cluster, aws_nodes)
def test_rke_custom_host_4():
aws_nodes = \
AmazonWebServices().create_multiple_nodes(
8, random_test_name(HOST_NAME))
node_roles = [
{"roles": ["controlplane"],
"nodes":[aws_nodes[0], aws_nodes[1]]},
{"roles": ["etcd"],
"nodes": [aws_nodes[2], aws_nodes[3], aws_nodes[4]]},
{"roles": ["worker"],
"nodes": [aws_nodes[5], aws_nodes[6], aws_nodes[7]]}
]
client = get_user_client()
cluster = client.create_cluster(name=evaluate_clustername(),
driver="rancherKubernetesEngine",
rancherKubernetesEngineConfig=rke_config)
assert cluster.state == "provisioning"
delay = 120
host_threads = []
for node_role in node_roles:
host_thread = Thread(target=register_host_after_delay,
args=(client, cluster, node_role, delay))
host_threads.append(host_thread)
host_thread.start()
time.sleep(30)
for host_thread in host_threads:
host_thread.join()
cluster = validate_cluster(client, cluster,
check_intermediate_state=False,
k8s_version=K8S_VERSION)
cluster_cleanup(client, cluster, aws_nodes)
@if_stress_enabled
def test_rke_custom_host_stress():
aws_nodes = AmazonWebServices().create_multiple_nodes(
worker_count + 4, random_test_name("teststress"))
node_roles = [["controlplane"], ["etcd"], ["etcd"], ["etcd"]]
worker_role = ["worker"]
    for _ in range(0, worker_count):
node_roles.append(worker_role)
client = get_user_client()
cluster = client.create_cluster(name=evaluate_clustername(),
driver="rancherKubernetesEngine",
rancherKubernetesEngineConfig=rke_config)
assert cluster.state == "provisioning"
i = 0
for aws_node in aws_nodes:
docker_run_cmd = \
get_custom_host_registration_cmd(client, cluster, node_roles[i],
aws_node)
aws_node.execute_command(docker_run_cmd)
i += 1
cluster = validate_cluster(client, cluster,
check_intermediate_state=False)
cluster_cleanup(client, cluster, aws_nodes)
def test_rke_custom_host_etcd_plane_changes():
aws_nodes = \
AmazonWebServices().create_multiple_nodes(
7, random_test_name(HOST_NAME))
node_roles = [["controlplane"], ["etcd"],
["worker"], ["worker"], ["worker"]]
client = get_user_client()
cluster = client.create_cluster(name=evaluate_clustername(),
driver="rancherKubernetesEngine",
rancherKubernetesEngineConfig=rke_config)
assert cluster.state == "provisioning"
i = 0
for i in range(0, 5):
aws_node = aws_nodes[i]
docker_run_cmd = \
get_custom_host_registration_cmd(client, cluster, node_roles[i],
aws_node)
aws_node.execute_command(docker_run_cmd)
cluster = validate_cluster(client, cluster)
etcd_nodes = get_role_nodes(cluster, "etcd")
assert len(etcd_nodes) == 1
# Add 1 more etcd node
aws_node = aws_nodes[5]
docker_run_cmd = get_custom_host_registration_cmd(client, cluster,
["etcd"], aws_node)
aws_node.execute_command(docker_run_cmd)
wait_for_cluster_node_count(client, cluster, 6)
validate_cluster(client, cluster, intermediate_state="updating")
# Add 1 more etcd node
aws_node = aws_nodes[6]
docker_run_cmd = get_custom_host_registration_cmd(client, cluster,
["etcd"], aws_node)
aws_node.execute_command(docker_run_cmd)
wait_for_cluster_node_count(client, cluster, 7)
validate_cluster(client, cluster, intermediate_state="updating")
# Delete the first etcd node
client.delete(etcd_nodes[0])
validate_cluster(client, cluster, intermediate_state="updating")
cluster_cleanup(client, cluster, aws_nodes)
def test_rke_custom_host_etcd_plane_changes_1():
aws_nodes = \
AmazonWebServices().create_multiple_nodes(
7, random_test_name(HOST_NAME))
node_roles = [["controlplane"], ["etcd"],
["worker"], ["worker"], ["worker"]]
client = get_user_client()
cluster = client.create_cluster(name=evaluate_clustername(),
driver="rancherKubernetesEngine",
rancherKubernetesEngineConfig=rke_config)
assert cluster.state == "provisioning"
i = 0
for i in range(0, 5):
aws_node = aws_nodes[i]
docker_run_cmd = \
get_custom_host_registration_cmd(client, cluster,
node_roles[i], aws_node)
aws_node.execute_command(docker_run_cmd)
cluster = validate_cluster(client, cluster)
etcd_nodes = get_role_nodes(cluster, "etcd")
assert len(etcd_nodes) == 1
# Add 2 more etcd node
aws_node = aws_nodes[5]
docker_run_cmd = get_custom_host_registration_cmd(client, cluster,
["etcd"], aws_node)
aws_node.execute_command(docker_run_cmd)
aws_node = aws_nodes[6]
docker_run_cmd = get_custom_host_registration_cmd(client, cluster,
["etcd"], aws_node)
aws_node.execute_command(docker_run_cmd)
wait_for_cluster_node_count(client, cluster, 7)
validate_cluster(client, cluster, intermediate_state="updating")
cluster_cleanup(client, cluster, aws_nodes)
def test_rke_custom_host_control_plane_changes():
    aws_nodes = \
AmazonWebServices().create_multiple_nodes(
6, random_test_name(HOST_NAME))
node_roles = [["controlplane"], ["etcd"],
["worker"], ["worker"], ["worker"]]
client = get_user_client()
cluster = client.create_cluster(name=evaluate_clustername(),
driver="rancherKubernetesEngine",
rancherKubernetesEngineConfig=rke_config)
assert cluster.state == "provisioning"
i = 0
for i in range(0, 5):
aws_node = aws_nodes[i]
docker_run_cmd = \
get_custom_host_registration_cmd(client, cluster,
node_roles[i], aws_node)
aws_node.execute_command(docker_run_cmd)
cluster = validate_cluster(client, cluster)
control_nodes = get_role_nodes(cluster, "control")
assert len(control_nodes) == 1
# Add 1 more control node
aws_node = aws_nodes[5]
docker_run_cmd = get_custom_host_registration_cmd(client, cluster,
["controlplane"],
aws_node)
aws_node.execute_command(docker_run_cmd)
wait_for_cluster_node_count(client, cluster, 6)
validate_cluster(client, cluster, intermediate_state="updating")
# Delete the first control node
client.delete(control_nodes[0])
validate_cluster(client, cluster, intermediate_state="updating")
cluster_cleanup(client, cluster, aws_nodes)
def test_rke_custom_host_worker_plane_changes():
aws_nodes = \
AmazonWebServices().create_multiple_nodes(
4, random_test_name(HOST_NAME))
node_roles = [["controlplane"], ["etcd"],
["worker"]]
client = get_user_client()
cluster = client.create_cluster(name=evaluate_clustername(),
driver="rancherKubernetesEngine",
rancherKubernetesEngineConfig=rke_config)
assert cluster.state == "provisioning"
i = 0
for i in range(0, 3):
aws_node = aws_nodes[i]
docker_run_cmd = \
get_custom_host_registration_cmd(client, cluster, node_roles[i],
aws_node)
aws_node.execute_command(docker_run_cmd)
cluster = validate_cluster(client, cluster)
worker_nodes = get_role_nodes(cluster, "worker")
assert len(worker_nodes) == 1
# Add 1 more worker node
aws_node = aws_nodes[3]
docker_run_cmd = get_custom_host_registration_cmd(client, cluster,
["worker"], aws_node)
aws_node.execute_command(docker_run_cmd)
wait_for_cluster_node_count(client, cluster, 4)
validate_cluster(client, cluster, check_intermediate_state=False)
# Delete the first worker node
client.delete(worker_nodes[0])
validate_cluster(client, cluster, check_intermediate_state=False)
cluster_cleanup(client, cluster, aws_nodes)
def test_rke_custom_host_control_node_power_down():
aws_nodes = \
AmazonWebServices().create_multiple_nodes(
5, random_test_name(HOST_NAME))
node_roles = [["controlplane"], ["etcd"],
["worker"]]
client = get_user_client()
cluster = client.create_cluster(name=evaluate_clustername(),
driver="rancherKubernetesEngine",
rancherKubernetesEngineConfig=rke_config)
assert cluster.state == "provisioning"
i = 0
for i in range(0, 3):
aws_node = aws_nodes[i]
docker_run_cmd = \
get_custom_host_registration_cmd(client, cluster, node_roles[i],
aws_node)
aws_node.execute_command(docker_run_cmd)
cluster = validate_cluster(client, cluster)
control_nodes = get_role_nodes(cluster, "control")
assert len(control_nodes) == 1
# Add 1 more control node
aws_node = aws_nodes[3]
docker_run_cmd = get_custom_host_registration_cmd(client, cluster,
["controlplane"],
aws_node)
aws_node.execute_command(docker_run_cmd)
wait_for_cluster_node_count(client, cluster, 4)
validate_cluster(client, cluster, check_intermediate_state=False)
# Power Down the first control node
aws_control_node = aws_nodes[0]
AmazonWebServices().stop_node(aws_control_node, wait_for_stopped=True)
control_node = control_nodes[0]
wait_for_node_status(client, control_node, "unavailable")
validate_cluster(
client, cluster,
check_intermediate_state=False,
nodes_not_in_active_state=[control_node.requestedHostname])
# Add 1 more worker node
aws_node = aws_nodes[4]
docker_run_cmd = get_custom_host_registration_cmd(client, cluster,
["worker"], aws_node)
aws_node.execute_command(docker_run_cmd)
wait_for_cluster_node_count(client, cluster, 4)
validate_cluster(client, cluster, check_intermediate_state=False)
cluster_cleanup(client, cluster, aws_nodes)
@if_test_edit_cluster
def test_edit_cluster_k8s_version():
client = get_user_client()
clusters = client.list_cluster(name=evaluate_clustername()).data
assert len(clusters) == 1
cluster = clusters[0]
rke_config = cluster.rancherKubernetesEngineConfig
rke_updated_config = rke_config.copy()
rke_updated_config["kubernetesVersion"] = K8S_VERSION_UPGRADE
cluster = client.update(cluster,
name=cluster.name,
rancherKubernetesEngineConfig=rke_updated_config)
cluster = validate_cluster(client, cluster, intermediate_state="updating",
k8s_version=K8S_VERSION_UPGRADE)
def test_delete_cluster():
client = get_user_client()
cluster = get_cluster_by_name(client, CLUSTER_NAME)
delete_cluster(client, cluster)
def validate_rke_dm_host_1(node_template,
rancherKubernetesEngineConfig=rke_config,
attemptDelete=True):
client = get_user_client()
nodes = []
node_name = random_node_name()
node = {"hostnamePrefix": node_name,
"nodeTemplateId": node_template.id,
"controlPlane": True,
"etcd": True,
"worker": True,
"quantity": 1,
"clusterId": None}
nodes.append(node)
cluster, node_pools = create_and_validate_cluster(
client, nodes, rancherKubernetesEngineConfig)
if attemptDelete:
cluster_cleanup(client, cluster)
else:
return cluster, node_pools
def validate_rke_dm_host_2(node_template,
rancherKubernetesEngineConfig=rke_config,
attemptDelete=True, clusterName=None):
client = get_user_client()
nodes = []
node_name = random_node_name()
node = {"hostnamePrefix": node_name,
"nodeTemplateId": node_template.id,
"requestedHostname": node_name,
"controlPlane": True,
"quantity": 1}
nodes.append(node)
node_name = random_node_name()
node = {"hostnamePrefix": node_name,
"nodeTemplateId": node_template.id,
"requestedHostname": node_name,
"etcd": True,
"quantity": 1}
nodes.append(node)
node_name = random_node_name()
node = {"hostnamePrefix": node_name,
"nodeTemplateId": node_template.id,
"requestedHostname": node_name,
"worker": True,
"quantity": 3}
nodes.append(node)
cluster, node_pools = create_and_validate_cluster(
client, nodes, rancherKubernetesEngineConfig, clusterName)
if attemptDelete:
cluster_cleanup(client, cluster)
def validate_rke_dm_host_3(node_template,
rancherKubernetesEngineConfig=rke_config):
client = get_user_client()
nodes = []
node_name = random_node_name()
node = {"hostnamePrefix": node_name,
"nodeTemplateId": node_template.id,
"requestedHostname": node_name,
"controlPlane": True,
"quantity": 2}
nodes.append(node)
node_name = random_node_name()
node = {"hostnamePrefix": node_name,
"nodeTemplateId": node_template.id,
"requestedHostname": node_name,
"etcd": True,
"quantity": 3}
nodes.append(node)
node_name = random_node_name()
node = {"hostnamePrefix": node_name,
"nodeTemplateId": node_template.id,
"requestedHostname": node_name,
"worker": True,
"quantity": 3}
nodes.append(node)
cluster, node_pools = create_and_validate_cluster(
client, nodes, rancherKubernetesEngineConfig)
cluster_cleanup(client, cluster)
def validate_rke_dm_host_4(node_template,
rancherKubernetesEngineConfig=rke_config):
client = get_user_client()
# Create cluster and add a node pool to this cluster
nodes = []
node_name = random_node_name()
node = {"hostnamePrefix": node_name,
"nodeTemplateId": node_template.id,
"requestedHostname": node_name,
"controlPlane": True,
"etcd": True,
"worker": True,
"quantity": 1}
nodes.append(node)
cluster, node_pools = create_and_validate_cluster(
client, nodes, rancherKubernetesEngineConfig)
assert len(cluster.nodes()) == 1
node1 = cluster.nodes().data[0]
assert len(node_pools) == 1
node_pool = node_pools[0]
# Increase the scale of the node pool to 3
node_pool = client.update(node_pool, nodeTemplateId=node_template.id,
quantity=3)
cluster = validate_cluster(client, cluster, intermediate_state="updating")
nodes = client.list_node(clusterId=cluster.id).data
assert len(nodes) == 3
# Delete node1
node1 = client.delete(node1)
wait_for_node_to_be_deleted(client, node1)
cluster = validate_cluster(client, cluster, intermediate_state="updating")
nodes = client.list_node(clusterId=cluster.id).data
assert len(nodes) == 3
cluster_cleanup(client, cluster)
def create_and_validate_cluster(client, nodes,
rancherKubernetesEngineConfig=rke_config,
clusterName=None):
cluster = client.create_cluster(
name=clusterName
if clusterName is not None else evaluate_clustername(),
rancherKubernetesEngineConfig=rancherKubernetesEngineConfig)
node_pools = []
for node in nodes:
node["clusterId"] = cluster.id
success = False
start = time.time()
while not success:
if time.time() - start > 10:
raise AssertionError(
"Timed out waiting for cluster owner global Roles")
try:
time.sleep(1)
node_pool = client.create_node_pool(**node)
success = True
except ApiError:
success = False
node_pool = client.wait_success(node_pool)
node_pools.append(node_pool)
cluster = validate_cluster(client, cluster)
return cluster, node_pools
def random_node_name():
if not HOST_NAME or HOST_NAME == "testcustom":
return "testauto" + "-" + str(random_int(100000, 999999))
else:
return HOST_NAME + "-" + str(random_int(100000, 999999))
def evaluate_clustername():
if CLUSTER_NAME == "":
cluster_name = random_name()
else:
cluster_name = CLUSTER_NAME
return cluster_name
@pytest.fixture(scope='session')
def node_template_az():
client = get_user_client()
    azure_cloud_credential_config = {
"clientId": AZURE_CLIENT_ID,
"clientSecret": AZURE_CLIENT_SECRET,
"subscriptionId": AZURE_SUBSCRIPTION_ID
}
azure_cloud_credential = client.create_cloud_credential(
        azurecredentialConfig=azure_cloud_credential_config
)
azConfig = {
"availabilitySet": "docker-machine",
"customData": "",
"dns": "",
"dockerPort": "2376",
"environment": "AzurePublicCloud",
"image": "canonical:UbuntuServer:16.04.0-LTS:latest",
"location": "westus",
"noPublicIp": False,
"openPort": [
"6443/tcp",
"2379/tcp",
"2380/tcp",
"8472/udp",
"4789/udp",
"10256/tcp",
"10250/tcp",
"10251/tcp",
"10252/tcp",
"80/tcp",
"443/tcp",
"9999/tcp",
"8888/tcp",
"30456/tcp",
"30457/tcp",
"30458/tcp",
"30459/tcp",
"9001/tcp"
],
"privateIpAddress": "",
"resourceGroup": "docker-machine",
"size": "Standard_A2",
"sshUser": "docker-user",
"staticPublicIp": False,
"storageType": "Standard_LRS",
"subnet": "docker-machine",
"subnetPrefix": "192.168.0.0/16",
"usePrivateIp": False,
"vnet": "docker-machine-vnet"
}
node_template = client.create_node_template(
azureConfig=azConfig,
name=random_name(),
driver="azure",
cloudCredentialId=azure_cloud_credential.id,
useInternalIpAddress=True)
node_template = client.wait_success(node_template)
return node_template
@pytest.fixture(scope='session')
def node_template_do():
client = get_user_client()
do_cloud_credential_config = {"accessToken": DO_ACCESSKEY}
do_cloud_credential = client.create_cloud_credential(
digitaloceancredentialConfig=do_cloud_credential_config
)
node_template = client.create_node_template(
digitaloceanConfig={"region": "nyc3",
"size": "2gb",
"image": "ubuntu-18-04-x64"},
name=random_name(),
driver="digitalocean",
cloudCredentialId=do_cloud_credential.id,
engineInstallURL=engine_install_url,
useInternalIpAddress=True)
node_template = client.wait_success(node_template)
return node_template
@pytest.fixture(scope='session')
def node_template_linode():
client = get_user_client()
linode_cloud_credential_config = {"token": LINODE_ACCESSKEY}
linode_cloud_credential = client.create_cloud_credential(
linodecredentialConfig=linode_cloud_credential_config
)
node_template = client.create_node_template(
linodeConfig={"authorizedUsers": "",
"createPrivateIp": False,
"dockerPort": "2376",
"image": "linode/ubuntu18.04",
"instanceType": "g6-standard-2",
"label": "",
"region": "us-west",
"sshPort": "22",
"sshUser": "",
"stackscript": "",
"stackscriptData": "",
"swapSize": "512",
"tags": "",
"uaPrefix": "Rancher"},
name=random_name(),
driver="linode",
cloudCredentialId=linode_cloud_credential.id,
engineInstallURL=engine_install_url,
useInternalIpAddress=True)
node_template = client.wait_success(node_template)
return node_template
@pytest.fixture(scope='session')
def node_template_ec2():
client = get_user_client()
ec2_cloud_credential_config = {"accessKey": AWS_ACCESS_KEY_ID,
"secretKey": AWS_SECRET_ACCESS_KEY}
ec2_cloud_credential = client.create_cloud_credential(
amazonec2credentialConfig=ec2_cloud_credential_config
)
amazonec2Config = {
"instanceType": "t3.medium",
"region": AWS_REGION,
"rootSize": "16",
"securityGroup": [AWS_SG],
"sshUser": "ubuntu",
"subnetId": AWS_SUBNET,
"usePrivateAddress": False,
"volumeType": "gp2",
"vpcId": AWS_VPC,
"zone": AWS_ZONE
}
node_template = client.create_node_template(
amazonec2Config=amazonec2Config,
name=random_name(),
useInternalIpAddress=True,
driver="amazonec2",
engineInstallURL=engine_install_url,
cloudCredentialId=ec2_cloud_credential.id
)
node_template = client.wait_success(node_template)
return node_template
@pytest.fixture(scope='session')
def node_template_ec2_with_provider():
client = get_user_client()
ec2_cloud_credential_config = {"accessKey": AWS_ACCESS_KEY_ID,
"secretKey": AWS_SECRET_ACCESS_KEY}
ec2_cloud_credential = client.create_cloud_credential(
amazonec2credentialConfig=ec2_cloud_credential_config
)
amazonec2Config = {
"instanceType": "t3a.medium",
"region": AWS_REGION,
"rootSize": "16",
"securityGroup": [AWS_SG],
"sshUser": "ubuntu",
"subnetId": AWS_SUBNET,
"usePrivateAddress": False,
"volumeType": "gp2",
"vpcId": AWS_VPC,
"zone": AWS_ZONE,
"iamInstanceProfile": AWS_IAM_PROFILE
}
node_template = client.create_node_template(
amazonec2Config=amazonec2Config,
name=random_name(),
useInternalIpAddress=True,
driver="amazonec2",
engineInstallURL=engine_install_url,
cloudCredentialId=ec2_cloud_credential.id
)
node_template = client.wait_success(node_template)
return node_template
def register_host_after_delay(client, cluster, node_role, delay):
aws_nodes = node_role["nodes"]
for aws_node in aws_nodes:
docker_run_cmd = \
get_custom_host_registration_cmd(
client, cluster, node_role["roles"], aws_node)
aws_node.execute_command(docker_run_cmd)
time.sleep(delay)
def create_and_validate_custom_host(node_roles, random_cluster_name=False,
validate=True, version=K8S_VERSION):
client = get_user_client()
aws_nodes = \
AmazonWebServices().create_multiple_nodes(
len(node_roles), random_test_name(HOST_NAME))
cluster, nodes = create_custom_host_from_nodes(aws_nodes, node_roles,
random_cluster_name,
version=version)
if validate:
cluster = validate_cluster(client, cluster,
check_intermediate_state=False,
k8s_version=version)
return cluster, nodes
def create_custom_host_from_nodes(nodes, node_roles,
random_cluster_name=False, windows=False,
windows_flannel_backend='vxlan',
version=K8S_VERSION):
client = get_user_client()
cluster_name = random_name() if random_cluster_name \
else evaluate_clustername()
if windows:
if windows_flannel_backend == "host-gw":
config = rke_config_windows_host_gw_aws_provider
else:
config = rke_config_windows
else:
config = rke_config
if version != "":
config["kubernetesVersion"] = version
cluster = client.create_cluster(name=cluster_name,
driver="rancherKubernetesEngine",
rancherKubernetesEngineConfig=config,
windowsPreferedCluster=windows)
assert cluster.state == "provisioning"
i = 0
for aws_node in nodes:
docker_run_cmd = \
get_custom_host_registration_cmd(client, cluster, node_roles[i],
aws_node)
print("Docker run command: " + docker_run_cmd)
for nr in node_roles[i]:
aws_node.roles.append(nr)
result = aws_node.execute_command(docker_run_cmd)
print(result)
i += 1
cluster = validate_cluster_state(client, cluster,
check_intermediate_state=False)
return cluster, nodes
def get_cis_rke_config(profile=CIS_SCAN_PROFILE):
rke_tmp_config = None
rke_config_dict = None
try:
rke_config_dict = {
'rke-cis-1.4': rke_config_cis_1_4,
'rke-cis-1.5': rke_config_cis_1_5
}
rke_tmp_config = rke_config_dict[profile]
except KeyError:
print('Invalid RKE CIS profile. Supported profiles: ')
for k in rke_config_dict.keys():
print("{0}".format(k))
else:
print('Valid RKE CIS Profile loaded: {0}'.format(profile))
return rke_tmp_config
| 43,156 | 34.964167 | 79 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_global_role.py
|
from .common import * # NOQA
from rancher import ApiError
import pytest
# values used to create a catalog
BRANCH = "dev"
URL = "https://git.rancher.io/system-charts"
def test_global_role_create_1(remove_resource):
""" test that admin can create a new global role, assign it
to a standard user, then the user get the newly-assigned permission
a global role for managing catalogs is used for this test.
"""
# create a new global role that permits creating catalogs
gr = validate_create_global_role(ADMIN_TOKEN, True, True)
remove_resource(gr)
# create a new user
client = get_admin_client()
user, token = create_user(client)
remove_resource(user)
# check that the user can NOT create catalogs
name = random_name()
validate_create_catalog(token,
catalog_name=name,
branch=BRANCH,
url=URL,
permission=False)
client.create_global_role_binding(globalRoleId=gr.id, userId=user.id)
# check that the user has the global role
target_grb = client.list_global_role_binding(userId=user.id,
globalRoleId=gr.id).data
assert len(target_grb) == 1
# the user can create catalogs
obj = validate_create_catalog(token,
catalog_name=name,
branch=BRANCH,
url=URL,
permission=True)
remove_resource(obj)
def test_global_role_create_2(remove_resource):
""" test that admin can create a new global role, assign it
to a standard user, then the user get the newly-assigned permission
a global role for listing clusters is used for this test.
"""
# create a new global role that permits listing clusters
gr = validate_create_global_role(ADMIN_TOKEN, True, True,
TEMPLATE_LIST_CLUSTER)
remove_resource(gr)
# create a new user
client = get_admin_client()
user, token = create_user(client)
remove_resource(user)
# check that the new user can not list cluster
user_client = get_client_for_token(token)
data = user_client.list_cluster().data
assert len(data) == 0, "the user should not be able to list any cluster"
client.create_global_role_binding(globalRoleId=gr.id, userId=user.id)
# check that the user has the global role
target_grb = client.list_global_role_binding(userId=user.id,
globalRoleId=gr.id).data
assert len(target_grb) == 1
# check that the new user can list cluster
data = user_client.list_cluster().data
assert len(data) > 0
def test_global_role_edit(remove_resource):
""" test that admin can edit a global role, and permissions of user who
binds to this role reflect the change"""
gr = validate_create_global_role(ADMIN_TOKEN, True, True)
remove_resource(gr)
client = get_admin_client()
user, user_token = create_user(client)
remove_resource(user)
# check that the user can NOT create catalogs
name = random_name()
validate_create_catalog(user_token,
catalog_name=name,
branch=BRANCH,
url=URL,
permission=False)
client.create_global_role_binding(globalRoleId=gr.id, userId=user.id)
# now he can create catalogs
catalog = validate_create_catalog(user_token,
catalog_name=name,
branch=BRANCH,
url=URL,
permission=True)
remove_resource(catalog)
# edit the global role
validate_edit_global_role(ADMIN_TOKEN, gr, True)
# the user can not create new catalog
validate_create_catalog(user_token,
catalog_name=name,
branch=BRANCH,
url=URL,
permission=False)
def test_global_role_delete(remove_resource):
""" test that admin can edit a global role, and permissions of user who
binds to this role reflect the change"""
gr = validate_create_global_role(ADMIN_TOKEN, True, True)
remove_resource(gr)
client = get_admin_client()
user, token = create_user(client)
remove_resource(user)
client.create_global_role_binding(globalRoleId=gr.id, userId=user.id)
name = random_name()
catalog = validate_create_catalog(token,
catalog_name=name,
branch=BRANCH,
url=URL,
permission=True)
remove_resource(catalog)
validate_delete_global_role(ADMIN_TOKEN, gr, True)
# the user can not create new catalog
validate_create_catalog(token,
catalog_name=name,
branch=BRANCH,
url=URL,
permission=False)
def test_builtin_global_role():
# builtin global role can not be deleted
client = get_admin_client()
gr_list = client.list_global_role(name="Manage Users").data
assert len(gr_list) == 1
gr = gr_list[0]
try:
client.delete(gr)
except ApiError as e:
assert e.error.status == 403
assert e.error.code == 'PermissionDenied'
# builtin global role can be set as new user default
gr = client.update(gr, {"newUserDefault": "true"})
gr = client.reload(gr)
assert gr.newUserDefault is True
gr = client.update(gr, {"newUserDefault": "false"})
gr = client.reload(gr)
assert gr.newUserDefault is False
def validate_list_global_role(token, permission=False):
client = get_client_for_token(token)
res = client.list_global_role().data
if not permission:
assert len(res) == 0
else:
assert len(res) > 0
@if_test_rbac
def test_rbac_global_role_list_cluster_owner():
# cluster owner can not list global roles
token = rbac_get_user_token_by_role(CLUSTER_OWNER)
validate_list_global_role(token, False)
@if_test_rbac
def test_rbac_global_role_list_cluster_member():
# cluster member can not list global roles
token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
validate_list_global_role(token, False)
@if_test_rbac
def test_rbac_global_role_list_project_owner():
# project owner can not list global roles
    token = rbac_get_user_token_by_role(PROJECT_OWNER)
validate_list_global_role(token, False)
@if_test_rbac
def test_rbac_global_role_list_project_member():
# project member can not list global roles
token = rbac_get_user_token_by_role(PROJECT_MEMBER)
validate_list_global_role(token, False)
@if_test_rbac
def test_rbac_global_role_list_project_read_only():
# project read-only can not list global roles
token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
validate_list_global_role(token, False)
@if_test_rbac
def test_rbac_global_role_create_cluster_owner():
# cluster owner can not create global roles
token = rbac_get_user_token_by_role(CLUSTER_OWNER)
validate_create_global_role(token, permission=False)
@if_test_rbac
def test_rbac_global_role_create_cluster_member():
# cluster member can not create global roles
token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
validate_create_global_role(token, permission=False)
@if_test_rbac
def test_rbac_global_role_create_project_owner():
# project owner can not create global roles
token = rbac_get_user_token_by_role(PROJECT_OWNER)
validate_create_global_role(token, permission=False)
@if_test_rbac
def test_rbac_global_role_create_project_member():
# project member can not create global roles
token = rbac_get_user_token_by_role(PROJECT_MEMBER)
validate_create_global_role(token, permission=False)
@if_test_rbac
def test_rbac_global_role_create_project_read_only():
# project read-only can not create global roles
token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
validate_create_global_role(token, permission=False)
@if_test_rbac
def test_rbac_global_role_delete_cluster_owner(remove_resource):
# cluster owner can not delete global roles
gr = create_gr()
remove_resource(gr)
token = rbac_get_user_token_by_role(CLUSTER_OWNER)
validate_delete_global_role(token, gr, False)
@if_test_rbac
def test_rbac_global_role_delete_cluster_member(remove_resource):
# cluster member can not delete global roles
gr = create_gr()
remove_resource(gr)
token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
validate_delete_global_role(token, gr, False)
@if_test_rbac
def test_rbac_global_role_delete_project_owner(remove_resource):
# project owner can not delete global roles
gr = create_gr()
remove_resource(gr)
token = rbac_get_user_token_by_role(PROJECT_OWNER)
validate_delete_global_role(token, gr, False)
@if_test_rbac
def test_rbac_global_role_delete_project_member(remove_resource):
# project member can not delete global roles
gr = create_gr()
remove_resource(gr)
token = rbac_get_user_token_by_role(PROJECT_MEMBER)
validate_delete_global_role(token, gr, False)
@if_test_rbac
def test_rbac_global_role_delete_project_read_only(remove_resource):
# project read-only can not delete global roles
gr = create_gr()
remove_resource(gr)
token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
validate_delete_global_role(token, gr, False)
@if_test_rbac
def test_rbac_global_role_edit_cluster_owner(remove_resource):
# cluster owner can not edit global roles
gr = create_gr()
remove_resource(gr)
token = rbac_get_user_token_by_role(CLUSTER_OWNER)
validate_edit_global_role(token, gr, False)
@if_test_rbac
def test_rbac_global_role_edit_cluster_member(remove_resource):
# cluster member can not edit global roles
gr = create_gr()
remove_resource(gr)
token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
validate_edit_global_role(token, gr, False)
@if_test_rbac
def test_rbac_global_role_edit_project_owner(remove_resource):
# project owner can not edit global roles
gr = create_gr()
remove_resource(gr)
token = rbac_get_user_token_by_role(PROJECT_OWNER)
validate_edit_global_role(token, gr, False)
@if_test_rbac
def test_rbac_global_role_edit_project_member(remove_resource):
# project member can not edit global roles
gr = create_gr()
remove_resource(gr)
token = rbac_get_user_token_by_role(PROJECT_MEMBER)
validate_edit_global_role(token, gr, False)
@if_test_rbac
def test_rbac_global_role_edit_project_read_only(remove_resource):
# project read-only can not edit global roles
gr = create_gr()
remove_resource(gr)
token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
validate_edit_global_role(token, gr, False)
@pytest.fixture(scope='module', autouse=True)
def create_project_client(request):
client, cluster = get_global_admin_client_and_cluster()
create_kubeconfig(cluster)
def create_gr():
return validate_create_global_role(ADMIN_TOKEN, False, True)
def validate_create_global_role(token, new_user_default=False,
permission=False, template=None):
""" validate if the user has the permission to create global role."""
if template is None:
template = TEMPLATE_MANAGE_CATALOG
client = get_client_for_token(token)
t_name = random_name()
template = generate_template_global_role(t_name, new_user_default,
template)
# catch the expected error if the user has no permission to create
if not permission:
with pytest.raises(ApiError) as e:
client.create_global_role(template)
assert e.value.error.status == 403 and \
e.value.error.code == 'Forbidden', \
"user with no permission should receive 403: Forbidden"
return None
else:
try:
client.create_global_role(template)
except ApiError as e:
assert False, "user with permission should receive no exception:" \
+ e.error.status + " " + e.error.code
# check that the global role is created
gr_list = client.list_global_role(name=t_name).data
assert len(gr_list) == 1
gr = gr_list[0]
# check that the global role is set to be the default
assert gr.newUserDefault == new_user_default
return gr
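# Editorial sketch (assumption; the real TEMPLATE_MANAGE_CATALOG and
# generate_template_global_role live in .common and are not shown here):
# a v3 globalRole payload is essentially a name, a newUserDefault flag and a
# list of RBAC-style rules, roughly like the hypothetical template below.
def _example_manage_catalog_template(name, new_user_default=False):
    """Hypothetical global-role template for catalog management."""
    return {"name": name,
            "newUserDefault": new_user_default,
            "rules": [{"apiGroups": ["management.cattle.io"],
                       "resources": ["catalogs"],
                       "verbs": ["*"]}]}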
def validate_delete_global_role(token, global_role, permission=False):
""" validate if the user has the permission to delete global role."""
client = get_client_for_token(token)
# catch the expected error if the user has no permission to delete
if not permission:
with pytest.raises(ApiError) as e:
client.delete(global_role)
assert e.value.error.status == 403 and \
e.value.error.code == 'Forbidden', \
"user with no permission should receive 403: Forbidden"
return
else:
try:
client.delete(global_role)
except ApiError as e:
assert False, "user with permission should receive no exception:" \
+ e.error.status + " " + e.error.code
# check that the global role is removed
client = get_client_for_token(ADMIN_TOKEN)
assert client.reload(global_role) is None
def validate_edit_global_role(token, global_role, permission=False):
""" for the testing purpose, this method removes all permissions in
the global role and unset it as new user default."""
client = get_client_for_token(token)
# edit the global role
template = deepcopy(TEMPLATE_MANAGE_CATALOG)
template["newUserDefault"] = "false"
template["rules"] = []
# catch the expected error if the user has no permission to edit
if not permission:
with pytest.raises(ApiError) as e:
client.update(global_role, template)
assert e.value.error.status == 403 and \
e.value.error.code == 'Forbidden', \
"user with no permission should receive 403: Forbidden"
return None
else:
try:
client.update(global_role, template)
except ApiError as e:
assert False, "user with permission should receive no exception:" \
+ e.error.status + " " + e.error.code
# check that the global role is not the new user default
gr = client.reload(global_role)
assert gr.newUserDefault is False
# check that there is no rule left
assert (gr.rules is None or gr.rules == [])
return gr
| 14,873 | 34.414286 | 79 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_import_rke2_cluster.py
|
from python_terraform import * # NOQA
from .common import * # NOQA
RANCHER_AWS_AMI = os.environ.get("AWS_AMI", "")
RANCHER_AWS_USER = os.environ.get("AWS_USER", "ubuntu")
RANCHER_REGION = os.environ.get("AWS_REGION")
RANCHER_VPC_ID = os.environ.get("AWS_VPC")
RANCHER_SUBNETS = os.environ.get("AWS_SUBNET")
RANCHER_AWS_SG = os.environ.get("AWS_SECURITY_GROUPS")
RANCHER_AVAILABILITY_ZONE = os.environ.get("AWS_AVAILABILITY_ZONE")
RANCHER_QA_SPACE = os.environ.get("RANCHER_QA_SPACE", "qa.rancher.space.")
RANCHER_EC2_INSTANCE_CLASS = os.environ.get("RANCHER_EC2_INSTANCE_CLASS",
"t3a.medium")
HOST_NAME = os.environ.get('RANCHER_HOST_NAME', "sa")
RANCHER_IAM_ROLE = os.environ.get("RANCHER_IAM_ROLE")
RANCHER_RKE2_VERSION = os.environ.get("RANCHER_RKE2_VERSION", "")
RANCHER_RKE2_CHANNEL = os.environ.get("RANCHER_RKE2_CHANNEL", "null")
RANCHER_RANCHERD_VERSION = os.environ.get("RANCHER_RANCHERD_VERSION", "")
RANCHER_RKE2_NO_OF_SERVER_NODES = \
os.environ.get("RANCHER_RKE2_NO_OF_SERVER_NODES", 3)
RANCHER_RKE2_NO_OF_WORKER_NODES = \
os.environ.get("RANCHER_RKE2_NO_OF_WORKER_NODES", 0)
RANCHER_RKE2_SERVER_FLAGS = os.environ.get("RANCHER_RKE2_SERVER_FLAGS", "server")
RANCHER_RKE2_WORKER_FLAGS = os.environ.get("RANCHER_RKE2_WORKER_FLAGS", "agent")
RANCHER_RKE2_OPERATING_SYSTEM = os.environ.get("RANCHER_RKE2_OPERATING_SYSTEM")
AWS_VOLUME_SIZE = os.environ.get("AWS_VOLUME_SIZE", "20")
RANCHER_RKE2_RHEL_USERNAME = os.environ.get("RANCHER_RKE2_RHEL_USERNAME", "")
RANCHER_RKE2_RHEL_PASSWORD = os.environ.get("RANCHER_RKE2_RHEL_PASSWORD", "")
RANCHER_RKE2_KUBECONFIG_PATH = DATA_SUBDIR + "/rke2_kubeconfig.yaml"
def test_create_rancherd_multiple_control_cluster():
cluster_version = RANCHER_RANCHERD_VERSION
cluster_type = "rancherd"
rke2_clusterfilepath = create_rke2_multiple_control_cluster(cluster_type, \
cluster_version)
fqdn_file = "/tmp/" + RANCHER_HOSTNAME_PREFIX + "_fixed_reg_addr"
with open(fqdn_file, 'r') as f:
fqdn = f.read()
fqdn = fqdn.strip()
print("RANCHERD URL\nhttps://{0}:8443\n".format(fqdn), flush=True)
ip_file = "/tmp/" + RANCHER_HOSTNAME_PREFIX + "_master_ip"
with open(ip_file, 'r') as f:
ip = f.read()
ip = ip.strip()
keyPath = os.path.abspath('.') + '/.ssh/' + AWS_SSH_KEY_NAME
os.chmod(keyPath, 0o400)
print("\n\nRANCHERD USERNAME AND PASSWORD\n", flush=True)
cmd = "ssh -o StrictHostKeyChecking=no -i " + keyPath + " " + RANCHER_AWS_USER + \
"@" + ip + " rancherd reset-admin"
result = run_command(cmd, True)
print(result)
def test_create_rke2_multiple_control_cluster():
cluster_version = RANCHER_RKE2_VERSION
cluster_type = "rke2"
rke2_clusterfilepath = create_rke2_multiple_control_cluster(cluster_type, \
cluster_version)
def test_import_rke2_multiple_control_cluster():
client = get_user_client()
cluster_version = RANCHER_RKE2_VERSION
cluster_type = "rke2"
rke2_clusterfilepath = create_rke2_multiple_control_cluster(
cluster_type, cluster_version)
cluster = create_rancher_cluster(client, rke2_clusterfilepath)
def create_rke2_multiple_control_cluster(cluster_type, cluster_version):
rke2_kubeconfig_file = "rke2_kubeconfig.yaml"
rke2_clusterfilepath = DATA_SUBDIR + "/" + rke2_kubeconfig_file
tf_dir = DATA_SUBDIR + "/" + "terraform/rke2/master"
keyPath = os.path.abspath('.') + '/.ssh/' + AWS_SSH_KEY_NAME
os.chmod(keyPath, 0o400)
no_of_servers = int(RANCHER_RKE2_NO_OF_SERVER_NODES) - 1
tf = Terraform(working_dir=tf_dir,
variables={'region': RANCHER_REGION,
'vpc_id': RANCHER_VPC_ID,
'subnets': RANCHER_SUBNETS,
'sg_id': RANCHER_AWS_SG,
'availability_zone': RANCHER_AVAILABILITY_ZONE,
'aws_ami': RANCHER_AWS_AMI,
'aws_user': RANCHER_AWS_USER,
'resource_name': RANCHER_HOSTNAME_PREFIX,
'access_key': keyPath,
'ec2_instance_class': RANCHER_EC2_INSTANCE_CLASS,
'username': RANCHER_RKE2_RHEL_USERNAME,
'password': RANCHER_RKE2_RHEL_PASSWORD,
'rke2_version': cluster_version,
'rke2_channel': RANCHER_RKE2_CHANNEL,
'no_of_server_nodes': no_of_servers,
'server_flags': RANCHER_RKE2_SERVER_FLAGS,
'qa_space': RANCHER_QA_SPACE,
'node_os': RANCHER_RKE2_OPERATING_SYSTEM,
'cluster_type': cluster_type,
'iam_role': RANCHER_IAM_ROLE,
'volume_size': AWS_VOLUME_SIZE})
print("Creating cluster")
tf.init()
tf.plan(out="plan_server.out")
print(tf.apply("--auto-approve"))
print("\n\n")
if int(RANCHER_RKE2_NO_OF_WORKER_NODES) > 0:
tf_dir = DATA_SUBDIR + "/" + "terraform/rke2/worker"
tf = Terraform(working_dir=tf_dir,
variables={'region': RANCHER_REGION,
'vpc_id': RANCHER_VPC_ID,
'subnets': RANCHER_SUBNETS,
'sg_id': RANCHER_AWS_SG,
'availability_zone': RANCHER_AVAILABILITY_ZONE,
'aws_ami': RANCHER_AWS_AMI,
'aws_user': RANCHER_AWS_USER,
'ec2_instance_class': RANCHER_EC2_INSTANCE_CLASS,
'resource_name': RANCHER_HOSTNAME_PREFIX,
'access_key': keyPath,
'rke2_version': cluster_version,
'rke2_channel': RANCHER_RKE2_CHANNEL,
'username': RANCHER_RKE2_RHEL_USERNAME,
'password': RANCHER_RKE2_RHEL_PASSWORD,
'node_os': RANCHER_RKE2_OPERATING_SYSTEM,
'cluster_type': cluster_type,
'no_of_worker_nodes': int(RANCHER_RKE2_NO_OF_WORKER_NODES),
'worker_flags': RANCHER_RKE2_WORKER_FLAGS,
'iam_role': RANCHER_IAM_ROLE,
'volume_size': AWS_VOLUME_SIZE})
print("Joining worker nodes")
tf.init()
tf.plan(out="plan_worker.out")
print(tf.apply("--auto-approve"))
print("\n\n")
cmd = "cp /tmp/" + RANCHER_HOSTNAME_PREFIX + "_kubeconfig " + \
rke2_clusterfilepath
os.system(cmd)
is_file = os.path.isfile(rke2_clusterfilepath)
assert is_file
print(rke2_clusterfilepath)
with open(rke2_clusterfilepath, 'r') as f:
print(f.read())
check_cluster_status(rke2_clusterfilepath)
print("\n\nRKE2 Cluster Created\n")
cmd = "kubectl get nodes --kubeconfig=" + rke2_clusterfilepath
print(run_command(cmd))
cmd = "kubectl get pods -A --kubeconfig=" + rke2_clusterfilepath
print(run_command(cmd))
print("\n\n")
return rke2_clusterfilepath
def create_rancher_cluster(client, rke2_clusterfilepath):
if CLUSTER_NAME:
clustername = CLUSTER_NAME
else:
clustername = random_test_name("testcustom-rke2")
cluster = client.create_cluster(name=clustername)
cluster_token = create_custom_host_registration_token(client, cluster)
command = cluster_token.insecureCommand
finalimportcommand = command + " --kubeconfig " + rke2_clusterfilepath
print(finalimportcommand)
result = run_command(finalimportcommand)
clusters = client.list_cluster(name=clustername).data
assert len(clusters) > 0
print("Cluster is")
print(clusters[0])
# Validate the cluster
cluster = validate_cluster(client, clusters[0],
check_intermediate_state=False)
return cluster
def check_cluster_status(kubeconfig):
nodeNotReady = True
    retries = 0
try:
while nodeNotReady and (retries < 10):
cmd = "kubectl get nodes --no-headers -A --kubeconfig=" + kubeconfig
nodes = execute_command(cmd, False)
nodeNotReady = False
for node in nodes.strip().split("\n"):
state = node.split()[1]
if state != "Ready":
nodeNotReady = True
if not nodeNotReady:
break
time.sleep(60)
retries = retries + 1
if nodeNotReady:
raise AssertionError("Nodes failed to be in Ready state after 5 min")
actual_count_of_nodes = len(nodes.strip().split("\n"))
expected_count_of_nodes = int(RANCHER_RKE2_NO_OF_SERVER_NODES) - 1 + \
int(RANCHER_RKE2_NO_OF_WORKER_NODES)
if actual_count_of_nodes < expected_count_of_nodes:
raise AssertionError("Nodes failed to join the cluster, \
Expected: {} Actual: {}".format(expected_count_of_nodes, actual_count_of_nodes))
podsNotReady = True
retries = 0
while podsNotReady and (retries < 10):
cmd = "kubectl get pods --no-headers -A --kubeconfig=" + kubeconfig
pods = execute_command(cmd, False)
podsNotReady = False
for pod in pods.strip().split("\n"):
status = pod.split()[3]
if status != "Running" and status != "Completed":
podsNotReady = True
if not podsNotReady:
break
time.sleep(60)
retries = retries + 1
if podsNotReady:
raise AssertionError("Pods are not in desired state")
except AssertionError as e:
print("FAIL: {}".format(str(e)))
def execute_command(command, log_out=True):
if log_out:
print("run cmd: \t{0}".format(command))
    for i in range(3):
        try:
            res = subprocess.check_output(command, shell=True, text=True)
            # return on the first successful run; retry only on failure
            return res
        except subprocess.CalledProcessError:
            print("Re-trying...")
            time.sleep(10)
    raise AssertionError(
        "Command failed after 3 attempts: {0}".format(command))
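# Editorial note (usage sketch): execute_command is the retry-aware variant of
# run_command used by check_cluster_status above, e.g.
#   nodes = execute_command("kubectl get nodes --no-headers -A "
#                           "--kubeconfig=" + RANCHER_RKE2_KUBECONFIG_PATH,
#                           log_out=False)
# returns the raw stdout, which the caller then splits line by line.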
| 10,535 | 43.455696 | 93 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/cli_objects.py
|
import os
import time
import subprocess
from pathlib import Path
from .common import get_user_client, random_test_name, \
DATA_SUBDIR, run_command, random_str
from .cli_common import DEFAULT_TIMEOUT, BaseCli
class RancherCli(BaseCli):
def __init__(self, url, token, context):
self.login(url, token, context=context)
self.projects = ProjectCli()
self.apps = AppCli()
self.mcapps = MultiClusterAppCli()
self.catalogs = CatalogCli()
self.clusters = ClusterCli()
self.nodes = NodeCli()
self.default_project = self.projects.create_project()
self.default_namespace = self.projects.create_namespace(
random_test_name("testdefault"))
BaseCli.DEFAULT_CONTEXT = self.default_project["id"]
self.switch_context(self.DEFAULT_CONTEXT)
def cleanup(self):
self.log.info("Cleaning up created test project: {}".format(
self.default_project["name"]))
self.switch_context(self.default_project["id"])
self.run_command("project delete {}".format(
self.default_project["id"]), expect_error=True)
class ProjectCli(BaseCli):
def create_project(self, name=None,
cluster_id=None, use_context=True):
if name is None:
name = random_test_name("ptest")
if cluster_id is None:
cluster = self.get_context()[0]
cluster_id = self.get_cluster_by_name(cluster)["id"]
self.run_command("projects create --cluster {} {}".format(cluster_id,
name))
project = None
for p in self.get_current_projects():
if p["name"] == name:
project = p
self.log.info("Project '%s' created successfully "
"in cluster '%s'", name, cluster_id)
break
if project is None:
self.log.error("Failed to create project '%s' "
"in cluster '%s'", name, cluster_id)
return project
if use_context:
self.log.info("Switching context to newly created project: "
"%s", name)
for p in self.get_current_projects():
if p["name"] == name:
self.switch_context(p["id"])
break
return project
def delete_project(self, name):
self.run_command("projects rm {}".format(name))
@classmethod
def get_current_projects(cls):
"""This uses the Rancher Python Client to retrieve the current projects
as there is not a CLI way to do this without passing stdin at the time
of creation (2/13/2020, Rancher v2.3.5).
Returns array of dictionaries containing id, name, clusterid, & uuid"""
client = get_user_client()
projects = client.list_project()
current_projects = []
for project in projects:
p = {
"id": project["id"],
"name": project["name"],
"clusterId": project["clusterId"],
"state": project["state"],
"uuid": project["uuid"]
}
current_projects.append(p)
return current_projects
def create_namespace(self, name=None):
if name is None:
name = random_test_name("nstest")
self.run_command("namespace create {}".format(name))
return name
def delete_namespace(self, name):
self.run_command("namespace delete {}".format(name))
self.log.info("Waiting for the namespace to be deleted")
deleted = self.wait_for_ready("namespace ls -q", name, condition_func=
lambda val, l: val not in l.splitlines())
return deleted
def get_namespaces(self):
namespaces = self.run_command("namespace ls --format "
"'{{.Namespace.Name}}"
"|{{.Namespace.State}}'")
return namespaces.splitlines()
def move_namespace(self, name, project_id):
self.run_command("namespace move {} {}".format(name, project_id))
class AppCli(BaseCli):
def install(self, app_name, namespace, **kwargs):
timeout = kwargs.get("timeout", DEFAULT_TIMEOUT)
version = kwargs.get("version", None)
context = kwargs.get("context", self.DEFAULT_CONTEXT)
values = kwargs.get("values", None)
cmd = "apps install {} --no-prompt -n {}".format(app_name, namespace)
if version is not None:
cmd = cmd + " --version {}".format(version)
if values is not None:
cmd = cmd + " --values {}".format(values)
self.switch_context(context)
app = self.run_command(cmd)
app = app.split('"')[1].split(" ")[2]
self.log.info("App is: {}".format(app))
self.log.info("Waiting for the app to be created")
# Wait for app to be "deploying"
self.wait_for_ready("apps ls --format '{{.App.Name}} {{.App.State}}' "
"| grep deploying | awk '{print $1}'", app,
timeout=timeout)
# Wait for app to be "active"
created = self.wait_for_ready("apps ls --format '{{.App.Name}} "
"{{.App.State}}' | grep active "
"| awk '{print $1}'", app,
timeout=timeout)
if not created:
self.log.warn("Failed to install app {} within timeout of {} "
"seconds.".format(app_name, timeout))
return self.get(app)
def get(self, app_name):
app = self.run_command("apps ls --format '{{.App.Name}}|{{.App.ID}}"
"|{{.App.State}}|{{.Version}}|{{.Template}}' "
"| grep " + app_name)
app = app.split("|")
return {"name": app[0], "id": app[1],
"state": app[2], "version": app[3], "template": app[4]}
def upgrade(self, app, **kwargs):
timeout = kwargs.get("timeout", DEFAULT_TIMEOUT)
version = kwargs.get("version", None)
if version is None:
version = self.run_command("apps st {} | tail -1".format(
app["template"]))
self.run_command("apps upgrade {} {}".format(app["name"], version))
self.log.info("Waiting for the app to be upgraded")
# Wait for app to be "deploying"
self.wait_for_ready("apps ls --format '{{.App.Name}} {{.App.State}}' "
"| grep deploying | awk '{print $1}'", app["name"])
# Wait for app to be "active"
upgraded = self.wait_for_ready("apps ls --format '{{.App.Name}} "
"{{.App.State}}' | grep active "
"| awk '{print $1}'", app["name"])
if not upgraded:
self.log.warn("Failed to upgrade app {} within timeout of {} "
"seconds.".format(app["name"], timeout))
return self.get(app["name"])
def rollback(self, app, desired_version, **kwargs):
timeout = kwargs.get("timeout", DEFAULT_TIMEOUT)
# Retrieve non-current versions that match desired version
revision = self.run_command(
"apps rollback -r %s | grep %s | awk '{print $1}'" %
(app["name"], desired_version)).splitlines()[0]
self.run_command("apps rollback {} {}".format(app["name"], revision))
self.log.info("Waiting for the app to be rolled back")
# Wait for app to be "deploying"
self.wait_for_ready("apps ls --format '{{.App.Name}} {{.App.State}}' "
"| grep deploying | awk '{print $1}'", app["name"])
# Wait for app to be "active"
rolled_back = self.wait_for_ready("apps ls --format '{{.App.Name}} "
"{{.App.State}}' | grep active "
"| awk '{print $1}'", app["name"])
if not rolled_back:
self.log.warn("Failed to rollback app {} within timeout of {} "
"seconds.".format(app["name"], timeout))
return self.get(app["name"])
def delete(self, app, **kwargs):
timeout = kwargs.get("timeout", DEFAULT_TIMEOUT)
self.run_command("apps delete {}".format(app["name"]))
self.log.info("Waiting for the app to be deleted")
deleted = self.wait_for_ready("apps ls -q", app["name"],
timeout=timeout, condition_func=
lambda val, l: val not in l.splitlines())
return deleted
def install_local_dir(self, catalog_url, branch, chart, **kwargs):
timeout = kwargs.get("timeout", DEFAULT_TIMEOUT)
context = kwargs.get("context", self.DEFAULT_CONTEXT)
version = kwargs.get("version", None)
current_dir = os.getcwd()
os.chdir(DATA_SUBDIR)
get_charts_cmd = \
run_command("git clone -b {} {}".format(branch, catalog_url))
time.sleep(5)
os.chdir("{}/integration-test-charts/charts/{}/{}".
format(DATA_SUBDIR, chart, version))
app_name = random_str()
self.switch_context(context)
app = self.run_command("apps install . {}".format(app_name))
app = app.split('"')[1].split(" ")[2]
self.log.info("App is: {}".format(app))
self.log.info("Waiting for the app to be created")
self.wait_for_ready("apps ls --format '{{.App.Name}} {{.App.State}}' "
"| grep deploying | awk '{print $1}'", app,
timeout=timeout)
# Wait for app to be "active"
created = self.wait_for_ready("apps ls --format '{{.App.Name}} "
"{{.App.State}}' | grep active "
"| awk '{print $1}'", app,
timeout=timeout)
if not created:
self.log.warn("Failed to install app {} within timeout of {} "
"seconds.".format(app_name, timeout))
os.chdir(current_dir)
return self.get(app)
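# Illustrative sketch only (not part of the original suite): a minimal
# install/upgrade/delete round trip through AppCli above. The chart name,
# namespace and version are hypothetical placeholders.
def _example_app_cli_flow():
    app_cli = AppCli()
    app = app_cli.install("mysql", "nstest-example", version="1.3.1")
    app = app_cli.upgrade(app)      # no version given: picks the latest one
    assert app_cli.delete(app)      # returns True once the app is gone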
class MultiClusterAppCli(BaseCli):
def install(self, template_name, **kwargs):
timeout = kwargs.get("timeout", DEFAULT_TIMEOUT)
version = kwargs.get("version", None)
targets = kwargs.get("targets", [self.DEFAULT_CONTEXT])
values = kwargs.get("values", None)
role = kwargs.get("role", "project-member")
cmd = "mcapps install {} --no-prompt --role {}".format(template_name, role)
for t in targets:
cmd += " --target {}".format(t)
if version is not None:
cmd += " --version {}".format(version)
if values is not None:
for k, v in values.items():
cmd += " --set {}={}".format(k, v)
app = self.run_command(cmd)
app = app.split('"')[1]
self.log.info("Multi-Cluster App is: {}".format(app))
# Wait for multi-cluster app to be "deploying"
self.wait_for_ready("mcapps ls --format '{{.App.Name}} {{.App.State}}'"
" | grep deploying | awk '{print $1}'",
app, timeout=timeout)
# Wait for multi-cluster app to be "active"
self.log.info("Waiting for the multi-cluster app to be created")
created = self.wait_for_ready("mcapps ls --format '{{.App.Name}} "
"{{.App.State}}' | grep active "
"| awk '{print $1}'", app,
timeout=timeout)
if not created:
self.log.warn("Failed to install multi-cluster app {} within "
"timeout of {} seconds.".format(
template_name, timeout))
return self.get(app)
def get(self, app_name):
app = self.run_command("mcapps ls --format '{{.App.Name}}|{{.App.ID}}"
"|{{.App.State}}|{{.Version}}"
"|{{.App.TemplateVersionID}}|"
"{{- range $key, $value := .App.Targets}}"
"{{$value.AppID}} {{$value.ProjectID}} "
"{{$value.State}};;{{- end}}' "
"| grep " + app_name)
app = app.split("|")
targets = []
for t in app[5].split(";;")[:-1]:
t = t.split()
self.switch_context(t[1])
t_app = AppCli.get(AppCli(), t[0])
targets.append(t_app)
revision = self.run_command("mcapps rollback -r %s | grep '*' | awk "
"'{print $2}'" % app_name).splitlines()[0]
return {"name": app[0], "id": app[1], "state": app[2],
"version": app[3], "template": app[4][:-(len(app[3]) + 1)],
"targets": targets, "revision": revision}
def upgrade(self, app, **kwargs):
timeout = kwargs.get("timeout", DEFAULT_TIMEOUT)
version = kwargs.get("version", None)
if version is None:
version = self.run_command("mcapps st {} | tail -1".format(
app["template"]))
self.run_command("mcapps upgrade {} {}".format(app["name"], version))
self.log.info("Waiting for the multi-cluster app to be upgraded")
# Wait for multi-cluster app to be "deploying"
self.wait_for_ready("mcapps ls --format '{{.App.Name}} {{.App.State}}'"
" | grep deploying | awk '{print $1}'",
app["name"], timeout=timeout)
# Wait for multi-cluster app to be "active"
upgraded = self.wait_for_ready("mcapps ls --format '{{.App.Name}} "
"{{.App.State}}' | grep active "
"| awk '{print $1}'", app["name"])
if not upgraded:
self.log.warn("Failed to upgrade multi-cluster app {} within "
"timeout of {} seconds.".format(
app["name"], timeout))
return self.get(app["name"])
def rollback(self, app_name, revision, **kwargs):
timeout = kwargs.get("timeout", DEFAULT_TIMEOUT)
self.run_command("mcapps rollback {} {}".format(app_name, revision))
self.log.info("Waiting for the multi-cluster app to be rolled back")
# Wait for multi-cluster app to be "deploying"
self.wait_for_ready("mcapps ls --format '{{.App.Name}} {{.App.State}}'"
" | grep deploying | awk '{print $1}'",
app_name, timeout=timeout)
# Wait for multi-cluster app to be "active"
rolled_back = self.wait_for_ready("mcapps ls --format '{{.App.Name}} "
"{{.App.State}}' | grep active "
"| awk '{print $1}'", app_name)
if not rolled_back:
self.log.warn("Failed to rollback multi-cluster app {} within "
"timeout of {} seconds.".format(app_name, timeout))
return self.get(app_name)
def delete(self, app, **kwargs):
timeout = kwargs.get("timeout", DEFAULT_TIMEOUT)
self.run_command("mcapps delete {}".format(app["name"]))
self.log.info("Waiting for the app to be deleted")
deleted = self.wait_for_ready("mcapps ls -q", app["name"],
timeout=timeout, condition_func=
lambda val, l: val not in l.splitlines())
apps_deleted = False
for target in app["targets"]:
apps_deleted = self.wait_for_ready("apps ls -q", target["name"],
timeout=timeout, condition_func=
lambda val, l:
val not in l.splitlines())
if not apps_deleted:
break
return deleted, apps_deleted
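# Illustrative sketch only (not part of the original suite): the corresponding
# multi-cluster app lifecycle through MultiClusterAppCli above. The template
# name and the target "<cluster>:<project>" id are hypothetical placeholders.
def _example_mcapp_flow(target_project_id):
    mcapp_cli = MultiClusterAppCli()
    app = mcapp_cli.install("grafana", targets=[target_project_id])
    app = mcapp_cli.upgrade(app)
    deleted, target_apps_deleted = mcapp_cli.delete(app)
    assert deleted and target_apps_deleted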
class CatalogCli(BaseCli):
def add(self, url, **kwargs):
branch = kwargs.get("branch", None)
catalog_name = random_test_name("ctest")
cmd = "catalog add {} {}".format(catalog_name, url)
if branch is not None:
cmd = cmd + " --branch " + branch
self.run_command(cmd)
return self.get(catalog_name)
def delete(self, name):
self.run_command("catalog delete " + name)
deleted = self.get(name) is None
return deleted
def get(self, name):
catalog = self.run_command("catalog ls --format '{{.Catalog.Name}}"
"|{{.Catalog.ID}}|{{.Catalog.URL}}"
"|{{.Catalog.Branch}}' | grep " + name)
if catalog is None:
return None
catalog = catalog.split("|")
return {"name": catalog[0], "id": catalog[1],
"url": catalog[2], "branch": catalog[3]}
class ClusterCli(BaseCli):
def delete(self, c_id):
self.run_command("clusters delete {}".format(c_id))
self.log.info("Waiting for the cluster to be deleted")
deleted = self.wait_for_ready("cluster ls -q", c_id, condition_func=
lambda val, l: val not in l.splitlines())
return deleted
class NodeCli(BaseCli):
def get(self):
result = self.run_command(
"nodes ls --format '{{.Name}}|{{.Node.IPAddress}}'").splitlines()
nodes = []
for n in result:
nodes.append({
"name": n.split("|")[0],
"ip": n.split("|")[1]
})
return nodes
def ssh(self, node, cmd, known=False, is_jenkins=False):
if is_jenkins:
home = str(Path.home())
tilde = home
else:
tilde = '~'
if not known:
self.log.debug("Determining if host is already known")
known_hosts = os.path.expanduser(
"{}/.ssh/known_hosts".format(tilde))
with open(known_hosts) as file:
for line in file:
if node["ip"] in line:
known = True
break
if not known:
self.log.debug("Host is not known. Attempting to add it to file")
try:
self.log.debug("Storing ecdsa key in known hosts")
subprocess.run("ssh-keyscan -t ecdsa {} >> {}"
"/.ssh/known_hosts".format(node["ip"], tilde),
shell=True, stderr=subprocess.PIPE)
except subprocess.CalledProcessError as e:
self.log.info("Error storing ecdsa key! Result: %s", e.stderr)
ssh_result = self.run_command('ssh {} "{}"'.format(node["name"], cmd))
return ssh_result
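# Illustrative sketch only (not part of the original suite): listing nodes and
# running an arbitrary command on the first one through NodeCli above.
def _example_node_ssh():
    node_cli = NodeCli()
    nodes = node_cli.get()
    if nodes:
        return node_cli.ssh(nodes[0], "docker ps --format '{{.Names}}'")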
| 19,131 | 43.596737 | 83 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_gke_cluster.py
|
from .common import * # NOQA
import requests
import pytest
CREDENTIALS = os.environ.get('RANCHER_GKE_CREDENTIAL', "")
GKE_MASTER_VERSION = os.environ.get('RANCHER_GKE_MASTER_VERSION', "")
gkecredential = pytest.mark.skipif(not CREDENTIALS, reason='GKE Credentials '
'not provided, cannot create cluster')
@gkecredential
def test_create_gke_cluster():
# Obtain GKE config data
gke_version, credential_data = get_gke_version_credentials()
client, cluster = create_and_validate_gke_cluster("test-auto-gke",
gke_version,
credential_data)
cluster_cleanup(client, cluster)
def create_and_validate_gke_cluster(name, version, credential_data):
gke_config = get_gke_config(name, version, credential_data)
client = get_user_client()
print("Cluster creation")
cluster = client.create_cluster(gke_config)
print(cluster)
cluster = validate_cluster(client, cluster, check_intermediate_state=True,
skipIngresscheck=True)
return client, cluster
def get_gke_version_credentials(multiple_versions=False):
credfilename = "credential.txt"
PATH = os.path.dirname(os.path.realpath(__file__))
credfilepath = PATH + "/" + credfilename
    # The JSON GKE credentials are written to a file and then re-read
f = open(credfilepath, "w")
f.write(CREDENTIALS)
f.close()
credentialdata = readDataFile(os.path.dirname(os.path.realpath(__file__)) +
"/", credfilename)
print(credentialdata)
if not GKE_MASTER_VERSION:
data_test = {
"credentials": credentialdata,
"zone": "us-central1-f",
"projectId": "rancher-qa"
}
headers = {"Content-Type": "application/json",
"Accept": "application/json",
"Authorization": "Bearer " + USER_TOKEN}
gke_version_url = CATTLE_TEST_URL + "/meta/gkeVersions"
print(gke_version_url)
response = requests.post(gke_version_url, json=data_test,
verify=False, headers=headers)
assert response.status_code == 200
assert response.content is not None
print(response.content)
json_response = json.loads(response.content)
validMasterVersions = json_response["validMasterVersions"]
if multiple_versions and len(validMasterVersions) > 1:
gkemasterversion = [validMasterVersions[0],
validMasterVersions[-1]]
else:
gkemasterversion = validMasterVersions[0]
else:
gkemasterversion = GKE_MASTER_VERSION
print(gkemasterversion)
return gkemasterversion, credentialdata
def get_gke_config(name, version, credential_data):
# Get GKE configuration
gke_config = {
"googleKubernetesEngineConfig": {
"diskSizeGb": 100,
"enableAlphaFeature": False,
"enableHorizontalPodAutoscaling": True,
"enableHttpLoadBalancing": True,
"enableKubernetesDashboard": False,
"enableLegacyAbac": False,
"enableNetworkPolicyConfig": True,
"enableStackdriverLogging": True,
"enableStackdriverMonitoring": True,
"masterVersion": version,
"machineType": "g1-small",
"type": "googleKubernetesEngineConfig",
"nodeCount": 3,
"zone": "us-central1-f",
"clusterIpv4Cidr": " ",
"credential": credential_data,
"projectId": "rancher-qa",
},
"name": name,
"type": "cluster"
}
return gke_config
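# Illustrative sketch only (not part of the original suite): the same flow as
# test_create_gke_cluster() but with an explicitly pinned master version,
# mirroring what happens when RANCHER_GKE_MASTER_VERSION is set. The cluster
# name is arbitrary.
def _example_create_gke_with_pinned_version(pinned_version):
    _, credential_data = get_gke_version_credentials()
    client, cluster = create_and_validate_gke_cluster(
        "test-auto-gke-pinned", pinned_version, credential_data)
    cluster_cleanup(client, cluster)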
| 3,791 | 34.111111 | 79 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_rke2_airgap.py
|
import os
import time
from lib.aws import AWS_USER
from .common import AmazonWebServices
from .test_airgap import (AG_HOST_NAME, BASTION_ID, NUMBER_OF_INSTANCES,
add_cleaned_images, get_bastion_node,
run_command_on_airgap_node, setup_ssh_key,
wait_for_airgap_pods_ready)
RANCHER_RKE2_VERSION = os.environ.get("RANCHER_RKE2_VERSION", "")
RKE2_SERVER_OPTIONS = os.environ.get("RANCHER_RKE2_SERVER_OPTIONS", "")
RKE2_AGENT_OPTIONS = os.environ.get("RANCHER_RKE2_AGENT_OPTIONS", "")
def test_deploy_airgap_rke2_private_registry():
bastion_node = deploy_noauth_bastion_server()
failures = add_rke2_images_to_private_registry(bastion_node,
RANCHER_RKE2_VERSION)
assert failures == [], "Failed to add images: {}".format(failures)
ag_nodes = prepare_airgap_rke2(bastion_node, NUMBER_OF_INSTANCES,
'private_registry')
assert len(ag_nodes) == NUMBER_OF_INSTANCES
print(
'{} airgapped rke2 instance(s) created.\n'
'Connect to these and run commands by connecting to bastion node, '
'then connecting to these:\n'
'ssh -i {}.pem {}@NODE_PRIVATE_IP'.format(
NUMBER_OF_INSTANCES, bastion_node.ssh_key_name, AWS_USER))
for ag_node in ag_nodes:
assert ag_node.private_ip_address is not None
assert ag_node.public_ip_address is None
server_ops = RKE2_SERVER_OPTIONS + " --system-default-registry={}".format(
bastion_node.host_name)
agent_ops = RKE2_AGENT_OPTIONS + " --system-default-registry={}".format(
bastion_node.host_name)
deploy_airgap_rke2_cluster(bastion_node, ag_nodes, server_ops, agent_ops)
wait_for_airgap_pods_ready(bastion_node, ag_nodes,
kubectl='/var/lib/rancher/rke2/bin/kubectl',
kubeconfig='/etc/rancher/rke2/rke2.yaml')
def test_deploy_airgap_rke2_tarball():
bastion_node = get_bastion_node(BASTION_ID)
add_rke2_tarball_to_bastion(bastion_node, RANCHER_RKE2_VERSION)
ag_nodes = prepare_airgap_rke2(
bastion_node, NUMBER_OF_INSTANCES, 'tarball')
assert len(ag_nodes) == NUMBER_OF_INSTANCES
print(
'{} airgapped rke2 instance(s) created.\n'
'Connect to these and run commands by connecting to bastion node, '
'then connecting to these:\n'
'ssh -i {}.pem {}@NODE_PRIVATE_IP'.format(
NUMBER_OF_INSTANCES, bastion_node.ssh_key_name, AWS_USER))
for ag_node in ag_nodes:
assert ag_node.private_ip_address is not None
assert ag_node.public_ip_address is None
deploy_airgap_rke2_cluster(bastion_node, ag_nodes,
RKE2_SERVER_OPTIONS, RKE2_AGENT_OPTIONS)
wait_for_airgap_pods_ready(bastion_node, ag_nodes,
kubectl='/var/lib/rancher/rke2/bin/kubectl',
kubeconfig='/etc/rancher/rke2/rke2.yaml')
def deploy_noauth_bastion_server():
node_name = AG_HOST_NAME + "-noauthbastion"
# Create Bastion Server in AWS
bastion_node = AmazonWebServices().create_node(node_name)
setup_ssh_key(bastion_node)
# Generate self signed certs
generate_certs_command = \
'mkdir -p certs && sudo openssl req -newkey rsa:4096 -nodes -sha256 ' \
'-keyout certs/domain.key -x509 -days 365 -out certs/domain.crt ' \
'-subj "/C=US/ST=AZ/O=Rancher QA/CN={}"'.format(bastion_node.host_name)
bastion_node.execute_command(generate_certs_command)
# Ensure docker uses the certs that were generated
update_docker_command = \
'sudo mkdir -p /etc/docker/certs.d/{0} && ' \
'sudo cp ~/certs/domain.crt /etc/docker/certs.d/{0}/ca.crt && ' \
'sudo service docker restart'.format(bastion_node.host_name)
bastion_node.execute_command(update_docker_command)
# Run private registry
run_private_registry_command = \
'sudo docker run -d --restart=always --name registry ' \
'-v "$(pwd)"/certs:/certs -e REGISTRY_HTTP_ADDR=0.0.0.0:443 ' \
'-e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt ' \
'-e REGISTRY_HTTP_TLS_KEY=/certs/domain.key -p 443:443 registry:2'
bastion_node.execute_command(run_private_registry_command)
time.sleep(5)
print("Bastion Server Details:\nNAME: {}\nHOST NAME: {}\n"
"INSTANCE ID: {}\n".format(node_name, bastion_node.host_name,
bastion_node.provider_node_id))
return bastion_node
def add_rke2_tarball_to_bastion(bastion_node, rke2_version):
get_tarball_command = \
'wget -O rke2-airgap-images.tar.gz https://github.com/rancher/rke2/' \
'releases/download/{0}/rke2-images.linux-amd64.tar.gz && ' \
'wget -O rke2 https://github.com/rancher/rke2/' \
'releases/download/{0}/rke2.linux-amd64'.format(rke2_version)
bastion_node.execute_command(get_tarball_command)
def add_rke2_images_to_private_registry(bastion_node, rke2_version):
get_images_command = \
'wget -O rke2-images.txt https://github.com/rancher/rke2/' \
'releases/download/{0}/rke2-images.linux-amd64.txt && ' \
'wget -O rke2 https://github.com/rancher/rke2/' \
'releases/download/{0}/rke2.linux-amd64'.format(rke2_version)
bastion_node.execute_command(get_images_command)
images = bastion_node.execute_command(
'cat rke2-images.txt')[0].strip().split("\n")
assert images
return add_cleaned_images(bastion_node, images)
def prepare_airgap_rke2(bastion_node, number_of_nodes, method):
node_name = AG_HOST_NAME + "-rke2-airgap"
# Create Airgap Node in AWS
ag_nodes = AmazonWebServices().create_multiple_nodes(
number_of_nodes, node_name, public_ip=False)
for num, ag_node in enumerate(ag_nodes):
# Copy relevant rke2 files to airgapped node
ag_node_copy_files = \
'scp -i "{0}.pem" -o StrictHostKeyChecking=no ./rke2 ' \
'{1}@{2}:~/rke2'.format(bastion_node.ssh_key_name, AWS_USER,
ag_node.private_ip_address)
bastion_node.execute_command(ag_node_copy_files)
ag_node_make_executable = \
'sudo mv ./rke2 /usr/local/bin/rke2 && ' \
'sudo chmod +x /usr/local/bin/rke2'
run_command_on_airgap_node(bastion_node, ag_node,
ag_node_make_executable)
if method == 'private_registry':
ag_node_copy_certs = \
'scp -i "{0}.pem" -o StrictHostKeyChecking=no certs/* ' \
'{1}@{2}:~/'.format(bastion_node.ssh_key_name, AWS_USER,
ag_node.private_ip_address)
bastion_node.execute_command(ag_node_copy_certs)
ag_node_update_certs = \
'sudo cp domain.crt ' \
'/usr/local/share/ca-certificates/domain.crt && ' \
'sudo update-ca-certificates'
run_command_on_airgap_node(bastion_node, ag_node,
ag_node_update_certs)
elif method == 'tarball':
ag_node_copy_tarball = \
'scp -i "{0}.pem" -o StrictHostKeyChecking=no ' \
'./rke2-airgap-images.tar.gz ' \
'{1}@{2}:~/rke2-airgap-images.tar.gz'.format(
bastion_node.ssh_key_name, AWS_USER,
ag_node.private_ip_address)
bastion_node.execute_command(ag_node_copy_tarball)
ag_node_add_tarball_to_dir = \
'sudo mkdir -p /var/lib/rancher/rke2/agent/images/ && ' \
'sudo cp ./rke2-airgap-images.tar.gz ' \
'/var/lib/rancher/rke2/agent/images/ && sudo gunzip ' \
'/var/lib/rancher/rke2/agent/images/rke2-airgap-images.tar.gz'
run_command_on_airgap_node(bastion_node, ag_node,
ag_node_add_tarball_to_dir)
print("Airgapped RKE2 Instance Details:\nNAME: {}-{}\nPRIVATE IP: {}\n"
"".format(node_name, num, ag_node.private_ip_address))
return ag_nodes
def deploy_airgap_rke2_cluster(bastion_node, ag_nodes, server_ops, agent_ops):
token = ""
server_ip = ag_nodes[0].private_ip_address
for num, ag_node in enumerate(ag_nodes):
if num == 0:
# Install rke2 server
install_rke2_server = \
'sudo rke2 server --write-kubeconfig-mode 644 {} ' \
'> /dev/null 2>&1 &'.format(server_ops)
run_command_on_airgap_node(bastion_node, ag_node,
install_rke2_server)
time.sleep(30)
token_command = 'sudo cat /var/lib/rancher/rke2/server/node-token'
token = run_command_on_airgap_node(bastion_node, ag_node,
token_command)[0].strip()
else:
install_rke2_worker = \
'sudo rke2 agent --server https://{}:9345 ' \
'--token {} {} > /dev/null 2>&1 &'.format(
server_ip, token, agent_ops)
run_command_on_airgap_node(bastion_node, ag_node,
install_rke2_worker)
time.sleep(15)
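# Illustrative sketch only (not part of the original suite): one way to
# spot-check the RKE2 server node after deploy_airgap_rke2_cluster(), reusing
# the kubectl/kubeconfig paths already passed to wait_for_airgap_pods_ready()
# above.
def _example_check_rke2_nodes(bastion_node, ag_nodes):
    check_cmd = \
        'sudo /var/lib/rancher/rke2/bin/kubectl ' \
        '--kubeconfig /etc/rancher/rke2/rke2.yaml get nodes -o wide'
    return run_command_on_airgap_node(bastion_node, ag_nodes[0], check_cmd)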
| 9,357 | 43.990385 | 79 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_import_cluster.py
|
import os
from lib.aws import AmazonWebServices
from .common import get_user_client
from .common import run_command
from .common import random_test_name
from .common import run_command_with_stderr
from .common import create_custom_host_registration_token
from .common import validate_cluster
from .common import cluster_cleanup
from .common import readDataFile
RANCHER_CLEANUP_CLUSTER = os.environ.get('RANCHER_CLEANUP_CLUSTER', "True")
DATA_SUBDIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'resource')
AWS_SSH_KEY_NAME = os.environ.get("AWS_SSH_KEY_NAME")
AWS_NODE_COUNT = int(os.environ.get("AWS_NODE_COUNT", 3))
HOST_NAME = os.environ.get('RANCHER_HOST_NAME', "testcustom")
RKE_K8S_VERSION = os.environ.get("RANCHER_RKE_K8S_VERSION", "")
def test_import_rke_cluster():
client, cluster, aws_nodes = create_and_validate_import_cluster()
cluster_cleanup(client, cluster, aws_nodes)
def create_and_validate_import_cluster(k8s_version="", supportmatrix=False):
client = get_user_client()
# Create AWS nodes for the cluster
aws_nodes = \
AmazonWebServices().create_multiple_nodes(
AWS_NODE_COUNT, random_test_name(HOST_NAME))
assert len(aws_nodes) == AWS_NODE_COUNT
# Create RKE config
cluster_filename = random_test_name("cluster")
clusterfilepath = create_rke_cluster_config(aws_nodes, cluster_filename)
is_file = os.path.isfile(clusterfilepath)
assert is_file
# update clusterfilepath with k8s version
if supportmatrix:
file_object = open(clusterfilepath, 'a')
version = "kubernetes_version: " + k8s_version
file_object.write(version)
# Close the file
file_object.close()
# Print config file to be used for rke cluster create
configfile = run_command("cat " + clusterfilepath)
print("RKE Config file generated:\n")
print(configfile)
# Create RKE K8s Cluster
clustername = random_test_name("testimport")
rkecommand = "rke up --config {}".format(clusterfilepath)
print(rkecommand)
result = run_command_with_stderr(rkecommand)
print("RKE up result: ", result)
# Import the RKE cluster
cluster = client.create_cluster(name=clustername)
print(cluster)
cluster_token = create_custom_host_registration_token(client, cluster)
command = cluster_token.insecureCommand
print(command)
rke_config_file = "kube_config_" + cluster_filename + ".yml"
finalimportcommand = "{} --kubeconfig {}/{}".format(command, DATA_SUBDIR,
rke_config_file)
print("Final command to import cluster is:")
print(finalimportcommand)
result = run_command(finalimportcommand)
print(result)
clusters = client.list_cluster(name=clustername).data
assert len(clusters) > 0
print("Cluster is")
print(clusters[0])
# Validate the cluster
cluster = validate_cluster(client, clusters[0],
check_intermediate_state=False)
return client, cluster, aws_nodes
def test_generate_rke_config():
aws_nodes = \
AmazonWebServices().create_multiple_nodes(
AWS_NODE_COUNT, random_test_name(HOST_NAME))
assert len(aws_nodes) == AWS_NODE_COUNT
# Create RKE config
cluster_filename = random_test_name("cluster")
rkeconfigpath = create_rke_cluster_config(aws_nodes, cluster_filename)
rkeconfig = run_command("cat " + rkeconfigpath)
print("RKE Config file generated\n")
print(rkeconfig)
def create_rke_cluster_config(aws_nodes, cluster_filename):
"""
    Generates an RKE config file with a minimum of 3 nodes with ALL roles (etcd,
    worker and control plane). If the requested number of nodes is greater
    than 3, additional nodes with the worker role are created.
"""
# Create RKE Config file
configfile = "cluster.yml"
rkeconfig = readDataFile(DATA_SUBDIR, configfile)
print(rkeconfig)
for i in range(0, AWS_NODE_COUNT):
ipstring = "$ip" + str(i)
intipstring = "$intip" + str(i)
rkeconfig = rkeconfig.replace(ipstring, aws_nodes[i].public_ip_address)
rkeconfig = rkeconfig.replace(intipstring,
aws_nodes[i].private_ip_address)
rkeconfig = rkeconfig.replace("$AWS_SSH_KEY_NAME", AWS_SSH_KEY_NAME)
rkeconfig = rkeconfig.replace("$KUBERNETES_VERSION", RKE_K8S_VERSION)
clusterfilepath = DATA_SUBDIR + "/" + cluster_filename + ".yml"
print(clusterfilepath)
f = open(clusterfilepath, "w")
f.write(rkeconfig)
if AWS_NODE_COUNT > 3:
for i in range(3, AWS_NODE_COUNT):
for j in range(i, i + 1):
f.write(" - address: {}\n".format(
aws_nodes[j].public_ip_address))
f.write(" internaladdress: {}\n".format(
aws_nodes[j].private_ip_address))
f.write(" user: ubuntu\n")
f.write(" role: [worker]\n")
f.close()
return clusterfilepath
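# Illustrative note (assumption, not part of the original suite): `rke up`
# writes its kubeconfig next to the cluster file using the
# "kube_config_<cluster file>.yml" pattern that
# create_and_validate_import_cluster() relies on above.
def _example_kubeconfig_path(cluster_filename):
    return "{}/kube_config_{}.yml".format(DATA_SUBDIR, cluster_filename)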
| 5,051 | 35.875912 | 79 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_custom_host_reg.py
|
from .test_auth import enable_ad, load_setup_data, enable_openldap, \
OPENLDAP_AUTH_USER_PASSWORD, enable_freeipa, FREEIPA_AUTH_USER_PASSWORD
from .common import * # NOQA
import ast
AGENT_REG_CMD = os.environ.get('RANCHER_AGENT_REG_CMD', "")
HOST_COUNT = int(os.environ.get('RANCHER_HOST_COUNT', 1))
HOST_NAME = os.environ.get('RANCHER_HOST_NAME', "testsa")
RANCHER_SERVER_VERSION = os.environ.get('RANCHER_SERVER_VERSION',
"master-head")
rke_config = {"authentication": {"type": "authnConfig", "strategy": "x509"},
"ignoreDockerVersion": False,
"network": {"type": "networkConfig", "plugin": "canal"},
"type": "rancherKubernetesEngineConfig"
}
AUTO_DEPLOY_CUSTOM_CLUSTER = ast.literal_eval(
os.environ.get('RANCHER_AUTO_DEPLOY_CUSTOM_CLUSTER', "True"))
KEYPAIR_NAME_PREFIX = os.environ.get('RANCHER_KEYPAIR_NAME_PREFIX', "")
RANCHER_CLUSTER_NAME = os.environ.get('RANCHER_CLUSTER_NAME', "")
RANCHER_ELASTIC_SEARCH_ENDPOINT = os.environ.get(
'RANCHER_ELASTIC_SEARCH_ENDPOINT', "")
K8S_VERSION = os.environ.get('RANCHER_K8S_VERSION', "")
def test_add_custom_host():
aws_nodes = AmazonWebServices().create_multiple_nodes(
HOST_COUNT, random_test_name("testsa" + HOST_NAME))
if AGENT_REG_CMD != "":
for aws_node in aws_nodes:
additional_options = " --address " + aws_node.public_ip_address + \
" --internal-address " + \
aws_node.private_ip_address
if 'Administrator' == aws_node.ssh_user:
agent_cmd_temp = AGENT_REG_CMD.replace('| iex', ' ' + additional_options + ' | iex ')
agent_cmd = agent_cmd_temp + additional_options
else:
agent_cmd = AGENT_REG_CMD + additional_options
aws_node.execute_command(agent_cmd)
print("Nodes: " + aws_node.public_ip_address)
def test_delete_keypair():
AmazonWebServices().delete_keypairs(KEYPAIR_NAME_PREFIX)
def test_deploy_rancher_server():
if "v2.5" in RANCHER_SERVER_VERSION or \
"master" in RANCHER_SERVER_VERSION or \
"v2.6" in RANCHER_SERVER_VERSION:
RANCHER_SERVER_CMD = \
'sudo docker run -d --privileged --name="rancher-server" ' \
'--restart=unless-stopped -p 80:80 -p 443:443 ' \
'rancher/rancher'
else:
RANCHER_SERVER_CMD = \
'sudo docker run -d --name="rancher-server" ' \
'--restart=unless-stopped -p 80:80 -p 443:443 ' \
'rancher/rancher'
RANCHER_SERVER_CMD += ":" + RANCHER_SERVER_VERSION + " --trace"
print(RANCHER_SERVER_CMD)
aws_nodes = AmazonWebServices().create_multiple_nodes(
1, random_test_name("testsa" + HOST_NAME))
aws_nodes[0].execute_command(RANCHER_SERVER_CMD)
time.sleep(120)
RANCHER_SERVER_URL = "https://" + aws_nodes[0].public_ip_address
print(RANCHER_SERVER_URL)
wait_until_active(RANCHER_SERVER_URL, timeout=300)
RANCHER_SET_DEBUG_CMD = \
"sudo docker exec rancher-server loglevel --set debug"
aws_nodes[0].execute_command(RANCHER_SET_DEBUG_CMD)
token = set_url_password_token(RANCHER_SERVER_URL)
admin_client = rancher.Client(url=RANCHER_SERVER_URL + "/v3",
token=token, verify=False)
if AUTH_PROVIDER:
enable_url = \
RANCHER_SERVER_URL + "/v3/" + AUTH_PROVIDER + \
"Configs/" + AUTH_PROVIDER.lower() + "?action=testAndApply"
auth_admin_user = load_setup_data()["admin_user"]
auth_user_login_url = \
RANCHER_SERVER_URL + "/v3-public/" + AUTH_PROVIDER + "Providers/" \
+ AUTH_PROVIDER.lower() + "?action=login"
if AUTH_PROVIDER == "activeDirectory":
enable_ad(auth_admin_user, token, enable_url=enable_url,
password=AUTH_USER_PASSWORD, nested=NESTED_GROUP_ENABLED)
user_token = login_as_auth_user(
load_setup_data()["standard_user"],
AUTH_USER_PASSWORD,
login_url=auth_user_login_url)["token"]
elif AUTH_PROVIDER == "openLdap":
enable_openldap(auth_admin_user, token, enable_url=enable_url,
password=OPENLDAP_AUTH_USER_PASSWORD,
nested=NESTED_GROUP_ENABLED)
user_token = login_as_auth_user(
load_setup_data()["standard_user"],
OPENLDAP_AUTH_USER_PASSWORD,
login_url=auth_user_login_url)["token"]
elif AUTH_PROVIDER == "freeIpa":
enable_freeipa(auth_admin_user, token, enable_url=enable_url,
password=FREEIPA_AUTH_USER_PASSWORD,
nested=NESTED_GROUP_ENABLED)
user_token = login_as_auth_user(
load_setup_data()["standard_user"],
FREEIPA_AUTH_USER_PASSWORD,
login_url=auth_user_login_url)["token"]
else:
AUTH_URL = \
RANCHER_SERVER_URL + "/v3-public/localproviders/local?action=login"
user, user_token = create_user(admin_client, AUTH_URL)
env_details = "env.CATTLE_TEST_URL='" + RANCHER_SERVER_URL + "'\n"
env_details += "env.ADMIN_TOKEN='" + token + "'\n"
env_details += "env.USER_TOKEN='" + user_token + "'\n"
if UPDATE_KDM:
update_and_validate_kdm(KDM_URL, admin_token=token,
rancher_api_url=RANCHER_SERVER_URL + "/v3")
if AUTO_DEPLOY_CUSTOM_CLUSTER:
aws_nodes = \
AmazonWebServices().create_multiple_nodes(
5, random_test_name("testcustom"))
node_roles = [["controlplane"], ["etcd"],
["worker"], ["worker"], ["worker"]]
client = rancher.Client(url=RANCHER_SERVER_URL + "/v3",
token=user_token, verify=False)
if K8S_VERSION != "":
rke_config["kubernetesVersion"] = K8S_VERSION
print("the rke config for creating the cluster:")
print(rke_config)
cluster = client.create_cluster(
name=random_name(),
driver="rancherKubernetesEngine",
rancherKubernetesEngineConfig=rke_config)
assert cluster.state == "provisioning"
i = 0
for aws_node in aws_nodes:
docker_run_cmd = \
get_custom_host_registration_cmd(
client, cluster, node_roles[i], aws_node)
aws_node.execute_command(docker_run_cmd)
i += 1
validate_cluster_state(client, cluster)
env_details += "env.CLUSTER_NAME='" + cluster.name + "'\n"
create_config_file(env_details)
def test_delete_rancher_server():
client = get_admin_client()
clusters = client.list_cluster().data
for cluster in clusters:
delete_cluster(client, cluster)
clusters = client.list_cluster().data
start = time.time()
while len(clusters) > 0:
time.sleep(30)
clusters = client.list_cluster().data
if time.time() - start > MACHINE_TIMEOUT:
exceptionMsg = 'Timeout waiting for clusters to be removed'
raise Exception(exceptionMsg)
ip_address = CATTLE_TEST_URL[8:]
print("Ip Address:" + ip_address)
filters = [
{'Name': 'network-interface.addresses.association.public-ip',
'Values': [ip_address]}]
aws_nodes = AmazonWebServices().get_nodes(filters)
assert len(aws_nodes) == 1
AmazonWebServices().delete_nodes(aws_nodes, wait_for_deleted=True)
def test_cluster_enable_logging_elasticsearch():
client = get_user_client()
cluster = get_cluster_by_name(client, RANCHER_CLUSTER_NAME)
cluster_name = cluster.name
client.create_cluster_logging(name=random_test_name("elasticsearch"),
clusterId=cluster.id,
elasticsearchConfig={
"dateFormat": "YYYY-MM-DD",
"sslVerify": False,
"sslVersion": "TLSv1_2",
"indexPrefix": cluster_name,
"endpoint":
RANCHER_ELASTIC_SEARCH_ENDPOINT}
)
projects = client.list_project(name="System",
clusterId=cluster.id).data
assert len(projects) == 1
project = projects[0]
p_client = get_project_client_for_token(project, USER_TOKEN)
wait_for_app_to_active(p_client, "rancher-logging")
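# Illustrative note (assumption, not part of the original suite): the
# create_config_file(env_details) call above ends up producing a small
# Jenkins-style properties snippet along these lines:
#
#   env.CATTLE_TEST_URL='https://<rancher-server-ip>'
#   env.ADMIN_TOKEN='token-xxxxx:...'
#   env.USER_TOKEN='token-yyyyy:...'
#   env.CLUSTER_NAME='<cluster name>'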
| 8,698 | 43.15736 | 101 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_catalog_library.py
|
"""
This file has tests to deploy apps in a project created in a cluster.
Test requirements:
Env variables - Cattle_url, Admin Token, User Token, Cluster Name
Test on at least 3 worker nodes
App versions are given in 'cataloglib_appversion.json' file
"""
import json
from .common import os
from .common import pytest
from .common import create_ns
from .common import create_catalog_external_id
from .common import validate_app_deletion
from .common import get_user_client_and_cluster
from .common import create_kubeconfig
from .common import get_cluster_client_for_token
from .common import create_project
from .common import random_test_name
from .common import get_defaut_question_answers
from .common import validate_catalog_app
from .common import get_project_client_for_token
from .common import USER_TOKEN
from .common import get_user_client
cluster_info = {"cluster": None, "cluster_client": None,
"project": None, "project_client": None,
"user_client": None}
catalog_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"./resource/cataloglib_appversion.json")
with open(catalog_filename, "r") as app_v:
app_data = json.load(app_v)
@pytest.mark.parametrize('app_name, app_version', app_data.items())
def test_catalog_app_deploy(app_name, app_version):
"""
    Runs once for each app listed in 'cataloglib_appversion.json':
    creates the relevant namespace and deploys the app.
    Validates the status, version and answers of the app.
    The try block makes sure apps are deleted even
    after they fail to validate.
"""
user_client = cluster_info["user_client"]
project_client = cluster_info["project_client"]
cluster_client = cluster_info["cluster_client"]
cluster = cluster_info["cluster"]
project = cluster_info["project"]
ns = create_ns(cluster_client, cluster, project, app_name)
app_ext_id = create_catalog_external_id('library',
app_name, app_version)
answer = get_defaut_question_answers(user_client, app_ext_id)
try:
app = project_client.create_app(
name=random_test_name(),
externalId=app_ext_id,
targetNamespace=ns.name,
projectId=ns.projectId,
answers=answer)
validate_catalog_app(project_client, app, app_ext_id, answer)
except (AssertionError, RuntimeError):
assert False, "App {} deployment/Validation failed.".format(app_name)
finally:
project_client.delete(app)
validate_app_deletion(project_client, app.id)
user_client.delete(ns)
@pytest.fixture(scope='module', autouse="True")
def create_project_client(request):
"""
    Creates a project in a cluster and collects details of the
    user, project and cluster.
"""
user_client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
cluster_client = get_cluster_client_for_token(cluster, USER_TOKEN)
project = create_project(user_client, cluster,
random_test_name("App-deployment"))
project_client = get_project_client_for_token(project, USER_TOKEN)
cluster_info["cluster"] = cluster
cluster_info["cluster_client"] = cluster_client
cluster_info["project"] = project
cluster_info["project_client"] = project_client
cluster_info["user_client"] = user_client
def fin():
client = get_user_client()
client.delete(cluster_info["project"])
request.addfinalizer(fin)
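# Illustrative note (assumption, not part of the original suite): the
# parametrize above expects cataloglib_appversion.json to be a flat
# "app name" -> "chart version" mapping, for example:
#
#   {
#       "mysql": "1.3.1",
#       "grafana": "0.0.31"
#   }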
| 3,620 | 36.71875 | 77 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_aks_cluster.py
|
from .common import * # NOQA
import pytest
import requests
AKS_CLUSTER_VERSION = os.environ.get('RANCHER_AKS_CLUSTER_VERSION', '')
SSH_KEY = os.environ.get('RANCHER_SSH_KEY', "")
SUBSCRIPTION_ID = os.environ.get('RANCHER_AKS_SUBSCRIPTION_ID', '')
TENANT_ID = os.environ.get('RANCHER_AKS_TENANT_ID', '')
CLIENT_ID = os.environ.get('RANCHER_AKS_CLIENT_ID', '')
SECRET_KEY = os.environ.get('RANCHER_AKS_SECRET_KEY', '')
RESOURCE_GROUP = os.environ.get('RANCHER_AKS_RESOURCE_GROUP', '')
AKS_REGION = os.environ.get('RANCHER_AKS_REGION', 'eastus')
akscredential = pytest.mark.skipif(not (SUBSCRIPTION_ID and TENANT_ID and
CLIENT_ID and SECRET_KEY),
reason='AKS Credentials not provided, '
'cannot create cluster')
@akscredential
def test_create_aks_cluster():
version = get_aks_version()
client, cluster = create_and_validate_aks_cluster(version)
cluster_cleanup(client, cluster)
def create_and_validate_aks_cluster(version):
client = get_user_client()
aks_config = get_aks_config(version)
print("Cluster creation")
cluster = client.create_cluster(aks_config)
cluster = validate_cluster(client, cluster, check_intermediate_state=True,
skipIngresscheck=True)
return client, cluster
def get_aks_version(multiple_versions=False):
if not AKS_CLUSTER_VERSION:
data_test = {
"region": "eastus",
"subscriptionId": SUBSCRIPTION_ID,
"tenantId": TENANT_ID,
"clientId": CLIENT_ID,
"clientSecret": SECRET_KEY
}
headers = {"Content-Type": "application/json",
"Accept": "application/json",
"Authorization": "Bearer " + USER_TOKEN}
aks_version_url = CATTLE_TEST_URL + "/meta/aksVersions"
print(aks_version_url)
response = requests.post(aks_version_url, json=data_test,
verify=False, headers=headers)
assert response.status_code == 200
assert response.content is not None
print("JSON RESPONSE IS")
print(response.content)
json_response = json.loads(response.content)
if multiple_versions and len(json_response) > 1:
aksclusterversion = [json_response[0], json_response[-1]]
else:
aksclusterversion = json_response[-1]
else:
aksclusterversion = AKS_CLUSTER_VERSION
print(aksclusterversion)
return aksclusterversion
def get_aks_config(version):
aks_config = {
"azureKubernetesServiceConfig": {
"adminUsername": "azureuser",
"agentPoolName": "rancher",
"agentVmSize": "Standard_D2_v2",
"clientId": CLIENT_ID,
"clientSecret": SECRET_KEY,
"count": 3,
"dnsServiceIp": None,
"dockerBridgeCidr": None,
"kubernetesVersion": version,
"location": AKS_REGION,
"osDiskSizeGb": 100,
"resourceGroup": RESOURCE_GROUP,
"serviceCidr": None,
"sshPublicKeyContents": SSH_KEY,
"subnet": None,
"subscriptionId": SUBSCRIPTION_ID,
"tenantId": TENANT_ID,
"type": "azureKubernetesServiceConfig",
"virtualNetwork": None,
"virtualNetworkResourceGroup": None,
"dockerRootDir": "/var/lib/docker",
"enableNetworkPolicy": False,
},
"name": random_test_name("test-auto-aks"),
"type": "cluster"
}
return aks_config
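# Illustrative sketch only (not part of the original suite): how the helpers
# above compose when RANCHER_AKS_CLUSTER_VERSION is not pinned via env vars.
def _example_create_aks_with_discovered_version():
    version = get_aks_version()   # last entry returned by /meta/aksVersions
    client, cluster = create_and_validate_aks_cluster(version)
    cluster_cleanup(client, cluster)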
| 3,647 | 34.417476 | 78 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_certificate.py
|
"""
This test suite contains tests to validate certificate create/edit/delete with
different possible way and with different roles of users.
Test requirement:
Below Env variables need to be set
CATTLE_TEST_URL - url to rancher server
ADMIN_TOKEN - Admin token from rancher
USER_TOKEN - User token from rancher
RANCHER_CLUSTER_NAME - Cluster name to run test on
RANCHER_VALID_TLS_KEY - takes authentic certificate key base64 encoded
RANCHER_VALID_TLS_CERT - takes authentic certificate base64 encoded
RANCHER_BYO_TLS_KEY - takes self signed certificate key base64 encoded
RANCHER_BYO_TLS_CERT - takes self signed certificate base64 encoded
AWS_HOSTED_ZONE_ID - Zone Id in AWS route53 where route53 will be created.
RANCHER_TEST_RBAC - To enable rbac tests
"""
from .common import (ApiError, CLUSTER_MEMBER, CLUSTER_OWNER, create_kubeconfig,
create_ns, create_project_and_ns,
get_cluster_client_for_token, get_project_client_for_token,
get_user_client, get_user_client_and_cluster, if_test_rbac,
PROJECT_OWNER, PROJECT_MEMBER, PROJECT_READ_ONLY,
random_test_name, rbac_get_namespace, rbac_get_project,
rbac_get_user_token_by_role, TEST_IMAGE, USER_TOKEN,
validate_ingress_using_endpoint, validate_workload,
wait_for_ingress_to_active, base64, TEST_IMAGE_PORT)
from lib.aws import AmazonWebServices
from pathlib import Path
import pytest
import os
import time
namespace = {"p_client": None, "ns": None, "cluster": None, "project": None,
"c_client": None, "cert_valid": None, "cert_ssc": None,
"cert_allns_valid": None, "cert_allns_ssc": None, "node_id": None}
route_entry_53_1 = random_test_name('auto-valid') + '.qa.rancher.space'
route_entry_53_2 = random_test_name('auto-ssc') + '.qa.rancher.space'
def get_ssh_key(ssh_key_name):
home = str(Path.home())
path = '{}/.ssh/{}'.format(home, ssh_key_name)
if os.path.exists(path):
with open(path, 'r') as f:
ssh_key = f.read()
return ssh_key
def get_private_key(env_var, key_name):
key = os.environ.get(env_var)
if key is not None:
return base64.b64decode(key).decode("utf-8")
else:
return get_ssh_key(key_name)
rancher_private_key = get_private_key('RANCHER_VALID_TLS_KEY',
'privkey.pem')
rancher_cert = get_private_key('RANCHER_VALID_TLS_CERT', 'fullchain.pem')
rancher_ssc_private_key = get_private_key('RANCHER_BYO_TLS_KEY',
'key.pem')
rancher_ssc_cert = get_private_key('RANCHER_BYO_TLS_CERT', 'cert.pem')
rbac_role_list = [
CLUSTER_OWNER,
CLUSTER_MEMBER,
PROJECT_OWNER,
PROJECT_MEMBER,
PROJECT_READ_ONLY
]
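# Illustrative helper sketch (assumption, not used by the tests below): the
# ingress rule/tls payloads repeated throughout TestCertificate all follow the
# same shape, captured here for reference.
def _example_ingress_payload(host, workload_id, certificate_id):
    rule = {"host": host,
            "paths": [{"path": "/name.html",
                       "workloadIds": [workload_id],
                       "targetPort": TEST_IMAGE_PORT}]}
    tls = {"certificateId": certificate_id, "hosts": [host]}
    return rule, tls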
@pytest.mark.usefixtures("create_project_client")
class TestCertificate:
@pytest.fixture(autouse="True")
def certificate_test_setup(self):
"""
        Test setup which runs before and after all the tests in the class.
        Creates workload_2 if required and, after test execution, deletes all
        the workloads and ingresses that were created.
"""
self.p_client = namespace["p_client"]
self.ns = namespace["ns"]
self.c_client = namespace["c_client"]
self.cluster = namespace["cluster"]
self.project = namespace["project"]
self.certificate_valid = namespace["cert_valid"]
self.certificate_ssc = namespace["cert_ssc"]
self.certificate_all_ns_valid = namespace["cert_allns_valid"]
self.certificate_all_ns_ssc = namespace["cert_allns_ssc"]
self.node_id = namespace["node_id"]
wl_name = random_test_name("workload-test")
wl_con = [{"name": "wk1-test",
"image": TEST_IMAGE}]
scheduling = {"node": {"nodeId": self.node_id}}
self.workload = self.p_client.create_workload(
name=wl_name, containers=wl_con, namespaceId=self.ns.id,
scheduling=scheduling
)
self.ingress = None
self.workload_2 = None
yield
self.p_client.delete(self.workload)
if self.workload_2 is not None:
self.p_client.delete(self.workload_2)
if self.ingress is not None:
self.p_client.delete(self.ingress)
def test_certificate_create_validcert_for_single_ns(self):
"""
Test steps:
1. Validate the workload available in ns-certificate namespace
2. Create an ingress including trusted certificate scoped for current
namespace and route53 host.
3. validate the ingress using endpoint
"""
ingress_name = random_test_name("ingress-test")
host = route_entry_53_1
path = "/name.html"
rule = {"host": host,
"paths": [{"path": path, "workloadIds": [self.workload.id],
"targetPort": TEST_IMAGE_PORT}]}
tls = {"certificateId": self.certificate_valid.id, "hosts": [host]}
validate_workload(self.p_client, self.workload, "deployment",
self.ns.name)
self.ingress = self.p_client.create_ingress(
name=ingress_name, namespaceId=self.ns.id, rules=[rule], tls=[tls]
)
wait_for_ingress_to_active(self.p_client, self.ingress)
validate_ingress_using_endpoint(
self.p_client, self.ingress, [self.workload], certcheck=True)
def test_certificate_create_validcert_for_all_ns(self):
"""
Test steps:
1. Validate the workload available in ns-certificate namespace
2. Create an ingress including trusted certificate scoped for all
namespace and route53 host.
3. validate the ingress using endpoint
"""
ingress_name = random_test_name("ingress-test")
host = route_entry_53_1
path = "/name.html"
rule = {"host": host,
"paths": [{"path": path, "workloadIds": [self.workload.id],
"targetPort": TEST_IMAGE_PORT}]
}
tls = {"certificateId": self.certificate_all_ns_valid.id,
"hosts": [host]
}
validate_workload(self.p_client, self.workload, "deployment",
self.ns.name)
self.ingress = self.p_client.create_ingress(
name=ingress_name, namespaceId=self.ns.id, rules=[rule], tls=[tls]
)
wait_for_ingress_to_active(self.p_client, self.ingress)
validate_ingress_using_endpoint(
self.p_client, self.ingress, [self.workload], certcheck=True)
def test_certificate_create_validcert_for_all_ns_2(self):
"""
Test steps:
1. Create a namespace
2. Create a workload in namespace created above.
3. Validate the workload.
4. Create an ingress including trusted certificate scoped for all
namespace and route53 host.
5. validate the ingress using endpoint
"""
wl_name = random_test_name("workload-test")
wl_con = [{"name": "wk2-test",
"image": TEST_IMAGE}]
scheduling = {"node": {"nodeId": self.node_id}}
ns_2 = create_ns(self.c_client, self.cluster, self.project)
self.workload_2 = self.p_client.create_workload(
name=wl_name, containers=wl_con, namespaceId=ns_2.id,
scheduling=scheduling
)
validate_workload(self.p_client, self.workload_2, "deployment",
ns_2.name)
ingress_name = random_test_name("ingress-test")
host = route_entry_53_1
path = "/name.html"
rule = {"host": host,
"paths": [{"path": path, "workloadIds": [self.workload_2.id],
"targetPort": TEST_IMAGE_PORT}]
}
tls = {"certificateId": self.certificate_all_ns_valid.id,
"hosts": [host]
}
self.ingress = self.p_client.create_ingress(
name="{}-2".format(ingress_name), namespaceId=ns_2.id,
rules=[rule], tls=[tls]
)
wait_for_ingress_to_active(self.p_client, self.ingress)
validate_ingress_using_endpoint(
self.p_client, self.ingress, [self.workload_2], certcheck=True)
def test_certificate_create_ssc_for_single_ns(self):
"""
Test steps:
1. Validate the workload available in ns-certificate namespace
2. Create an ingress including self signed certificate scoped for
current namespace and route53 host.
3. validate the ingress using endpoint
"""
validate_workload(self.p_client, self.workload, "deployment",
self.ns.name)
ingress_name = random_test_name("ingress-test")
host = route_entry_53_2
path = "/name.html"
rule = {"host": host,
"paths": [{"path": path, "workloadIds": [self.workload.id],
"targetPort": TEST_IMAGE_PORT}]}
tls = {"certificateId": self.certificate_ssc.id, "hosts": [host]}
self.ingress = self.p_client.create_ingress(
name=ingress_name, namespaceId=self.ns.id, rules=[rule], tls=[tls]
)
wait_for_ingress_to_active(self.p_client, self.ingress)
# validate_ingress(host, path)
validate_ingress_using_endpoint(
self.p_client, self.ingress, [self.workload], certcheck=True,
is_insecure=True
)
def test_certificate_create_ssc_for_all_ns(self):
"""
Test steps:
1. Validate the workload available in ns-certificate namespace
2. Create an ingress including self signed certificate scoped for
all namespace and route53 host.
3. validate the ingress using endpoint
"""
ingress_name = random_test_name("ingress-test")
host = route_entry_53_2
path = "/name.html"
rule = {"host": host,
"paths": [{"path": path, "workloadIds": [self.workload.id],
"targetPort": TEST_IMAGE_PORT}]
}
tls = {"certificateId": self.certificate_all_ns_ssc.id, "hosts": [host]}
self.ingress = self.p_client.create_ingress(
name=ingress_name, namespaceId=self.ns.id, rules=[rule], tls=[tls]
)
wait_for_ingress_to_active(self.p_client, self.ingress)
validate_ingress_using_endpoint(
self.p_client, self.ingress, [self.workload], certcheck=True,
is_insecure=True
)
def test_certificate_create_ssc_for_all_ns_2(self):
"""
Test steps:
1. Create a namespace
2. Create a workload in namespace created above.
3. Validate the workload.
4. Create an ingress including trusted certificate scoped for all
namespace and route53 host.
5. validate the ingress using endpoint
"""
wl_name = random_test_name("workload-test")
wl_con = [{"name": "wk2-test",
"image": TEST_IMAGE}]
scheduling = {"node": {"nodeId": self.node_id}}
ns_2 = create_ns(self.c_client, self.cluster, self.project)
self.workload_2 = self.p_client.create_workload(
name=wl_name, containers=wl_con, namespaceId=ns_2.id,
scheduling=scheduling
)
validate_workload(self.p_client, self.workload_2, "deployment",
ns_2.name)
ingress_name = random_test_name("ingress-test")
host = route_entry_53_2
path = "/name.html"
rule = {"host": host,
"paths": [{"path": path, "workloadIds": [self.workload_2.id],
"targetPort": TEST_IMAGE_PORT}]
}
tls = {"certificateId": self.certificate_all_ns_ssc.id, "hosts": [host]}
self.ingress = self.p_client.create_ingress(
name="{}-2".format(ingress_name), namespaceId=ns_2.id, rules=[rule],
tls=[tls])
wait_for_ingress_to_active(self.p_client, self.ingress)
validate_ingress_using_endpoint(
self.p_client, self.ingress, [self.workload_2], certcheck=True,
is_insecure=True
)
def test_certificate_edit_ssc_to_valid_for_single_ns(self):
"""
Test steps:
1. Create an ingress pointing to self signed certificate scoped to
current namespace.
2. Update the certificate key to trusted.
3. Reload the certificate.
4. Update the ingress.
5. validate the ingress using endpoint.
"""
ingress_name = random_test_name("ingress-test")
host_1 = route_entry_53_2
host_2 = route_entry_53_1
path = "/name.html"
rule_1 = {"host": host_1,
"paths": [{"path": path, "workloadIds": [self.workload.id],
"targetPort": TEST_IMAGE_PORT}]}
rule_2 = {"host": host_2,
"paths": [{"path": path, "workloadIds": [self.workload.id],
"targetPort": TEST_IMAGE_PORT}]}
tls = {"certificateId": self.certificate_ssc.id, "hosts": [host_1]}
tls_2 = {"certificateId": self.certificate_ssc.id, "hosts": [host_2]}
self.ingress = self.p_client.create_ingress(
name=ingress_name, namespaceId=self.ns.id, rules=[rule_1],
tls=[tls]
)
wait_for_ingress_to_active(self.p_client, self.ingress)
self.p_client.update(
self.certificate_ssc, key=rancher_private_key, certs=rancher_cert
)
self.p_client.reload(self.certificate_ssc)
self.p_client.update(self.ingress, rules=[rule_2], tls=[tls_2])
self.p_client.reload(self.ingress)
wait_for_ingress_to_active(self.p_client, self.ingress)
validate_ingress_using_endpoint(
self.p_client, self.ingress, [self.workload], certcheck=True)
def test_certificate_edit_ssc_to_valid_cert_for_all_ns(self):
"""
Test steps:
1. Create an ingress pointing to self signed certificate scoped to
all namespace.
2. Update the certificate key to trusted.
3. Reload the certificate.
4. Update the ingress.
5. validate the ingress using endpoint.
"""
ingress_name = random_test_name("ingress-test")
host_1 = route_entry_53_2
host_2 = route_entry_53_1
path = "/name.html"
rule_1 = {"host": host_1,
"paths": [{"path": path, "workloadIds": [self.workload.id],
"targetPort": TEST_IMAGE_PORT}]
}
rule_2 = {"host": host_2,
"paths": [{"path": path, "workloadIds": [self.workload.id],
"targetPort": TEST_IMAGE_PORT}]
}
tls = {"certificateId": self.certificate_all_ns_ssc.id,
"hosts": [host_1]}
tls_2 = {"certificateId": self.certificate_all_ns_ssc.id,
"hosts": [host_2]}
self.ingress = self.p_client.create_ingress(
name=ingress_name, namespaceId=self.ns.id, rules=[rule_1],
tls=[tls]
)
wait_for_ingress_to_active(self.p_client, self.ingress)
self.p_client.update(
self.certificate_all_ns_ssc, key=rancher_private_key,
certs=rancher_cert
)
self.p_client.reload(self.certificate_all_ns_ssc)
self.p_client.update(self.ingress, rules=[rule_2], tls=[tls_2])
self.p_client.reload(self.ingress)
wait_for_ingress_to_active(self.p_client, self.ingress)
validate_ingress_using_endpoint(
self.p_client, self.ingress, [self.workload], certcheck=True)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_create_certificate(self, role):
"""
Test steps:
        1. Create a certificate scoped to all namespaces, for each role
2. Delete the certificate
"""
token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
p_client = get_project_client_for_token(project, token)
cert_name = random_test_name("cert-rbac")
if role in (CLUSTER_MEMBER, PROJECT_READ_ONLY):
with pytest.raises(ApiError) as e:
p_client.create_certificate(
name=cert_name, key=rancher_private_key,
certs=rancher_cert
)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
else:
certificate_allns_valid = p_client.create_certificate(
name=cert_name, key=rancher_private_key,
certs=rancher_cert
)
assert certificate_allns_valid.issuer == 'R3'
# Delete the certificate
p_client.delete(certificate_allns_valid)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_create_namespaced_certificate(self, role):
"""
Test steps:
        1. Create a certificate scoped to a single namespace, for each role
2. Delete the certificate
"""
token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
p_client = get_project_client_for_token(project, token)
cert_name = random_test_name("cert-rbac")
if role in (CLUSTER_MEMBER, PROJECT_READ_ONLY):
with pytest.raises(ApiError) as e:
p_client.create_namespaced_certificate(
name=cert_name, key=rancher_private_key,
certs=rancher_cert,
namespaceId=ns['name']
)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
else:
certificate_valid = p_client.create_namespaced_certificate(
name=cert_name, key=rancher_private_key, certs=rancher_cert,
namespaceId=ns['name']
)
assert certificate_valid.issuer == 'R3'
# Delete the certificate
p_client.delete(certificate_valid)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_list_namespaced_certificate(self, role):
"""
Test steps:
        1. Create a certificate scoped to a single namespace, as
        cluster owner
2. List the created certificate for all roles
3. Delete the certificate
"""
c_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
p_client = get_project_client_for_token(project, token)
p_client_owner = get_project_client_for_token(project, c_owner_token)
cert_name = random_test_name("cert-rbac")
certificate_valid = p_client_owner.create_namespaced_certificate(
name=cert_name, key=rancher_private_key, certs=rancher_cert,
namespaceId=ns['name']
)
if role in (CLUSTER_MEMBER, PROJECT_READ_ONLY):
cert_count = p_client.list_namespaced_certificate(name=cert_name)
assert len(cert_count) == 0, '{} is able to list the ' \
'certificate'.format(role)
else:
cert_count = p_client.list_namespaced_certificate(name=cert_name)
            assert len(cert_count) > 0, "{} couldn't list the " \
"certificate".format(role)
# Delete the resources
p_client.delete(certificate_valid)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_list_certificate(self, role):
"""
Test steps:
        1. Create a certificate scoped to all namespaces, as
        cluster owner
2. List the created certificate for all roles
3. Delete the certificate
"""
c_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
p_client = get_project_client_for_token(project, token)
p_client_owner = get_project_client_for_token(project,
c_owner_token)
cert_name = random_test_name("cert-rbac")
certificate_allns_valid = p_client_owner.create_certificate(
name=cert_name, key=rancher_private_key,
certs=rancher_cert
)
if role in (CLUSTER_MEMBER, PROJECT_READ_ONLY):
cert_count = p_client.list_certificate(name=cert_name)
assert len(cert_count) == 0, '{} is able to list the ' \
'certificate'.format(role)
else:
cert_count = p_client.list_certificate(name=cert_name)
            assert len(cert_count) > 0, "{} couldn't list the " \
"certificate".format(role)
# Delete the resources
p_client.delete(certificate_allns_valid)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_edit_certificate(self, role):
"""
Test steps:
        1. Create a certificate scoped to all namespaces, as
        cluster owner
2. Update the created certificate for all roles
3. Delete the certificate
"""
c_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
p_client = get_project_client_for_token(project, token)
p_client_owner = get_project_client_for_token(project,
c_owner_token)
cert_name = random_test_name("cert-rbac")
certificate_allns_valid = p_client_owner.create_certificate(
name=cert_name, key=rancher_private_key,
certs=rancher_cert
)
if role in (CLUSTER_MEMBER, PROJECT_READ_ONLY):
with pytest.raises(ApiError) as e:
p_client.update(
certificate_allns_valid, key=rancher_ssc_private_key,
certs=rancher_ssc_cert)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
else:
certificate_allns_valid = p_client.update(
certificate_allns_valid, key=rancher_ssc_private_key,
certs=rancher_ssc_cert)
p_client.reload(certificate_allns_valid)
assert certificate_allns_valid.issuer == 'Rancher QA CA'
# Delete the resources
p_client.delete(certificate_allns_valid)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_edit_namespaced_certificate(self, role):
"""
Test steps:
        1. Create a certificate scoped to a single namespace, as
        cluster owner
2. Update the created certificate for all roles
3. Delete the certificate
"""
c_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
p_client = get_project_client_for_token(project, token)
p_client_owner = get_project_client_for_token(project,
c_owner_token)
cert_name = random_test_name("cert-rbac")
certificate_valid = p_client_owner.create_namespaced_certificate(
name=cert_name, key=rancher_private_key, certs=rancher_cert,
namespaceId=ns['name']
)
if role in (CLUSTER_MEMBER, PROJECT_READ_ONLY):
with pytest.raises(ApiError) as e:
p_client.update(certificate_valid, key=rancher_ssc_private_key,
certs=rancher_ssc_cert)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
else:
certificate_valid = p_client.update(
certificate_valid, key=rancher_ssc_private_key,
certs=rancher_ssc_cert)
p_client.reload(certificate_valid)
assert certificate_valid.issuer == 'Rancher QA CA'
# Delete the resources
p_client.delete(certificate_valid)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_delete_certificate(self, role):
"""
Test steps:
        1. Create a certificate scoped to all namespaces, as
        cluster owner
2. Delete the certificate as different roles.
"""
c_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
p_client = get_project_client_for_token(project, token)
p_client_owner = get_project_client_for_token(project,
c_owner_token)
cert_name = random_test_name("cert-rbac")
certificate_allns_valid = p_client_owner.create_certificate(
name=cert_name, key=rancher_private_key,
certs=rancher_cert
)
if role in (CLUSTER_MEMBER, PROJECT_READ_ONLY):
with pytest.raises(ApiError) as e:
p_client.delete(certificate_allns_valid)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
p_client_owner.delete(certificate_allns_valid)
else:
p_client.delete(certificate_allns_valid)
time.sleep(2)
cert_count = p_client.list_certificate(name=cert_name)
assert len(cert_count) == 0, '{} is not able to delete the ' \
'certificate'.format(role)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_delete_namespaced_certificate(self, role):
"""
Test steps:
        1. As cluster owner, create a certificate in a single namespace
        2. Delete the certificate as each role and verify the RBAC outcome
"""
c_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
p_client = get_project_client_for_token(project, token)
p_client_owner = get_project_client_for_token(project,
c_owner_token)
cert_name = random_test_name("cert-rbac")
certificate_valid = p_client_owner.create_namespaced_certificate(
name=cert_name, key=rancher_private_key, certs=rancher_cert,
namespaceId=ns['name']
)
if role in (CLUSTER_MEMBER, PROJECT_READ_ONLY):
with pytest.raises(ApiError) as e:
p_client.delete(certificate_valid)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
p_client_owner.delete(certificate_valid)
else:
p_client.delete(certificate_valid)
time.sleep(2)
cert_count = p_client.list_namespaced_certificate(name=cert_name)
assert len(cert_count) == 0, '{} is not able to delete the ' \
'certificate'.format(role)
@if_test_rbac
@pytest.mark.parametrize("role", [PROJECT_OWNER, PROJECT_MEMBER])
def test_list_certificate_cross_project(self, role):
"""
Test steps:
        1. List the all-namespace certificate created in the
        'test-certificate' project, both as that project's owner and as the
        users created by the rbac test setup (who should see nothing).
"""
token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
p_client = get_project_client_for_token(project, token)
default_p_client = self.p_client
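        # self.p_client targets the 'test-certificate' project created by the
        # module fixture, while p_client belongs to the rbac project, so the
        # rbac user should not see that project's certificates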
cert_count_by_role = p_client.list_certificate(name='cert-all-ns-valid')
cert_count_default = default_p_client.list_certificate(
name='cert-all-ns-valid')
        assert len(cert_count_default) > 0, \
            "{} couldn't list the certificate".format(role)
        assert len(cert_count_by_role) == 0, \
            "{} could list certificates in the " \
            "'Test Certificate' project".format(role)
@if_test_rbac
@pytest.mark.parametrize("role", [PROJECT_OWNER, PROJECT_MEMBER])
def test_list_ns_certificate_cross_project(self, role):
"""
Test steps:
        1. List the namespaced certificate created in the 'test-certificate'
        project, both as that project's owner and as the users created by
        the rbac test setup (who should see nothing).
"""
token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
p_client = get_project_client_for_token(project, token)
default_p_client = self.p_client
cert_count_by_role = p_client.list_namespaced_certificate(
name='cert-valid')
cert_count_default = default_p_client.list_namespaced_certificate(
name='cert-valid')
        assert len(cert_count_default) > 0, \
            "{} couldn't list the certificate".format(role)
        assert len(cert_count_by_role) == 0, \
            "{} could list certificates in the " \
            "'Test Certificate' project".format(role)
@if_test_rbac
@pytest.mark.parametrize("role", [PROJECT_OWNER, PROJECT_MEMBER])
def test_edit_namespaced_certificate_cross_project(self, role):
"""
Test steps:
        1. Update the namespaced certificate created in the
        'test-certificate' project as the users created by the rbac test
        setup; the update should be forbidden.
"""
token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
p_client = get_project_client_for_token(project, token)
certificate_valid = self.certificate_ssc
with pytest.raises(ApiError) as e:
p_client.update(certificate_valid, key=rancher_private_key,
certs=rancher_cert)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
@pytest.mark.parametrize("role", [PROJECT_OWNER, PROJECT_MEMBER])
def test_edit_certificate_cross_project(self, role):
"""
Test steps:
        1. Update the all-namespace certificate created in the
        'test-certificate' project as the users created by the rbac test
        setup; the update should be forbidden.
"""
token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
p_client = get_project_client_for_token(project, token)
certificate_valid = self.certificate_all_ns_ssc
with pytest.raises(ApiError) as e:
p_client.update(certificate_valid, key=rancher_private_key,
certs=rancher_cert)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@pytest.fixture(scope='module', autouse="True")
def create_project_client(request):
client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
project, ns = create_project_and_ns(USER_TOKEN, cluster,
project_name="test-certificate",
ns_name="ns-certificate")
p_client = get_project_client_for_token(project, USER_TOKEN)
c_client = get_cluster_client_for_token(cluster, USER_TOKEN)
certificate_valid = p_client.create_namespaced_certificate(
name="cert-valid", key=rancher_private_key, certs=rancher_cert,
namespaceId=ns['name']
)
assert certificate_valid.issuer == 'R3'
certificate_allns_valid = p_client.create_certificate(
name="cert-all-ns-valid", key=rancher_private_key,
certs=rancher_cert
)
certificate_ssc = p_client.create_namespaced_certificate(
name="cert-ssc", key=rancher_ssc_private_key, certs=rancher_ssc_cert,
namespaceId=ns['name']
)
assert certificate_ssc.issuer == 'Rancher QA CA'
certificate_allns_ssc = p_client.create_certificate(
name="cert-all-ns-ssc", key=rancher_ssc_private_key,
certs=rancher_ssc_cert
)
nodes = client.list_node(clusterId=cluster.id).data
node_ip, node_id = None, None
    for node in nodes:
        if node.worker:
            node_ip = node.externalIpAddress
            node_id = node.nodePoolId
            break
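    # Create Route53 A records that point the test host names at a worker
    # node so that hosts used elsewhere in this module resolve to the cluster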
aws_services = AmazonWebServices()
aws_services.upsert_route_53_record_cname(
route_entry_53_1, node_ip, record_type='A', record_ttl=60)
aws_services.upsert_route_53_record_cname(
route_entry_53_2, node_ip, record_type='A', record_ttl=60)
namespace["p_client"] = p_client
namespace["c_client"] = c_client
namespace["ns"] = ns
namespace["cluster"] = cluster
namespace["project"] = project
namespace["cert_valid"] = certificate_valid
namespace["cert_ssc"] = certificate_ssc
namespace["cert_allns_valid"] = certificate_allns_valid
namespace["cert_allns_ssc"] = certificate_allns_ssc
namespace["node_id"] = node_id
def fin():
client = get_user_client()
client.delete(namespace["project"])
aws_services.upsert_route_53_record_cname(
route_entry_53_1, node_ip, action='DELETE', record_type='A',
record_ttl=60)
aws_services.upsert_route_53_record_cname(
route_entry_53_2, node_ip, action='DELETE', record_type='A',
record_ttl=60)
request.addfinalizer(fin)
| 34,024 | 42.51023 | 80 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_sa.py
|
import pytest
from .common import * # NOQA
do_test_sa = \
ast.literal_eval(os.environ.get('RANCHER_SA_CHECK', "True"))
if_test_sa = pytest.mark.skipif(
do_test_sa is not True,
reason="This test should not be executed on imported clusters")
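# Set RANCHER_SA_CHECK=False to skip these checks, e.g. when running against
# an imported cluster where the Rancher-managed service accounts are absent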
@if_test_sa
def test_sa_for_user_clusters():
cmd = "get serviceaccounts -n default"
out = execute_kubectl_cmd(cmd, False, False)
assert "netes-default" not in out
cmd = "get serviceaccounts -n cattle-system"
out = execute_kubectl_cmd(cmd, False, False)
assert "kontainer-engine" in out
@pytest.fixture(scope='module', autouse="True")
def create_cluster_client(request):
client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
| 737 | 26.333333 | 67 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_bkp_restore_local.py
|
import pytest
from rancher import ApiError
from .common import * # NOQA
namespace = {"p_client": None, "ns": None, "cluster": None, "project": None,
"nodes": []}
backup_info = {"backupname": None, "backup_id": None, "workload": None,
"backupfilename": None, "etcdbackupdata": None}
rbac_roles = [CLUSTER_MEMBER, PROJECT_OWNER, PROJECT_MEMBER, PROJECT_READ_ONLY]
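# Roles that must NOT be able to manage etcd backups; CLUSTER_OWNER is
# covered by the dedicated *_cluster_owner tests below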
def test_bkp_restore_local_create():
validate_backup_create(namespace, backup_info)
def test_bkp_restore_local_restore():
    ns, binfo = validate_backup_create(namespace, backup_info)
validate_backup_restore(ns, binfo)
def test_bkp_restore_local_delete():
    ns, binfo = validate_backup_create(namespace, backup_info)
ns, binfo = validate_backup_restore(ns, binfo)
validate_backup_delete(ns, binfo)
@if_test_rbac
def test_rbac_bkp_restore_create_cluster_owner():
""" Only cluster-owner should be allowed to create backups """
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
user_client = get_client_for_token(user_token)
user_cluster = user_client.list_cluster(name=CLUSTER_NAME).data[0]
backup = user_cluster.backupEtcd()
backupname = backup['metadata']['name']
wait_for_backup_to_active(user_cluster, backupname)
assert len(user_cluster.etcdBackups(name=backupname)) == 1
@if_test_rbac
@pytest.mark.parametrize("role", rbac_roles)
def test_rbac_bkp_restore_create(role):
"""
    Only cluster-owner should be allowed to create backups;
    an unprivileged user should get 403 PermissionDenied
"""
user_token = rbac_get_user_token_by_role(role)
user_client = get_client_for_token(user_token)
user_cluster = user_client.list_cluster(name=CLUSTER_NAME).data[0]
with pytest.raises(ApiError) as e:
user_cluster.backupEtcd()
assert e.value.error.status == 403
assert e.value.error.code == 'PermissionDenied'
@if_test_rbac
def test_rbac_bkp_restore_list_cluster_owner():
""" Only cluster-owner should be allowed to list backups """
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
user_client = get_client_for_token(user_token)
user_cluster = user_client.list_cluster(name=CLUSTER_NAME).data[0]
backup = user_cluster.backupEtcd()
backupname = backup['metadata']['name']
assert len(user_cluster.etcdBackups(name=backupname)) == 1
@if_test_rbac
@pytest.mark.parametrize("role", rbac_roles)
def test_rbac_bkp_restore_list(role):
"""
    An unprivileged user should not be allowed to list backups;
    cluster etcdBackups() should always return length zero
"""
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
user_client = get_client_for_token(user_token)
user_cluster = user_client.list_cluster(name=CLUSTER_NAME).data[0]
backup = user_cluster.backupEtcd()
backupname = backup['metadata']['name']
assert len(user_cluster.etcdBackups(name=backupname)) == 1
wait_for_backup_to_active(user_cluster, backupname)
user_token2 = rbac_get_user_token_by_role(role)
user_client = get_client_for_token(user_token2)
user_cluster2 = user_client.list_cluster(name=CLUSTER_NAME).data[0]
assert len(user_cluster2.etcdBackups()) == 0
@if_test_rbac
@pytest.mark.parametrize("role", rbac_roles)
def test_rbac_bkp_restore_restore(role):
"""
unprivileged user should not be allowed to restore backups
"""
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
user_client = get_client_for_token(user_token)
user_cluster = user_client.list_cluster(name=CLUSTER_NAME).data[0]
backup = user_cluster.backupEtcd()
backupname = backup['metadata']['name']
etcdbackup = user_cluster.etcdBackups(name=backupname)
backup_id = etcdbackup['data'][0]['id']
wait_for_backup_to_active(user_cluster, backupname)
user_token2 = rbac_get_user_token_by_role(role)
user_client2 = get_client_for_token(user_token2)
user_cluster2 = user_client2.list_cluster(name=CLUSTER_NAME).data[0]
with pytest.raises(ApiError) as e:
user_cluster2.restoreFromEtcdBackup(etcdBackupId=backup_id)
assert e.value.error.status == 403
assert e.value.error.code == 'PermissionDenied'
@if_test_rbac
def test_rbac_bkp_restore_delete_cluster_owner():
""" Only cluster-owner should be allowed to delete backups """
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
user_client = get_client_for_token(user_token)
user_cluster = user_client.list_cluster(name=CLUSTER_NAME).data[0]
backup = user_cluster.backupEtcd()
backupname = backup['metadata']['name']
wait_for_backup_to_active(user_cluster, backupname)
assert len(user_cluster.etcdBackups(name=backupname)) == 1
user_client.delete(
user_cluster.etcdBackups(name=backupname)['data'][0]
)
wait_for_backup_to_delete(user_cluster, backupname)
assert len(user_cluster.etcdBackups(name=backupname)) == 0
@if_test_rbac
@pytest.mark.parametrize("role", rbac_roles)
def test_rbac_bkp_restore_delete(role):
"""
    Only cluster-owner should be allowed to delete backups;
    an unprivileged user shouldn't be allowed to delete them
"""
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
user_client = get_client_for_token(user_token)
user_cluster = user_client.list_cluster(name=CLUSTER_NAME).data[0]
backup = user_cluster.backupEtcd()
backupname = backup['metadata']['name']
wait_for_backup_to_active(user_cluster, backupname)
user_token2 = rbac_get_user_token_by_role(role)
user_client2 = get_client_for_token(user_token2)
user_cluster2 = user_client2.list_cluster(name=CLUSTER_NAME).data[0]
assert len(user_cluster2.etcdBackups(name=backupname)) == 0
backup_to_delete = user_cluster.etcdBackups(name=backupname)['data'][0]
with pytest.raises(ApiError) as e:
user_client2.delete(backup_to_delete)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@pytest.fixture(scope='module', autouse="True")
def create_project_client(request):
client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
p, ns = create_project_and_ns(USER_TOKEN, cluster, "testsecret")
p_client = get_project_client_for_token(p, USER_TOKEN)
c_client = get_cluster_client_for_token(cluster, USER_TOKEN)
namespace["p_client"] = p_client
namespace["ns"] = ns
namespace["cluster"] = cluster
namespace["project"] = p
namespace["c_client"] = c_client
def fin():
client = get_user_client()
client.delete(p)
request.addfinalizer(fin)
| 6,579 | 37.934911 | 79 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_configmaps.py
|
from .common import * # NOQA
from rancher import ApiError
CLUSTER_NAME = os.environ.get("CLUSTER_NAME", "")
namespace = {"p_client": None, "ns": None, "cluster": None, "project": None}
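# Module-level state shared across the tests; populated by the
# create_project_client fixture below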
def test_cmap_create_single_ns_volume():
"""
    Create a configmap. Create and validate a workload that mounts the
    configmap as a volume.
"""
p_client = namespace["p_client"]
ns = namespace["ns"]
value = "valueall"
keyvaluepair = {"testall": value}
configmap = create_configmap(keyvaluepair, p_client, ns)
# Create workloads with configmap in existing namespace
create_and_validate_workload_with_configmap_as_volume(p_client, configmap,
ns, keyvaluepair)
def test_cmap_create_single_ns_env_variable():
"""
    Create a configmap. Create and validate a workload that consumes the
    configmap as environment variables.
"""
p_client = namespace["p_client"]
ns = namespace["ns"]
value = "valueall"
keyvaluepair = {"testall": value}
configmap = create_configmap(keyvaluepair, p_client, ns)
# Create workloads with configmap in existing namespace
create_and_validate_workload_with_configmap_as_env_variable(p_client,
configmap,
ns,
keyvaluepair)
def test_cmap_delete_single_ns():
# Create a configmap and delete the configmap
p_client = namespace["p_client"]
ns = namespace["ns"]
value = "valuetest"
keyvaluepair = {"testalldelete": value}
configmap = create_configmap(keyvaluepair, p_client, ns)
delete_configmap(p_client, configmap, ns, keyvaluepair)
def test_cmap_edit_single_ns():
"""
Create a configmap and update the configmap.
Create and validate workload using the updated configmap
"""
p_client = namespace["p_client"]
ns = namespace["ns"]
name = random_test_name("default")
value = "valueall"
keyvaluepair = {"testall": value}
configmap = create_configmap(keyvaluepair, p_client, ns)
value1 = ("valueall")
value2 = ("valueallnew")
updated_dict = {"testall": value1, "testallnew": value2}
updated_configmap = p_client.update(configmap, name=name,
namespaceId=ns['name'],
data=updated_dict)
updatedconfigmapdata = updated_configmap['data']
assert updatedconfigmapdata.data_dict() == updated_dict
# Create a workload with the updated configmap in the existing namespace
create_and_validate_workload_with_configmap_as_volume(p_client, configmap,
ns,
updatedconfigmapdata)
create_and_validate_workload_with_configmap_as_env_variable(
p_client, configmap, ns, updatedconfigmapdata)
# Cluster member is not added to the role list below because that role has no
# access to the rbac-created project and must create a project of its own
rbac_role_list = [
(CLUSTER_OWNER),
(PROJECT_OWNER),
(PROJECT_MEMBER),
]
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_cmap_create(role):
user_token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
p_client = get_project_client_for_token(project, user_token)
rbac_configmap_create(p_client, ns)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_cmap_edit(role):
user_token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
p_client = get_project_client_for_token(project, user_token)
rbac_configmap_edit(p_client, ns)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_cmap_delete(role):
user_token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
p_client = get_project_client_for_token(project, user_token)
rbac_configmap_delete(p_client, ns)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_cmap_list(remove_resource, role):
cluster_owner_token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
cluster_owner_p_client = get_project_client_for_token(project,
cluster_owner_token)
configmap = rbac_configmap_create(cluster_owner_p_client, ns)
cmapname = configmap["name"]
cmapdict = cluster_owner_p_client.list_configMap(name=cmapname)
print(cmapdict)
assert cmapdict.resourceType == "configMap"
configmapdata = cmapdict.get('data')
assert len(configmapdata) == 1
assert configmapdata[0].name == cmapname
remove_resource(configmap)
@if_test_rbac
def test_rbac_cmap_cluster_member_create(remove_resource):
"""
Verify cluster member can create config map and deploy workload
using config map
"""
user_token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
project, ns = create_project_and_ns(user_token, namespace["cluster"],
random_test_name("rbac-cluster-mem"))
p_client = get_project_client_for_token(project, user_token)
rbac_configmap_create(p_client, ns)
remove_resource(project)
@if_test_rbac
def test_rbac_cmap_cluster_member_edit(remove_resource):
"""
    Verify cluster member can create and edit a config map and deploy a
    workload using it
"""
user_token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
project, ns = create_project_and_ns(user_token, namespace["cluster"],
random_test_name("rbac-cluster-mem"))
p_client = get_project_client_for_token(project, user_token)
rbac_configmap_edit(p_client, ns)
remove_resource(project)
@if_test_rbac
def test_rbac_cmap_cluster_member_list(remove_resource):
user_token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
project, ns = create_project_and_ns(user_token, namespace["cluster"],
random_test_name("rbac-cluster-mem"))
p_client = get_project_client_for_token(project, user_token)
configmap = rbac_configmap_create(p_client, ns)
cmapname = configmap["name"]
print(cmapname)
cmapdict = p_client.list_configMap(name=cmapname)
print(cmapdict)
assert cmapdict.resourceType == "configMap"
configmapdata = cmapdict.get('data')
assert len(configmapdata) == 1
assert configmapdata[0].name == cmapname
remove_resource(project)
remove_resource(configmap)
@if_test_rbac
def test_rbac_cmap_cluster_member_delete(remove_resource):
"""
    Verify cluster member can create and then delete a config map
"""
user_token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
project, ns = create_project_and_ns(user_token, namespace["cluster"],
random_test_name("rbac-cluster-mem"))
p_client = get_project_client_for_token(project, user_token)
rbac_configmap_delete(p_client, ns)
remove_resource(project)
@if_test_rbac
def test_rbac_cmap_project_readonly_member_create():
project = rbac_get_project()
ns = rbac_get_namespace()
user_token1 = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
readonly_user_client = get_project_client_for_token(project, user_token1)
value = "valueall"
keyvaluepair = {"testall": value}
# Read Only member cannot create config maps
with pytest.raises(ApiError) as e:
create_configmap(keyvaluepair, readonly_user_client, ns)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
def test_rbac_cmap_project_readonly_member_edit(remove_resource):
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
project = rbac_get_project()
ns = rbac_get_namespace()
user_token1 = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
readonly_user_client = get_project_client_for_token(project, user_token1)
value = "valueall"
keyvaluepair = {"testall": value}
cluster_owner_p_client = get_project_client_for_token(project,
cluster_owner_token)
# As a cluster owner, create a config map
configmap = create_configmap(keyvaluepair, cluster_owner_p_client, ns)
# Readonly member cannot edit configmap
value1 = ("valueall")
value2 = ("valueallnew")
updated_dict = {"testall": value1, "testallnew": value2}
with pytest.raises(ApiError) as e:
readonly_user_client.update(configmap,
namespaceId=ns['name'],
data=updated_dict)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
remove_resource(configmap)
@if_test_rbac
def test_rbac_cmap_project_readonly_delete(remove_resource):
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
project = rbac_get_project()
ns = rbac_get_namespace()
user_token1 = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
readonly_user_client = get_project_client_for_token(project, user_token1)
value = "valueall"
keyvaluepair = {"testall": value}
cluster_owner_p_client = get_project_client_for_token(project,
cluster_owner_token)
# As a cluster owner, create a config map
configmap = create_configmap(keyvaluepair, cluster_owner_p_client, ns)
# Assert read-only user cannot delete the config map
with pytest.raises(ApiError) as e:
delete_configmap(readonly_user_client, configmap, ns, keyvaluepair)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
remove_resource(configmap)
@if_test_rbac
def test_rbac_cmap_readonly_list(remove_resource):
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
user_token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
project = rbac_get_project()
ns = rbac_get_namespace()
cluster_owner_p_client = get_project_client_for_token(project,
cluster_owner_token)
configmap = rbac_configmap_create(cluster_owner_p_client, ns)
p_client = get_project_client_for_token(project, user_token)
cmapname = configmap["name"]
cmapdict = p_client.list_configMap(name=cmapname)
print(cmapdict)
assert cmapdict.resourceType == "configMap"
configmapdata = cmapdict.get('data')
assert len(configmapdata) == 1
assert configmapdata[0].name == cmapname
remove_resource(configmap)
def rbac_configmap_create(p_client, ns):
"""
    Verify creating a config map and consuming it from a workload works.
The p_client passed as the parameter would be as per the role assigned
"""
value = "valueall"
keyvaluepair = {"testall": value}
configmap = create_configmap(keyvaluepair, p_client, ns)
# Create workloads with configmap in existing namespace
create_and_validate_workload_with_configmap_as_volume(p_client, configmap,
ns, keyvaluepair)
return configmap
def rbac_configmap_edit(p_client, ns):
"""
Verify creating, editing and deleting config maps is functional.
The p_client passed as the parameter would be as per the role assigned
"""
value = "valueall"
keyvaluepair = {"testall": value}
configmap = create_configmap(keyvaluepair, p_client, ns)
# Create workloads with configmap in existing namespace
create_and_validate_workload_with_configmap_as_volume(p_client, configmap,
ns, keyvaluepair)
# Verify editing of configmap in the project
value1 = ("valueall")
value2 = ("valueallnew")
updated_dict = {"testall": value1, "testallnew": value2}
updated_configmap = p_client.update(configmap,
namespaceId=ns['name'],
data=updated_dict)
updatedconfigmapdata = updated_configmap['data']
create_and_validate_workload_with_configmap_as_volume(p_client, configmap,
ns,
updatedconfigmapdata)
p_client.delete(updated_configmap)
def rbac_configmap_delete(p_client, ns):
"""
    Verify creating and deleting a config map is functional.
The p_client passed as the parameter would be as per the role assigned
"""
value = "valueall"
keyvaluepair = {"testall": value}
configmap = create_configmap(keyvaluepair, p_client, ns)
# Verify deletion of config map
delete_configmap(p_client, configmap, ns, keyvaluepair)
@pytest.fixture(scope='module', autouse="True")
def create_project_client(request):
client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
p, ns = create_project_and_ns(USER_TOKEN, cluster, "testconfigmap")
p_client = get_project_client_for_token(p, USER_TOKEN)
c_client = get_cluster_client_for_token(cluster, USER_TOKEN)
namespace["p_client"] = p_client
namespace["ns"] = ns
namespace["cluster"] = cluster
namespace["project"] = p
namespace["c_client"] = c_client
def fin():
client = get_user_client()
client.delete(namespace["project"])
request.addfinalizer(fin)
def validate_workload_with_configmap(p_client, workload,
type, ns_name, keyvaluepair,
workloadwithconfigmapasvolume=False,
workloadwitconfigmapasenvvar=False,
podcount=1):
validate_workload(p_client, workload, type, ns_name, pod_count=podcount)
pod_list = p_client.list_pod(workloadId=workload.id).data
mountpath = "/test"
    for key, value in keyvaluepair.items():
        if workloadwithconfigmapasvolume:
            key_file_in_pod = mountpath + "/" + key
            command = "cat " + key_file_in_pod
            print("Command to display configmap value from the container: ")
            print(command)
            result = kubectl_pod_exec(pod_list[0], command)
            assert result.decode("utf-8") == value
        elif workloadwitconfigmapasenvvar:
            command = 'env'
            result = kubectl_pod_exec(pod_list[0], command)
            # the configmap value must show up in the container environment
            assert value in result.decode("utf-8")
def delete_configmap(client, configmap, ns, keyvaluepair):
key = list(keyvaluepair.keys())[0]
print("Delete Configmap")
client.delete(configmap)
# Sleep to allow for the configmap to be deleted
time.sleep(5)
timeout = 30
configmapname = configmap.name
print("Config Map list after deleting config map")
configmapdict = client.list_configMap(name=configmapname)
start = time.time()
    if len(configmapdict.get('data')) > 0:
        testdata = configmapdict.get('data')
        print("TESTDATA")
        print(testdata[0]['data'])
        while len(testdata) > 0 and key in testdata[0]['data']:
            if time.time() - start > timeout:
                raise AssertionError("Timed out waiting for deletion")
            time.sleep(.5)
            configmapdict = client.list_configMap(name=configmapname)
            testdata = configmapdict.get('data')
    # Verify the configmap is deleted via "kubectl get configmap"
    command = " get configmap " + configmap['name'] + " --namespace=" + ns.name
    print("Command to obtain the configmap")
    print(command)
    result = execute_kubectl_cmd(command, json_out=False, stderr=True)
    print(result)
    # kubectl should report that the configmap no longer exists
    assert "NotFound" in result.decode("utf-8"), \
        "Configmap {} was not deleted".format(configmapname)
def create_and_validate_workload_with_configmap_as_volume(p_client, configmap,
ns, keyvaluepair):
workload_name = random_test_name("test")
# Create Workload with configmap as volume
mountpath = "/test"
volumeMounts = [{"readOnly": False, "type": "volumeMount",
"mountPath": mountpath, "name": "vol1"}]
con = [{"name": "test1",
"image": TEST_IMAGE,
"volumeMounts": volumeMounts}]
configmapname = configmap['name']
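    # defaultMode 256 below is octal 0400, i.e. the mounted key files are
    # readable only by the owner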
volumes = [{"type": "volume", "name": "vol1",
"configMap": {"type": "configMapVolumeSource",
"defaultMode": 256,
"name": configmapname,
"optional": False}}]
workload = p_client.create_workload(name=workload_name,
containers=con,
namespaceId=ns.id, volumes=volumes)
validate_workload_with_configmap(p_client, workload, "deployment",
ns.name, keyvaluepair,
workloadwithconfigmapasvolume=True)
# Delete workload
p_client.delete(workload)
def create_and_validate_workload_with_configmap_as_env_variable(p_client,
configmap,
ns,
keyvaluepair):
workload_name = random_test_name("test")
# Create Workload with configmap as env variable
configmapname = configmap['name']
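    # sourceKey is left as None so the whole configmap is exposed as
    # environment variables; the validation below checks each value in the
    # container's env output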
environmentdata = [{
"source": "configMap",
"sourceKey": None,
"sourceName": configmapname
}]
con = [{"name": "test",
"image": TEST_IMAGE,
"environmentFrom": environmentdata}]
workload = p_client.create_workload(name=workload_name,
containers=con,
namespaceId=ns.id)
validate_workload_with_configmap(p_client, workload, "deployment",
ns.name, keyvaluepair,
workloadwitconfigmapasenvvar=True)
# Delete workload
p_client.delete(workload)
def create_configmap(keyvaluepair, p_client=None, ns=None):
if p_client is None:
p_client = namespace["p_client"]
if ns is None:
ns = namespace["ns"]
name = random_test_name("testconfigmap")
configmap = p_client.create_configMap(name=name, namespaceId=ns['name'],
data=keyvaluepair)
assert configmap['baseType'] == "configMap"
print(configmap)
configdata = configmap['data']
assert configdata.data_dict() == keyvaluepair
return configmap
| 19,443 | 34.808471 | 79 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_eks_cluster.py
|
from .common import * # NOQA
import pytest
EKS_ACCESS_KEY = os.environ.get('RANCHER_EKS_ACCESS_KEY', "")
EKS_SECRET_KEY = os.environ.get('RANCHER_EKS_SECRET_KEY', "")
EKS_AMI = os.environ.get('RANCHER_EKS_AMI', "")
EKS_REGION = os.environ.get('RANCHER_EKS_REGION', "us-west-2")
EKS_K8S_VERSION = os.environ.get('RANCHER_EKS_K8S_VERSION', "1.17")
# Hardcoded to follow UI-style:
# https://github.com/rancher/ui/blob/master/lib/shared/addon/components/cluster-driver/driver-amazoneks/component.js
EKS_K8S_VERSIONS = os.environ.get('RANCHER_EKS_K8S_VERSIONS',
"1.17,1.16,1.15").split(",")
ekscredential = pytest.mark.skipif(not (EKS_ACCESS_KEY and EKS_SECRET_KEY),
reason='EKS Credentials not provided, '
'cannot create cluster')
@ekscredential
def test_create_eks_cluster():
client, cluster = create_and_validate_eks_cluster(EKS_K8S_VERSION)
cluster_cleanup(client, cluster)
def create_and_validate_eks_cluster(k8s_version):
client = get_user_client()
eks_config = get_eks_config(k8s_version)
print("Cluster creation")
cluster = client.create_cluster(eks_config)
print(cluster)
cluster = validate_cluster(client, cluster, check_intermediate_state=True,
skipIngresscheck=True)
return client, cluster
def get_eks_config(version):
amazon_config = {
"accessKey": EKS_ACCESS_KEY,
"secretKey": EKS_SECRET_KEY,
"instanceType": "m4.large",
"maximumNodes": 3,
"minimumNodes": 1,
"kubernetesVersion": version,
"region": EKS_REGION,
"subnets": [],
"type": "amazonElasticContainerServiceConfig",
"virtualNetwork": None,
"dockerRootDir": "/var/lib/docker",
"enableNetworkPolicy": False,
}
    # only set a custom AMI when one was supplied via RANCHER_EKS_AMI
    if EKS_AMI:
amazon_config.update({"ami": EKS_AMI})
# Generate the config for EKS cluster
eks_config = {
"amazonElasticContainerServiceConfig": amazon_config,
"name": random_test_name("test-auto-eks"),
"type": "cluster"
}
print("\nEKS Configuration")
print(eks_config)
return eks_config
| 2,230 | 31.333333 | 116 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_node_label.py
|
import pytest
import time
from .common import create_kubeconfig
from .common import CLUSTER_MEMBER
from .common import CLUSTER_OWNER
from .common import PROJECT_MEMBER
from .common import PROJECT_OWNER
from .common import PROJECT_READ_ONLY
from .common import get_client_for_token
from .common import delete_node
from .common import get_node_details
from .common import get_user_client
from .common import get_user_client_and_cluster
from .common import execute_kubectl_cmd
from .common import if_test_rbac
from .common import random_name
from .common import random_test_name
from .common import rbac_get_user_token_by_role
from .common import validate_cluster_state
from .common import wait_for_condition
from .conftest import wait_for_cluster_delete
from rancher import ApiError
from .test_rke_cluster_provisioning import DO_ACCESSKEY
from .test_rke_cluster_provisioning import evaluate_clustername
from .test_rke_cluster_provisioning import get_custom_host_registration_cmd
from .test_rke_cluster_provisioning import HOST_NAME
from .test_rke_cluster_provisioning import random_node_name
from .test_rke_cluster_provisioning import rke_config
from .test_rke_cluster_provisioning import wait_for_cluster_node_count
from lib.aws import AmazonWebServices
cluster_detail = {"cluster": None, "client": None}
cluster_node_template = {"cluster": None, "node_pools": None,
"node_template": None, "do_cloud_credential": None,
"label_value": None, "test_label": None}
cluster_custom = {"cluster": None, "test_label": None,
"label_value": None, "aws_node": None}
custom_cluster_add_edit = {"cluster": None, "aws_node": []}
cluster_node_template_2 = {"cluster": [], "node_template": []}
roles = [CLUSTER_MEMBER, CLUSTER_OWNER, PROJECT_OWNER, PROJECT_MEMBER,
PROJECT_READ_ONLY]
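# Roles exercised by the RBAC node label tests; only CLUSTER_OWNER is
# expected to be able to modify node labels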
def test_node_label_add():
test_label = random_name()
label_value = random_name()
# get node details
client, node = get_node_details(cluster_detail["cluster"], cluster_detail["client"])
# add label through API
node_labels = node.labels.data_dict()
node_labels[test_label] = label_value
client.update(node, labels=node_labels)
# Label should be added
wait_for_condition(client, node, check_label_added(test_label), None, 10)
validate_label_set_on_node(client, node, test_label, label_value)
# delete label
del node_labels[test_label]
client.update(node, labels=node_labels)
def test_node_label_edit():
test_label = random_name()
label_value = random_name()
# get node details
client, node = get_node_details(cluster_detail["cluster"], cluster_detail["client"])
# add label through API
node_labels = node.labels.data_dict()
node_labels[test_label] = label_value
client.update(node, labels=node_labels)
time.sleep(2)
# Label should be added
node = client.reload(node)
wait_for_condition(client, node, check_label_added(test_label), None, 10)
validate_label_set_on_node(client, node, test_label, label_value)
# edit label through API
node = client.reload(node)
node_labels = node.labels.data_dict()
new_value = random_name()
node_labels[test_label] = new_value
client.update(node, labels=node_labels)
node = client.reload(node)
time.sleep(2)
# Label should be added
wait_for_condition(client, node, check_label_added(test_label), None, 10)
validate_label_set_on_node(client, node, test_label, new_value)
# delete label
del node_labels[test_label]
client.update(node, labels=node_labels)
def test_node_label_delete():
test_label = random_name()
label_value = random_name()
# get node details
client, node = get_node_details(cluster_detail["cluster"], cluster_detail["client"])
# add labels on node
node_labels = node.labels.data_dict()
node_labels[test_label] = label_value
client.update(node, labels=node_labels)
time.sleep(2)
# Label should be added
wait_for_condition(client, node, check_label_added(test_label), None, 10)
validate_label_set_on_node(client, node, test_label, label_value)
# delete label
del node_labels[test_label]
client.update(node, labels=node_labels)
time.sleep(2)
# label should be deleted
wait_for_condition(client, node, check_label_removed(test_label), None, 10)
validate_label_deleted_on_node(client, node, test_label)
def test_node_label_kubectl_add():
test_label = random_name()
label_value = random_name()
# get node details
client, node = get_node_details(cluster_detail["cluster"], cluster_detail["client"])
node_name = node.nodeName
# add label on node
command = "label nodes " + node_name + " " + test_label + "=" + label_value
print(command)
execute_kubectl_cmd(command, False)
time.sleep(2)
# Label should be added
node = client.reload(node)
wait_for_condition(client, node, check_label_added(test_label), None, 10)
validate_label_set_on_node(client, node, test_label, label_value)
# remove label
node = client.reload(node)
node_labels = node.labels.data_dict()
del node_labels[test_label]
client.update(node, labels=node_labels)
def test_node_label_kubectl_edit():
test_label = random_name()
label_value = random_name()
# get node details
client, node = get_node_details(cluster_detail["cluster"], cluster_detail["client"])
node_name = node.nodeName
# add label on node
command = "label nodes " + node_name + " " + test_label + "=" + label_value
print(command)
execute_kubectl_cmd(command, False)
time.sleep(2)
# Label should be added
node = client.reload(node)
wait_for_condition(client, node, check_label_added(test_label), None, 10)
validate_label_set_on_node(client, node, test_label, label_value)
# edit label through kubectl
new_value = random_name()
command = "label nodes " + node_name + " " + \
test_label + "=" + new_value + " --overwrite"
print(command)
execute_kubectl_cmd(command, False)
node = client.reload(node)
time.sleep(2)
# New Label should be added
wait_for_condition(client, node, check_label_added(test_label), None, 10)
validate_label_set_on_node(client, node, test_label, label_value=new_value)
# remove label
node = client.reload(node)
node_labels = node.labels.data_dict()
del node_labels[test_label]
client.update(node, labels=node_labels)
def test_node_label_kubectl_delete():
test_label = random_name()
label_value = random_name()
# get node details
client, node = get_node_details(cluster_detail["cluster"], cluster_detail["client"])
node_name = node.nodeName
# add label on node
command = "label nodes " + node_name + " " + test_label + "=" + label_value
print(command)
execute_kubectl_cmd(command, False)
time.sleep(2)
# Label should be added
wait_for_condition(client, node, check_label_added(test_label), None, 10)
validate_label_set_on_node(client, node, test_label, label_value)
# remove label through kubectl
command = " label node " + node_name + " " + test_label + "-"
execute_kubectl_cmd(command, False)
time.sleep(2)
# label should be deleted
wait_for_condition(client, node, check_label_removed(test_label), None, 10)
validate_label_deleted_on_node(client, node, test_label)
def test_node_label_k_add_a_delete_k_add():
"""Add via kubectl, Delete via API, Add via kubectl"""
test_label = random_name()
label_value = random_name()
# get node details
client, node = get_node_details(cluster_detail["cluster"], cluster_detail["client"])
node_name = node.nodeName
command = "label nodes " + node_name + " " + test_label + "=" + label_value
print(command)
execute_kubectl_cmd(command, False)
# Label should be added
wait_for_condition(client, node, check_label_added(test_label), None, 10)
validate_label_set_on_node(client, node, test_label, label_value)
# delete label
node = client.reload(node)
node_labels = node.labels.data_dict()
del node_labels[test_label]
client.update(node, labels=node_labels)
# label should be deleted
wait_for_condition(client, node, check_label_removed(test_label), None, 10)
validate_label_deleted_on_node(client, node, test_label)
# Add label via kubectl
execute_kubectl_cmd(command, False)
# Label should be added
wait_for_condition(client, node, check_label_added(test_label), None, 10)
validate_label_set_on_node(client, node, test_label, label_value)
# clean up label
node = client.reload(node)
node_labels = node.labels.data_dict()
del node_labels[test_label]
client.update(node, labels=node_labels)
def test_node_label_k_add_a_edit_k_edit():
"""Add via kubectl, edit via API, edit via kubectl"""
test_label = random_name()
label_value = random_name()
# get node details
client, node = get_node_details(cluster_detail["cluster"], cluster_detail["client"])
node_name = node.nodeName
command = "label nodes " + node_name + " " + test_label + "=" + label_value
execute_kubectl_cmd(command, False)
# Label should be added
node = client.reload(node)
wait_for_condition(client, node, check_label_added(test_label), None, 10)
validate_label_set_on_node(client, node, test_label, label_value)
# edit label through API
node = client.reload(node)
node_labels = node.labels.data_dict()
new_value = random_name()
node_labels[test_label] = new_value
client.update(node, labels=node_labels)
time.sleep(2)
# Label should be added
node = client.reload(node)
wait_for_condition(client, node, check_label_added(test_label), None, 10)
validate_label_set_on_node(client, node, test_label, new_value)
# edit label through kubectl
new_value_2 = random_name()
command = "label nodes " + node_name + " " + \
test_label + "=" + new_value_2 + " --overwrite"
print(command)
execute_kubectl_cmd(command, False)
time.sleep(2)
# New Label should be added
node = client.reload(node)
wait_for_condition(client, node, check_label_added(test_label), None, 10)
validate_label_set_on_node(client, node, test_label, new_value_2)
# remove label
node = client.reload(node)
node_labels = node.labels.data_dict()
del node_labels[test_label]
client.update(node, labels=node_labels)
def test_node_label_a_add_k_delete_a_add():
"""Add via API, Delete via kubectl, Add via API"""
test_label = random_name()
label_value = random_name()
# get node details
client, node = get_node_details(cluster_detail["cluster"], cluster_detail["client"])
node_name = node.nodeName
node_labels = node.labels.data_dict()
node_labels[test_label] = label_value
client.update(node, labels=node_labels)
time.sleep(2)
# Label should be added
node = client.reload(node)
wait_for_condition(client, node, check_label_added(test_label), None, 10)
validate_label_set_on_node(client, node, test_label, label_value)
# delete label
command = " label node " + node_name + " " + test_label + "-"
execute_kubectl_cmd(command, False)
time.sleep(2)
# label should be deleted
node = client.reload(node)
wait_for_condition(client, node, check_label_removed(test_label), None, 10)
validate_label_deleted_on_node(client, node, test_label)
# Add label via API
node = client.reload(node)
node_labels = node.labels.data_dict()
node_labels[test_label] = label_value
client.update(node, labels=node_labels)
time.sleep(2)
# Label should be added
wait_for_condition(client, node, check_label_added(test_label), None, 10)
validate_label_set_on_node(client, node, test_label, label_value)
# clean up label
node = client.reload(node)
node_labels = node.labels.data_dict()
del node_labels[test_label]
client.update(node, labels=node_labels)
def test_node_label_a_add_k_edit_a_edit():
"""Add via API, Edit via kubectl, Edit via API"""
test_label = random_name()
label_value = random_name()
# get node details
client, node = get_node_details(cluster_detail["cluster"], cluster_detail["client"])
node_name = node.nodeName
node_labels = node.labels.data_dict()
node_labels[test_label] = label_value
client.update(node, labels=node_labels)
time.sleep(2)
# Label should be added
node = client.reload(node)
wait_for_condition(client, node, check_label_added(test_label), None, 10)
validate_label_set_on_node(client, node, test_label, label_value)
# edit label through kubectl
new_value = random_name()
command = "label nodes " + node_name + " " + \
test_label + "=" + new_value + " --overwrite"
print(command)
execute_kubectl_cmd(command, False)
node = client.reload(node)
time.sleep(2)
# New Label should be added
wait_for_condition(client, node, check_label_added(test_label), None, 10)
validate_label_set_on_node(client, node, test_label, label_value=new_value)
# edit label through API
node = client.reload(node)
node_labels = node.labels.data_dict()
new_value_2 = random_name()
node_labels[test_label] = new_value_2
client.update(node, labels=node_labels)
node = client.reload(node)
time.sleep(2)
# Label should be added
wait_for_condition(client, node, check_label_added(test_label), None, 10)
validate_label_set_on_node(client, node, test_label, new_value_2)
# clean up label
node = client.reload(node)
node_labels = node.labels.data_dict()
del node_labels[test_label]
client.update(node, labels=node_labels)
def test_node_label_custom_add_edit_addnode():
""" Create a custom cluster, Add node labels via register command
Edit via API and change the existing label value ONLY
Add a control plane node with label same as the ORIGINAL one
And check the labels on all the nodes."""
test_label = random_name()
label_value = random_name()
cluster_custom["test_label"] = test_label
cluster_custom["label_value"] = label_value
client = cluster_detail["client"]
node_roles = [["worker", "controlplane", "etcd"]]
aws_nodes_list = []
cluster, aws_nodes = \
create_custom_node_label(node_roles, test_label, label_value, True)
create_kubeconfig(cluster)
for node in aws_nodes:
aws_nodes_list.append(node)
nodes = client.list_node(clusterId=cluster.id).data
node = nodes[0]
validate_label_set_on_node(client, node, test_label, label_value)
node_name_1 = node.nodeName
# edit label through API
node = client.reload(node)
node_labels = node.labels.data_dict()
new_value_2 = random_name()
node_labels[test_label] = new_value_2
client.update(node, labels=node_labels)
# cluster will go into updating state
cluster = validate_cluster_state(client, cluster, True,
intermediate_state="updating",
nodes_not_in_active_state=[])
node = client.reload(node)
# Label should be added
validate_label_set_on_node(client, node, test_label, new_value_2)
# add a control plane node with original label
    aws_nodes = \
AmazonWebServices().create_multiple_nodes(
1, random_test_name(HOST_NAME))
for node in aws_nodes:
aws_nodes_list.append(node)
aws_node = aws_nodes[0]
docker_run_cmd = get_custom_host_registration_cmd(client, cluster,
["controlplane"],
aws_node)
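    # Appending --label to the agent registration command registers the new
    # node with the original label value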
docker_run_cmd = \
docker_run_cmd + " --label " + test_label + "=" + label_value
aws_node.execute_command(docker_run_cmd)
wait_for_cluster_node_count(client, cluster, 2)
cluster = validate_cluster_state(client, cluster,
intermediate_state="updating")
nodes = client.list_node(clusterId=cluster.id).data
# cluster cleanup
custom_cluster_add_edit["cluster"] = cluster
custom_cluster_add_edit["aws_node"] = aws_nodes_list
for node in nodes:
if node.nodeName != node_name_1:
validate_label_set_on_node(client, node, test_label, label_value)
else:
validate_label_set_on_node(client, node, test_label, new_value_2)
def test_node_label_node_template_add():
"""
    This test adds a label on the node template and validates that the
    label is applied to the node scaled up from that template
:return: None
"""
client = cluster_detail["client"]
cluster = cluster_node_template["cluster"]
create_kubeconfig(cluster)
nodes = client.list_node(clusterId=cluster.id).data
# get existing nodes info
existing_labels = {}
for node in nodes:
existing_labels[node.nodeName] = {}
existing_labels[node.nodeName] = node.labels.data_dict()
test_label = random_name()
label_value = random_name()
# create a node template with a label
node_template_new, do_cloud_credential = \
create_node_template_label(client, test_label, label_value)
# Add a node in cluster
cluster, node_pools = add_node_cluster(node_template_new, cluster)
nodes = client.list_node(clusterId=cluster.id).data
# validate labels on nodes
for node in nodes:
if node.nodeName not in existing_labels.keys():
# check if label is set on node
validate_label_set_on_node(client, node, test_label, label_value)
else:
# check if the labels on the existing nodes are intact
assert existing_labels[node.nodeName] == node.labels.data_dict(), \
"Labels on existing nodes have changed"
@pytest.mark.run(after='test_node_label_node_template_add')
def test_node_label_node_template_edit():
"""
    This test edits a label on the node template and validates that the
    new label is applied to the node scaled up from that template
    :return: None
"""
client = cluster_detail["client"]
cluster = cluster_node_template["cluster"]
node_template = cluster_node_template["node_template"]
do_cloud_credential = cluster_node_template["do_cloud_credential"]
test_label = cluster_node_template["test_label"]
create_kubeconfig(cluster)
nodes = client.list_node(clusterId=cluster.id).data
existing_labels = {}
for node in nodes:
existing_labels[node.nodeName] = {}
existing_labels[node.nodeName] = node.labels.data_dict()
template_label = node_template.labels.data_dict()
assert test_label in template_label, \
"Label is NOT available on the node template"
new_value = random_name()
template_label[test_label] = new_value
node_template_new = client.update(node_template, labels=template_label,
cloudCredentialId=do_cloud_credential.id,
digitaloceanConfig=
{"region": "nyc3",
"size": "2gb",
"image": "ubuntu-16-04-x64"})
node_template_new = client.wait_success(node_template_new)
assert test_label in node_template_new["labels"], \
"Label is not set on node template"
assert node_template_new["labels"][test_label] == new_value
# Add a node in cluster
cluster, node_pools = add_node_cluster(node_template_new, cluster)
nodes = client.list_node(clusterId=cluster.id).data
"""check original label on the first node,
and the new label on the added node"""
# validate labels on nodes
for node in nodes:
if node.nodeName not in existing_labels.keys():
# check if label is set on node
validate_label_set_on_node(client, node, test_label, new_value)
else:
# check if the labels on the existing nodes are intact
assert existing_labels[node.nodeName] == node.labels.data_dict(), \
"Labels on existing nodes have changed"
@pytest.mark.run(after='test_node_label_node_template_edit')
def test_node_label_node_template_delete():
"""
    This test deletes a label from the node template and validates that the
    label is NOT present on the node scaled up from that template
:return: None
"""
client = cluster_detail["client"]
cluster = cluster_node_template["cluster"]
node_template = cluster_node_template["node_template"]
do_cloud_credential = cluster_node_template["do_cloud_credential"]
test_label = cluster_node_template["test_label"]
create_kubeconfig(cluster_node_template["cluster"])
nodes = client.list_node(clusterId=cluster.id).data
existing_labels = {}
for node in nodes:
existing_labels[node.nodeName] = {}
existing_labels[node.nodeName] = node.labels.data_dict()
# delete label in node template
template_label = node_template.labels.data_dict()
del template_label[test_label]
# update node template
node_template_new = client.update(node_template, labels=template_label,
cloudCredentialId=do_cloud_credential.id,
digitaloceanConfig=
{"region": "nyc3",
"size": "2gb",
"image": "ubuntu-16-04-x64"})
node_template_new = client.wait_success(node_template_new)
assert test_label not in node_template_new["labels"], \
"Label is available on the node template"
# Add a node in cluster with new node template
cluster, node_pools = add_node_cluster(node_template_new, cluster)
nodes = client.list_node(clusterId=cluster.id).data
# validate labels on nodes
for node in nodes:
if node.nodeName not in existing_labels.keys():
node_labels = node.labels.data_dict()
assert test_label not in node_labels, \
"Label is NOT deleted on the node"
else:
# check if the labels on the existing nodes are intact
assert existing_labels[node.nodeName] == node.labels.data_dict(), \
"Labels on existing nodes have changed"
def test_node_label_node_template_edit_api():
"""
This test validates edit of label via API
which is added through node template
:return: None
"""
test_label = random_name()
label_value = random_name()
cluster, node_pools, node_template, do_cloud_credential = \
create_cluster_node_template_label(test_label, label_value)
client = get_user_client()
cluster_node_template_2["cluster"].append(cluster)
cluster_node_template_2["node_template"].append(node_template)
create_kubeconfig(cluster)
node = client.list_node(clusterId=cluster.id).data
node_id = node[0].id
node = client.by_id_node(node_id)
# Edit label on node via API
node_labels = node.labels.data_dict()
assert node_labels[test_label] == label_value
# edit label through API
new_value = random_name()
node_labels[test_label] = new_value
client.update(node, labels=node_labels)
node = client.reload(node)
time.sleep(2)
# Label should be added
wait_for_condition(client, node, check_label_added(test_label), None, 10)
validate_label_set_on_node(client, node, test_label, new_value)
def test_node_label_node_template_delete_api():
"""
This test validates delete of label via API
which is added through node template
:return: None
Expected failure because of issue -
https://github.com/rancher/rancher/issues/26604
"""
test_label = random_name()
label_value = random_name()
cluster, node_pools, node_template, do_cloud_credential = \
create_cluster_node_template_label(test_label, label_value)
client = get_user_client()
cluster_node_template_2["cluster"].append(cluster)
cluster_node_template_2["node_template"].append(node_template)
create_kubeconfig(cluster)
node = client.list_node(clusterId=cluster.id).data
node_id = node[0].id
node = client.by_id_node(node_id)
node_labels = node.labels.data_dict()
assert node_labels[test_label] == label_value
# delete label
del node_labels[test_label]
client.update(node, labels=node_labels)
# cluster will go into updating state
cluster = validate_cluster_state(client, cluster, True,
intermediate_state="updating",
nodes_not_in_active_state=[])
node = client.reload(node)
# label should be deleted
validate_label_deleted_on_node(client, node, test_label)
def test_node_label_custom_add():
"""
This test validates the label on a custom node
added through the registration command
:return:
"""
test_label = random_name()
label_value = random_name()
cluster_custom["test_label"] = test_label
cluster_custom["label_value"] = label_value
client = cluster_detail["client"]
node_roles = [["worker", "controlplane", "etcd"]]
if cluster_custom["cluster"] is None:
cluster_custom["cluster"], aws_nodes = \
create_custom_node_label(node_roles, test_label, label_value, True)
cluster = cluster_custom["cluster"]
cluster_custom["aws_node"] = aws_nodes
else:
cluster = cluster_custom["cluster"]
create_kubeconfig(cluster_custom["cluster"])
nodes = client.list_node(clusterId=cluster.id).data
node = nodes[0]
validate_label_set_on_node(client, node, test_label, label_value)
@pytest.mark.run(after='test_node_label_custom_add')
def test_node_label_custom_edit():
"""
    This test validates editing, via the API, a label that was added
    through the custom registration command
:return: None
"""
create_kubeconfig(cluster_custom["cluster"])
client = cluster_detail["client"]
cluster = cluster_custom["cluster"]
test_label = cluster_custom["test_label"]
nodes = client.list_node(clusterId=cluster.id).data
assert len(nodes) > 0
node_id = nodes[0].id
node = client.by_id_node(node_id)
# edit label through API
node = client.reload(node)
node_labels = node.labels.data_dict()
new_value = random_name()
node_labels[test_label] = new_value
client.update(node, labels=node_labels)
# cluster will go into updating state
cluster = validate_cluster_state(client, cluster, True,
intermediate_state="updating",
nodes_not_in_active_state=[])
node = client.reload(node)
validate_label_set_on_node(client, node, test_label, new_value)
cluster_custom["label_value"] = new_value
@pytest.mark.run(after='test_node_label_custom_edit')
def test_node_label_custom_add_additional():
"""
This test validates addition of labels on the custom nodes
:return: None
"""
create_kubeconfig(cluster_custom["cluster"])
client = cluster_detail["client"]
cluster = cluster_custom["cluster"]
test_label = cluster_custom["test_label"]
label_value = cluster_custom["label_value"]
new_label = random_name()
label_value_new = random_name()
nodes = client.list_node(clusterId=cluster.id).data
assert len(nodes) > 0
node_id = nodes[0].id
node = client.by_id_node(node_id)
node_labels = node.labels.data_dict()
node_labels[new_label] = label_value_new
client.update(node, labels=node_labels)
time.sleep(2)
# Label should be added
wait_for_condition(client, node, check_label_added(test_label), None, 10)
validate_label_set_on_node(client, node, new_label, label_value_new)
validate_label_set_on_node(client, node, test_label, label_value)
# remove label
node = client.reload(node)
node_labels = node.labels.data_dict()
del node_labels[new_label]
client.update(node, labels=node_labels)
@pytest.mark.run(after='test_node_label_custom_add_additional')
def test_node_label_custom_delete():
"""
This test deletes the label on the node via API
:return: None
Expected failure because of issue -
https://github.com/rancher/rancher/issues/26604
"""
create_kubeconfig(cluster_custom["cluster"])
client = cluster_detail["client"]
cluster = cluster_custom["cluster"]
test_label = cluster_custom["test_label"]
nodes = client.list_node(clusterId=cluster.id).data
assert len(nodes) > 0
node_id = nodes[0].id
node = client.by_id_node(node_id)
# remove label
node = client.reload(node)
node_labels = node.labels.data_dict()
del node_labels[test_label]
client.update(node, labels=node_labels)
# cluster will go into updating state
cluster = validate_cluster_state(client, cluster, True,
intermediate_state="updating",
nodes_not_in_active_state=[])
node = client.reload(node)
# label should be deleted
validate_label_deleted_on_node(client, node, test_label)
@if_test_rbac
@pytest.mark.parametrize("role", roles)
def test_rbac_node_label_add(role):
test_label = random_name()
label_value = random_name()
# get node details
client, node = get_node_details(cluster_detail["cluster"], cluster_detail["client"])
node_labels = node.labels.data_dict()
# get user token and client
token = rbac_get_user_token_by_role(role)
print("token: ", token)
user_client = get_client_for_token(token)
node_labels[test_label] = label_value
if role == CLUSTER_OWNER:
user_client.update(node, labels=node_labels)
time.sleep(2)
# Label should be added
wait_for_condition(user_client, node,
check_label_added(test_label), None, 10)
validate_label_set_on_node(user_client, node, test_label, label_value)
else:
with pytest.raises(ApiError) as e:
user_client.update(node, labels=node_labels)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
@pytest.mark.parametrize("role", roles)
def test_rbac_node_label_add_kubectl(role):
test_label = random_name()
label_value = random_name()
# get node details
client, node = get_node_details(cluster_detail["cluster"], cluster_detail["client"])
node_name = node.nodeName
# get user token and client
token = rbac_get_user_token_by_role(role)
user_client = get_client_for_token(token)
print(cluster_detail["cluster"]["id"])
print(cluster_detail["cluster"])
cluster = user_client.list_cluster(id=cluster_detail["cluster"]["id"]).data
print(cluster)
create_kubeconfig(cluster[0])
# add label on node
command = "label nodes " + node_name + " " + test_label + "=" + label_value
if role == CLUSTER_OWNER:
execute_kubectl_cmd(command, False)
time.sleep(2)
# Label should be added
wait_for_condition(user_client, node,
check_label_added(test_label), None, 10)
validate_label_set_on_node(user_client, node, test_label, label_value)
elif role == CLUSTER_MEMBER:
result = execute_kubectl_cmd(command, False, stderr=True)
result = result.decode('ascii')
assert "cannot patch resource \"nodes\"" in result
assert "forbidden" in result
else:
result = execute_kubectl_cmd(command, False, stderr=True)
result = result.decode('ascii')
assert "cannot get resource \"nodes\"" in result
assert "forbidden" in result
@pytest.fixture(scope='module', autouse=True)
def create_project_client(request):
cluster_detail["client"], cluster_detail["cluster"] = \
get_user_client_and_cluster()
test_label = random_name()
label_value = random_name()
"""
Create a cluster for node template related test cases
"""
cluster_node_template["cluster"], \
node_pools, \
cluster_node_template["node_template"], \
cluster_node_template["do_cloud_credential"] = \
create_cluster_node_template_label(test_label, label_value)
cluster_node_template["node_pools"] = node_pools[0]
cluster_node_template["test_label"] = test_label
cluster_node_template["label_value"] = label_value
def fin():
client = get_user_client()
cluster = cluster_node_template["cluster"]
if cluster is not None:
node_pools_list = client.list_nodePool(clusterId=cluster.id).data
# get unique node template ids
client.delete(cluster_node_template["cluster"])
wait_for_cluster_delete(client, cluster["name"])
time.sleep(10)
unique_node_pool = {}
for node_pool in node_pools_list:
if node_pool.nodeTemplateId not in unique_node_pool.keys():
unique_node_pool[node_pool.nodeTemplateId] = \
client.list_node_template(
id=node_pool.nodeTemplateId).data[0]
print("unique_node_pool: ", unique_node_pool)
for key, value in unique_node_pool.items():
client.delete(value)
if cluster_custom["cluster"] is not None:
client.delete(cluster_custom["cluster"])
if cluster_custom["aws_node"] is not None:
delete_node(cluster_custom["aws_node"])
if custom_cluster_add_edit["cluster"] is not None:
client.delete(custom_cluster_add_edit["cluster"])
if custom_cluster_add_edit["aws_node"] is not None:
delete_node(custom_cluster_add_edit["aws_node"])
if len(cluster_node_template_2["cluster"]) != 0:
for cluster in cluster_node_template_2["cluster"]:
client.delete(cluster)
wait_for_cluster_delete(client, cluster.name)
time.sleep(10)
for node_template in cluster_node_template_2["node_template"]:
                node_template = client.reload(node_template)
client.delete(node_template)
request.addfinalizer(fin)
def check_cluster_deleted(client):
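    """
    Condition factory: returns a callable that reloads the given resource
    and reports True once its "data" list is empty, i.e. the cluster is gone.
    """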
def _find_condition(resource):
cluster = client.reload(resource)
if len(cluster["data"]) == 0:
return True
else:
return False
return _find_condition
def check_label_added(test_label):
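    """
    Condition factory: returns a callable that reports True once test_label
    is present in the node's labels.
    """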
def _find_condition(resource):
node_labels = resource.labels.data_dict()
if test_label in node_labels:
return True
else:
return False
return _find_condition
def check_label_removed(test_label):
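    """
    Condition factory: returns a callable that reports True once test_label
    is no longer present in the node's labels.
    """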
def _find_condition(resource):
node_labels = resource.labels.data_dict()
if test_label not in node_labels:
return True
else:
return False
return _find_condition
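# The condition factories above plug into wait_for_condition, mirroring the
# calls made in the tests, for example:
#   wait_for_condition(client, node, check_label_added(test_label), None, 10)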
def validate_label_set_on_node(client, node, test_label, label_value):
"""
This method checks if the label is added on the node via API and kubectl
:param client: user client
    :param node: node on which to validate that the label is set
:param test_label: Label to be validated on the node
:param label_value: label value to be checked
:return: None
"""
print("label_value: ", label_value)
# check via API
node = client.reload(node)
node_labels = node.labels.data_dict()
assert node_labels[test_label] == label_value
# check via kubectl
node_name = node.nodeName
command = " get nodes " + node_name
node_detail = execute_kubectl_cmd(command)
print(node_detail["metadata"]["labels"])
assert test_label in node_detail["metadata"]["labels"], \
"Label is not set in kubectl"
assert node_detail["metadata"]["labels"][test_label] == label_value
def validate_label_deleted_on_node(client, node, test_label):
"""
This method checks if the label is deleted on the node via API and kubectl
:param client: user client
    :param node: node on which to validate that the label is deleted
:param test_label: Label to be validated on the node
:return: None
"""
# check via API
node = client.reload(node)
node_labels = node.labels.data_dict()
assert test_label not in node_labels
# check via kubectl
node_name = node.nodeName
command = " get nodes " + node_name
print(command)
node_detail = execute_kubectl_cmd(command)
print(node_detail["metadata"]["labels"])
assert test_label not in node_detail["metadata"]["labels"]
def add_node_cluster(node_template, cluster):
"""
This method adds a node pool to a given cluster
:param node_template: node pool uses this to create a node
:param cluster: node pool is added to this cluster
:return: cluster, node_pools
"""
client = get_user_client()
nodes = []
node_name = random_node_name()
node = {"hostnamePrefix": node_name,
"nodeTemplateId": node_template.id,
"controlPlane": False,
"etcd": False,
"worker": True,
"quantity": 1,
"clusterId": None}
nodes.append(node)
node_pools = []
for node in nodes:
node["clusterId"] = cluster.id
success = False
start = time.time()
while not success:
if time.time() - start > 10:
raise AssertionError(
"Timed out waiting for cluster owner global Roles")
try:
time.sleep(1)
node_pool = client.create_node_pool(**node)
success = True
except ApiError:
success = False
node_pool = client.wait_success(node_pool)
node_pools.append(node_pool)
cluster = validate_cluster_state(client, cluster,
check_intermediate_state=False)
return cluster, node_pools
def create_cluster_node_template_label(test_label, label_value):
"""
    This method creates a node template with the label key and value provided.
    It then creates a cluster with a node pool that uses this node template.
Cluster spec: 1 node all roles
:param test_label: label to add in the node template
:param label_value: label value
:return: cluster, node_pools, node_template, do_cloud_credential
"""
client = get_user_client()
node_template, do_cloud_credential = \
create_node_template_label(client, test_label, label_value)
assert test_label in node_template["labels"], \
"Label is not set on node template"
assert node_template["labels"][test_label] == label_value
nodes = []
node_name = random_node_name()
node = {"hostnamePrefix": node_name,
"nodeTemplateId": node_template.id,
"controlPlane": True,
"etcd": True,
"worker": True,
"quantity": 1,
"clusterId": None}
nodes.append(node)
cluster = client.create_cluster(
name=random_name(),
rancherKubernetesEngineConfig=rke_config)
node_pools = []
for node in nodes:
node["clusterId"] = cluster.id
success = False
start = time.time()
while not success:
if time.time() - start > 10:
raise AssertionError(
"Timed out waiting for cluster owner global Roles")
try:
time.sleep(1)
node_pool = client.create_node_pool(**node)
success = True
except ApiError:
success = False
node_pool = client.wait_success(node_pool)
node_pools.append(node_pool)
cluster = validate_cluster_state(client, cluster)
return cluster, node_pools, node_template, do_cloud_credential
def create_custom_node_label(node_roles, test_label,
label_value, random_cluster_name=False):
"""
This method creates nodes from AWS and adds the label key and value to
the register command and deploys a custom cluster.
:param node_roles: list of node roles for the cluster
:param test_label: label to add in the docker register command
:param label_value: label value to add in the docker register command
    :param random_cluster_name: if True, generate a random cluster name
        instead of using evaluate_clustername()
:return: cluster and aws nodes created
"""
aws_nodes = \
AmazonWebServices().create_multiple_nodes(
len(node_roles), random_test_name(HOST_NAME))
client = get_user_client()
cluster_name = random_name() if random_cluster_name \
else evaluate_clustername()
cluster = client.create_cluster(name=cluster_name,
driver="rancherKubernetesEngine",
rancherKubernetesEngineConfig=rke_config)
assert cluster.state == "provisioning"
i = 0
for aws_node in aws_nodes:
docker_run_cmd = \
get_custom_host_registration_cmd(client, cluster, node_roles[i],
aws_node)
for nr in node_roles[i]:
aws_node.roles.append(nr)
docker_run_cmd = docker_run_cmd + " --label " + \
test_label + "=" + label_value
aws_node.execute_command(docker_run_cmd)
i += 1
cluster = validate_cluster_state(client, cluster)
return cluster, aws_nodes
def create_node_template_label(client, test_label, label_value):
"""
This method adds a given label with key: test_label and value: label_value
to a node template and returns the node template
:param client: user client
:param test_label: label to add in the node template
:param label_value: value of the label to add in the node template
:return: node template and do cloud credential value
"""
do_cloud_credential_config = {"accessToken": DO_ACCESSKEY}
do_cloud_credential = client.create_cloud_credential(
digitaloceancredentialConfig=do_cloud_credential_config
)
node_template = client.create_node_template(
digitaloceanConfig={"region": "nyc3",
"size": "2gb",
"image": "ubuntu-16-04-x64"},
name=random_name(),
driver="digitalocean",
cloudCredentialId=do_cloud_credential.id,
useInternalIpAddress=True,
labels={"cattle.io/creator": "norman", test_label: label_value})
node_template = client.wait_success(node_template)
return node_template, do_cloud_credential
| 42,943 | 35.424088 | 88 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_airgap.py
|
import base64
import os
import pytest
import re
import time
from lib.aws import AWS_USER
from .common import (
ADMIN_PASSWORD, AmazonWebServices, run_command, wait_for_status_code,
TEST_IMAGE, TEST_IMAGE_NGINX, TEST_IMAGE_OS_BASE, readDataFile,
DEFAULT_CLUSTER_STATE_TIMEOUT
)
from .test_custom_host_reg import (
random_test_name, RANCHER_SERVER_VERSION, HOST_NAME, AGENT_REG_CMD
)
from .test_create_ha import (
set_url_and_password,
RANCHER_HA_CERT_OPTION, RANCHER_VALID_TLS_CERT, RANCHER_VALID_TLS_KEY
)
from .test_import_k3s_cluster import (RANCHER_K3S_VERSION)
PRIVATE_REGISTRY_USERNAME = os.environ.get("RANCHER_BASTION_USERNAME")
PRIVATE_REGISTRY_PASSWORD = \
os.environ.get("RANCHER_BASTION_PASSWORD", ADMIN_PASSWORD)
BASTION_ID = os.environ.get("RANCHER_BASTION_ID", "")
NUMBER_OF_INSTANCES = int(os.environ.get("RANCHER_AIRGAP_INSTANCE_COUNT", "1"))
IMAGE_LIST = os.environ.get("RANCHER_IMAGE_LIST", ",".join(
[TEST_IMAGE, TEST_IMAGE_NGINX, TEST_IMAGE_OS_BASE])).split(",")
ARCH = os.environ.get("RANCHER_ARCH", "amd64")
AG_HOST_NAME = random_test_name(HOST_NAME)
RANCHER_AG_INTERNAL_HOSTNAME = AG_HOST_NAME + "-internal.qa.rancher.space"
RANCHER_AG_HOSTNAME = AG_HOST_NAME + ".qa.rancher.space"
RESOURCE_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'resource')
SSH_KEY_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'.ssh')
def test_deploy_bastion():
node = deploy_bastion_server()
assert node.public_ip_address is not None
def test_deploy_airgap_rancher(check_hostname_length):
bastion_node = deploy_bastion_server()
save_res, load_res = add_rancher_images_to_private_registry(bastion_node)
assert "Image pull success: rancher/rancher:{}".format(
RANCHER_SERVER_VERSION) in save_res[0]
assert "The push refers to repository [{}/rancher/rancher]".format(
bastion_node.host_name) in load_res[0]
ag_node = deploy_airgap_rancher(bastion_node)
public_dns = create_nlb_and_add_targets([ag_node])
print(
"\nConnect to bastion node with:\nssh -i {}.pem {}@{}\n"
"Connect to rancher node by connecting to bastion, then run:\n"
"ssh -i {}.pem {}@{}\n\nOpen the Rancher UI with: https://{}\n"
"** IMPORTANT: SET THE RANCHER SERVER URL UPON INITIAL LOGIN TO: {} **"
"\nWhen creating a cluster, enable private registry with below"
" settings:\nPrivate Registry URL: {}\nPrivate Registry User: {}\n"
"Private Registry Password: (default admin password or "
"whatever you set in RANCHER_BASTION_PASSWORD)\n".format(
bastion_node.ssh_key_name, AWS_USER, bastion_node.host_name,
bastion_node.ssh_key_name, AWS_USER, ag_node.private_ip_address,
public_dns, RANCHER_AG_INTERNAL_HOSTNAME,
bastion_node.host_name, PRIVATE_REGISTRY_USERNAME))
time.sleep(180)
setup_rancher_server()
def test_prepare_airgap_nodes():
bastion_node = get_bastion_node(BASTION_ID)
ag_nodes = prepare_airgap_node(bastion_node, NUMBER_OF_INSTANCES)
assert len(ag_nodes) == NUMBER_OF_INSTANCES
print(
'{} airgapped instance(s) created.\n'
'Connect to these and run commands by connecting to bastion node, '
'then running the following command (with the quotes):\n'
'ssh -i {}.pem {}@NODE_PRIVATE_IP '
'"docker login {} -u {} -p {} && COMMANDS"'.format(
NUMBER_OF_INSTANCES, bastion_node.ssh_key_name, AWS_USER,
bastion_node.host_name, PRIVATE_REGISTRY_USERNAME,
PRIVATE_REGISTRY_PASSWORD))
for ag_node in ag_nodes:
assert ag_node.private_ip_address is not None
assert ag_node.public_ip_address is None
def test_deploy_airgap_nodes():
bastion_node = get_bastion_node(BASTION_ID)
ag_nodes = prepare_airgap_node(bastion_node, NUMBER_OF_INSTANCES)
assert len(ag_nodes) == NUMBER_OF_INSTANCES
print(
'{} airgapped instance(s) created.\n'
'Connect to these and run commands by connecting to bastion node, '
'then running the following command (with the quotes):\n'
'ssh -i {}.pem {}@NODE_PRIVATE_IP '
'"docker login {} -u {} -p {} && COMMANDS"'.format(
NUMBER_OF_INSTANCES, bastion_node.ssh_key_name, AWS_USER,
bastion_node.host_name, PRIVATE_REGISTRY_USERNAME,
PRIVATE_REGISTRY_PASSWORD))
for ag_node in ag_nodes:
assert ag_node.private_ip_address is not None
assert ag_node.public_ip_address is None
results = []
for ag_node in ag_nodes:
deploy_result = run_command_on_airgap_node(bastion_node, ag_node,
AGENT_REG_CMD)
results.append(deploy_result)
for result in results:
assert "Downloaded newer image for {}/rancher/rancher-agent".format(
bastion_node.host_name) in result[1]
def test_deploy_airgap_k3s_private_registry():
bastion_node = get_bastion_node(BASTION_ID)
failures = add_k3s_images_to_private_registry(bastion_node,
RANCHER_K3S_VERSION)
assert failures == [], "Failed to add images: {}".format(failures)
ag_nodes = prepare_airgap_k3s(bastion_node, NUMBER_OF_INSTANCES,
'private_registry')
assert len(ag_nodes) == NUMBER_OF_INSTANCES
print(
'{} airgapped k3s instance(s) created.\n'
'Connect to these and run commands by connecting to bastion node, '
'then connecting to these:\n'
'ssh -i {}.pem {}@NODE_PRIVATE_IP'.format(
NUMBER_OF_INSTANCES, bastion_node.ssh_key_name, AWS_USER))
for ag_node in ag_nodes:
assert ag_node.private_ip_address is not None
assert ag_node.public_ip_address is None
deploy_airgap_k3s_cluster(bastion_node, ag_nodes)
wait_for_airgap_pods_ready(bastion_node, ag_nodes)
# Optionally add k3s cluster to Rancher server
if AGENT_REG_CMD:
print("Adding to rancher server")
result = run_command_on_airgap_node(bastion_node, ag_nodes[0],
AGENT_REG_CMD)
assert "deployment.apps/cattle-cluster-agent created" in result
def test_deploy_airgap_k3s_tarball():
bastion_node = get_bastion_node(BASTION_ID)
add_k3s_tarball_to_bastion(bastion_node, RANCHER_K3S_VERSION)
ag_nodes = prepare_airgap_k3s(bastion_node, NUMBER_OF_INSTANCES, 'tarball')
assert len(ag_nodes) == NUMBER_OF_INSTANCES
print(
'{} airgapped k3s instance(s) created.\n'
'Connect to these and run commands by connecting to bastion node, '
'then connecting to these:\n'
'ssh -i {}.pem {}@NODE_PRIVATE_IP'.format(
NUMBER_OF_INSTANCES, bastion_node.ssh_key_name, AWS_USER))
for ag_node in ag_nodes:
assert ag_node.private_ip_address is not None
assert ag_node.public_ip_address is None
deploy_airgap_k3s_cluster(bastion_node, ag_nodes)
wait_for_airgap_pods_ready(bastion_node, ag_nodes)
# Optionally add k3s cluster to Rancher server
if AGENT_REG_CMD:
print("Adding to rancher server")
for num, ag_node in enumerate(ag_nodes):
prepare_private_registry_on_k3s_node(bastion_node, ag_node)
restart_k3s = 'sudo systemctl restart k3s-agent'
if num == 0:
restart_k3s = 'sudo systemctl restart k3s && ' \
'sudo chmod 644 /etc/rancher/k3s/k3s.yaml'
run_command_on_airgap_node(bastion_node, ag_node, restart_k3s)
result = run_command_on_airgap_node(bastion_node, ag_nodes[0],
AGENT_REG_CMD)
assert "deployment.apps/cattle-cluster-agent created" in result
def test_add_rancher_images_to_private_registry():
bastion_node = get_bastion_node(BASTION_ID)
save_res, load_res = add_rancher_images_to_private_registry(bastion_node)
assert "Image pull success: rancher/rancher:{}".format(
RANCHER_SERVER_VERSION) in save_res[0]
assert "The push refers to repository [{}/rancher/rancher]".format(
bastion_node.host_name) in load_res[0]
def test_add_images_to_private_registry():
bastion_node = get_bastion_node(BASTION_ID)
failures = add_images_to_private_registry(bastion_node, IMAGE_LIST)
assert failures == [], "Failed to add images: {}".format(failures)
def test_deploy_private_registry_without_image_push():
bastion_node = deploy_bastion_server()
save_res, load_res = add_rancher_images_to_private_registry(
bastion_node, push_images=False)
assert "Image pull success: rancher/rancher:{}".format(
RANCHER_SERVER_VERSION) in save_res[0]
assert load_res is None
def setup_rancher_server():
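    """
    Wait for the airgapped Rancher API to respond on the public hostname,
    then hand off to set_url_and_password() (from test_create_ha) to finish
    the initial setup, passing the internal hostname as the server URL.
    """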
base_url = "https://" + RANCHER_AG_HOSTNAME
wait_for_status_code(url=base_url + "/v3", expected_code=401)
auth_url = base_url + "/v3-public/localproviders/local?action=login"
wait_for_status_code(url=auth_url, expected_code=200)
set_url_and_password(base_url, "https://" + RANCHER_AG_INTERNAL_HOSTNAME)
def deploy_bastion_server():
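    """
    Create the bastion host in AWS and turn it into a private registry:
    generate self-signed certs, set up htpasswd credentials, make docker
    trust the certs, and start the registry with docker-compose.
    Returns the bastion node.
    """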
node_name = AG_HOST_NAME + "-bastion"
# Create Bastion Server in AWS
bastion_node = AmazonWebServices().create_node(node_name)
setup_ssh_key(bastion_node)
# Get resources for private registry and generate self signed certs
get_resources_command = \
'scp -q -i {}/{}.pem -o StrictHostKeyChecking=no ' \
'-o UserKnownHostsFile=/dev/null -r {}/airgap/basic-registry/ ' \
'{}@{}:~/basic-registry/'.format(
SSH_KEY_DIR, bastion_node.ssh_key_name, RESOURCE_DIR,
AWS_USER, bastion_node.host_name)
run_command(get_resources_command, log_out=False)
generate_certs_command = \
'docker run -v $PWD/certs:/certs ' \
'-e CA_SUBJECT="My own root CA" ' \
'-e CA_EXPIRE="1825" -e SSL_EXPIRE="365" ' \
'-e SSL_SUBJECT="{}" -e SSL_DNS="{}" ' \
'-e SILENT="true" ' \
'superseb/omgwtfssl'.format(bastion_node.host_name,
bastion_node.host_name)
bastion_node.execute_command(generate_certs_command)
move_certs_command = \
'sudo cat certs/cert.pem certs/ca.pem > ' \
'basic-registry/nginx_config/domain.crt && ' \
'sudo cat certs/key.pem > basic-registry/nginx_config/domain.key'
bastion_node.execute_command(move_certs_command)
# Add credentials for private registry
store_creds_command = \
'docker run --rm melsayed/htpasswd {} {} >> ' \
'basic-registry/nginx_config/registry.password'.format(
PRIVATE_REGISTRY_USERNAME, PRIVATE_REGISTRY_PASSWORD)
bastion_node.execute_command(store_creds_command)
# Ensure docker uses the certs that were generated
update_docker_command = \
'sudo mkdir -p /etc/docker/certs.d/{} && ' \
'sudo cp ~/certs/ca.pem /etc/docker/certs.d/{}/ca.crt && ' \
'sudo service docker restart'.format(
bastion_node.host_name, bastion_node.host_name)
bastion_node.execute_command(update_docker_command)
# Run private registry
docker_compose_command = \
'cd basic-registry && ' \
'sudo curl -L "https://github.com/docker/compose/releases/' \
'download/1.24.1/docker-compose-$(uname -s)-$(uname -m)" ' \
'-o /usr/local/bin/docker-compose && ' \
'sudo chmod +x /usr/local/bin/docker-compose && ' \
'sudo docker-compose up -d'
bastion_node.execute_command(docker_compose_command)
time.sleep(5)
print("Bastion Server Details:\nNAME: {}\nHOST NAME: {}\n"
"INSTANCE ID: {}\n".format(node_name, bastion_node.host_name,
bastion_node.provider_node_id))
return bastion_node
def add_rancher_images_to_private_registry(bastion_node, push_images=True):
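    """
    Download the rancher-images list plus the save/load scripts for
    RANCHER_SERVER_VERSION, pull the images on the bastion and, when
    push_images is True, push them into the bastion's private registry.
    Returns the (save, load) results; load is None when nothing is pushed.
    """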
get_images_command = \
'wget -O rancher-images.txt https://github.com/rancher/rancher/' \
'releases/download/{0}/rancher-images.txt && ' \
'wget -O rancher-save-images.sh https://github.com/rancher/rancher/' \
'releases/download/{0}/rancher-save-images.sh && ' \
'wget -O rancher-load-images.sh https://github.com/rancher/rancher/' \
'releases/download/{0}/rancher-load-images.sh'.format(
RANCHER_SERVER_VERSION)
bastion_node.execute_command(get_images_command)
# Remove the "docker save" and "docker load" lines to save time
edit_save_and_load_command = \
"sudo sed -i '58d' rancher-save-images.sh && " \
"sudo sed -i '76d' rancher-load-images.sh && " \
"chmod +x rancher-save-images.sh && chmod +x rancher-load-images.sh"
bastion_node.execute_command(edit_save_and_load_command)
save_images_command = \
"./rancher-save-images.sh --image-list ./rancher-images.txt"
save_res = bastion_node.execute_command(save_images_command)
if push_images:
load_images_command = \
"docker login {} -u {} -p {} && " \
"./rancher-load-images.sh --image-list ./rancher-images.txt " \
"--registry {}".format(
bastion_node.host_name, PRIVATE_REGISTRY_USERNAME,
PRIVATE_REGISTRY_PASSWORD, bastion_node.host_name)
load_res = bastion_node.execute_command(load_images_command)
print(load_res)
else:
load_res = None
return save_res, load_res
def add_k3s_tarball_to_bastion(bastion_node, k3s_version):
# Get k3s files associated with the specified version
k3s_binary = 'k3s'
if ARCH == 'arm64':
k3s_binary = 'k3s-arm64'
get_tarball_command = \
'wget -O k3s-airgap-images-{1}.tar https://github.com/rancher/k3s/' \
'releases/download/{0}/k3s-airgap-images-{1}.tar && ' \
'wget -O k3s-install.sh https://get.k3s.io/ && ' \
'wget -O k3s https://github.com/rancher/k3s/' \
'releases/download/{0}/{2}'.format(k3s_version, ARCH, k3s_binary)
bastion_node.execute_command(get_tarball_command)
def add_k3s_images_to_private_registry(bastion_node, k3s_version):
# Get k3s files associated with the specified version
k3s_binary = 'k3s'
if ARCH == 'arm64':
k3s_binary = 'k3s-arm64'
get_images_command = \
'wget -O k3s-images.txt https://github.com/rancher/k3s/' \
'releases/download/{0}/k3s-images.txt && ' \
'wget -O k3s-install.sh https://get.k3s.io/ && ' \
'wget -O k3s https://github.com/rancher/k3s/' \
'releases/download/{0}/{1}'.format(k3s_version, k3s_binary)
bastion_node.execute_command(get_images_command)
images = bastion_node.execute_command(
'cat k3s-images.txt')[0].strip().split("\n")
assert images
return add_cleaned_images(bastion_node, images)
def add_cleaned_images(bastion_node, images):
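    """
    Pull each image, strip everything before its "rancher/" prefix, tag and
    push it to the bastion registry, and return the images whose pushed copy
    could not be validated.
    """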
failures = []
for image in images:
pull_image(bastion_node, image)
cleaned_image = re.search(".*(rancher/.*)", image).group(1)
tag_image(bastion_node, cleaned_image)
push_image(bastion_node, cleaned_image)
validate_result = validate_image(bastion_node, cleaned_image)
if bastion_node.host_name not in validate_result[0]:
failures.append(image)
return failures
def add_images_to_private_registry(bastion_node, image_list):
failures = []
for image in image_list:
pull_image(bastion_node, image)
tag_image(bastion_node, image)
push_image(bastion_node, image)
validate_result = validate_image(bastion_node, image)
if bastion_node.host_name not in validate_result[0]:
failures.append(image)
return failures
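# Thin wrappers around the docker CLI on the bastion, used by the
# add_*_to_private_registry helpers above.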
def pull_image(bastion_node, image):
pull_image_command = "docker pull {}".format(image)
bastion_node.execute_command(pull_image_command)
def tag_image(bastion_node, image):
tag_image_command = "docker image tag {0} {1}/{0}".format(
image, bastion_node.host_name)
bastion_node.execute_command(tag_image_command)
def push_image(bastion_node, image):
push_image_command = \
"docker login {} -u {} -p {} && docker push {}/{}".format(
bastion_node.host_name, PRIVATE_REGISTRY_USERNAME,
PRIVATE_REGISTRY_PASSWORD, bastion_node.host_name, image)
bastion_node.execute_command(push_image_command)
def validate_image(bastion_node, image):
validate_image_command = "docker image ls {}/{}".format(
bastion_node.host_name, image)
return bastion_node.execute_command(validate_image_command)
def prepare_airgap_node(bastion_node, number_of_nodes):
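    """
    Create the requested number of private-IP-only instances in AWS and
    prepare them to pull from the bastion registry: add the AWS user to the
    docker group, copy over the registry CA cert and restart docker.
    Returns the list of airgapped nodes.
    """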
node_name = AG_HOST_NAME + "-airgap"
# Create Airgap Node in AWS
ag_nodes = AmazonWebServices().create_multiple_nodes(
number_of_nodes, node_name, public_ip=False)
for num, ag_node in enumerate(ag_nodes):
# Update docker for the user in node
ag_node_update_docker = \
'ssh -i "{}.pem" -o StrictHostKeyChecking=no {}@{} ' \
'"sudo usermod -aG docker {}"'.format(
bastion_node.ssh_key_name, AWS_USER,
ag_node.private_ip_address, AWS_USER)
bastion_node.execute_command(ag_node_update_docker)
# Update docker in node with bastion cert details
ag_node_create_dir = \
'ssh -i "{}.pem" -o StrictHostKeyChecking=no {}@{} ' \
'"sudo mkdir -p /etc/docker/certs.d/{} && ' \
'sudo chown {} /etc/docker/certs.d/{}"'.format(
bastion_node.ssh_key_name, AWS_USER,
ag_node.private_ip_address, bastion_node.host_name,
AWS_USER, bastion_node.host_name)
bastion_node.execute_command(ag_node_create_dir)
ag_node_write_cert = \
'scp -i "{}.pem" -o StrictHostKeyChecking=no ' \
'/etc/docker/certs.d/{}/ca.crt ' \
'{}@{}:/etc/docker/certs.d/{}/ca.crt'.format(
bastion_node.ssh_key_name, bastion_node.host_name,
AWS_USER, ag_node.private_ip_address, bastion_node.host_name)
bastion_node.execute_command(ag_node_write_cert)
ag_node_restart_docker = \
'ssh -i "{}.pem" -o StrictHostKeyChecking=no {}@{} ' \
'"sudo service docker restart"'.format(
bastion_node.ssh_key_name, AWS_USER,
ag_node.private_ip_address)
bastion_node.execute_command(ag_node_restart_docker)
print("Airgapped Instance Details:\nNAME: {}-{}\nPRIVATE IP: {}\n"
"".format(node_name, num, ag_node.private_ip_address))
return ag_nodes
def prepare_private_registry_on_k3s_node(bastion_node, ag_node):
# Ensure registry file has correct data
reg_file = readDataFile(RESOURCE_DIR, "airgap/registries.yaml")
reg_file = reg_file.replace("$PRIVATE_REG", bastion_node.host_name)
reg_file = reg_file.replace("$USERNAME", PRIVATE_REGISTRY_USERNAME)
reg_file = reg_file.replace("$PASSWORD", PRIVATE_REGISTRY_PASSWORD)
# Add registry file to node
ag_node_create_dir = \
'sudo mkdir -p /etc/rancher/k3s && ' \
'sudo chown {} /etc/rancher/k3s'.format(AWS_USER)
run_command_on_airgap_node(bastion_node, ag_node,
ag_node_create_dir)
write_reg_file_command = \
"cat <<EOT >> /etc/rancher/k3s/registries.yaml\n{}\nEOT".format(
reg_file)
run_command_on_airgap_node(bastion_node, ag_node,
write_reg_file_command)
def prepare_airgap_k3s(bastion_node, number_of_nodes, method):
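    """
    Create private-IP-only instances for k3s and copy the install script,
    k3s binary and certs onto them. Depending on method, either configure
    the private registry ('private_registry') or pre-load the airgap image
    tarball ('tarball'). Returns the list of airgapped nodes.
    """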
node_name = AG_HOST_NAME + "-k3s-airgap"
# Create Airgap Node in AWS
ag_nodes = AmazonWebServices().create_multiple_nodes(
number_of_nodes, node_name, public_ip=False)
for num, ag_node in enumerate(ag_nodes):
# Copy relevant k3s files to airgapped node
ag_node_copy_files = \
'scp -i "{0}.pem" -o StrictHostKeyChecking=no ./k3s-install.sh ' \
'{1}@{2}:~/install.sh && ' \
'scp -i "{0}.pem" -o StrictHostKeyChecking=no ./k3s ' \
'{1}@{2}:~/k3s && ' \
'scp -i "{0}.pem" -o StrictHostKeyChecking=no certs/* ' \
'{1}@{2}:~/'.format(bastion_node.ssh_key_name, AWS_USER,
ag_node.private_ip_address)
bastion_node.execute_command(ag_node_copy_files)
ag_node_make_executable = \
'sudo mv ./k3s /usr/local/bin/k3s && ' \
'sudo chmod +x /usr/local/bin/k3s && sudo chmod +x install.sh'
run_command_on_airgap_node(bastion_node, ag_node,
ag_node_make_executable)
if method == 'private_registry':
prepare_private_registry_on_k3s_node(bastion_node, ag_node)
elif method == 'tarball':
ag_node_copy_tarball = \
'scp -i "{0}.pem" -o StrictHostKeyChecking=no ' \
'./k3s-airgap-images-{3}.tar ' \
'{1}@{2}:~/k3s-airgap-images-{3}.tar'.format(
bastion_node.ssh_key_name, AWS_USER,
ag_node.private_ip_address, ARCH)
bastion_node.execute_command(ag_node_copy_tarball)
ag_node_add_tarball_to_dir = \
'sudo mkdir -p /var/lib/rancher/k3s/agent/images/ && ' \
'sudo cp ./k3s-airgap-images-{}.tar ' \
'/var/lib/rancher/k3s/agent/images/'.format(ARCH)
run_command_on_airgap_node(bastion_node, ag_node,
ag_node_add_tarball_to_dir)
print("Airgapped K3S Instance Details:\nNAME: {}-{}\nPRIVATE IP: {}\n"
"".format(node_name, num, ag_node.private_ip_address))
return ag_nodes
def deploy_airgap_k3s_cluster(bastion_node, ag_nodes):
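    """
    Install the k3s server on the first airgapped node, read its node token,
    and join the remaining nodes as agents using that token and the server's
    private IP.
    """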
token = ""
server_ip = ag_nodes[0].private_ip_address
for num, ag_node in enumerate(ag_nodes):
if num == 0:
# Install k3s server
install_k3s_server = \
'INSTALL_K3S_SKIP_DOWNLOAD=true ./install.sh && ' \
'sudo chmod 644 /etc/rancher/k3s/k3s.yaml'
run_command_on_airgap_node(bastion_node, ag_node,
install_k3s_server)
token_command = 'sudo cat /var/lib/rancher/k3s/server/node-token'
token = run_command_on_airgap_node(bastion_node, ag_node,
token_command)[0].strip()
else:
install_k3s_worker = \
'INSTALL_K3S_SKIP_DOWNLOAD=true K3S_URL=https://{}:6443 ' \
'K3S_TOKEN={} ./install.sh'.format(server_ip, token)
run_command_on_airgap_node(bastion_node, ag_node,
install_k3s_worker)
time.sleep(10)
def deploy_airgap_rancher(bastion_node):
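    """
    Prepare a single airgapped node and start the Rancher server container on
    it from the bastion's private registry, optionally mounting the provided
    valid TLS cert and key. Returns the airgapped node.
    """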
ag_node = prepare_airgap_node(bastion_node, 1)[0]
if "v2.5" in RANCHER_SERVER_VERSION or \
"v2.6" in RANCHER_SERVER_VERSION or \
"master" in RANCHER_SERVER_VERSION:
privileged = "--privileged"
else:
privileged = ""
if RANCHER_HA_CERT_OPTION == 'byo-valid':
write_cert_command = "cat <<EOT >> fullchain.pem\n{}\nEOT".format(
base64.b64decode(RANCHER_VALID_TLS_CERT).decode("utf-8"))
run_command_on_airgap_node(bastion_node, ag_node,
write_cert_command)
write_key_command = "cat <<EOT >> privkey.pem\n{}\nEOT".format(
base64.b64decode(RANCHER_VALID_TLS_KEY).decode("utf-8"))
run_command_on_airgap_node(bastion_node, ag_node,
write_key_command)
deploy_rancher_command = \
'sudo docker run -d {} --restart=unless-stopped ' \
'-p 80:80 -p 443:443 ' \
'-v ${{PWD}}/fullchain.pem:/etc/rancher/ssl/cert.pem ' \
'-v ${{PWD}}/privkey.pem:/etc/rancher/ssl/key.pem ' \
'-e CATTLE_SYSTEM_DEFAULT_REGISTRY={} ' \
'-e CATTLE_SYSTEM_CATALOG=bundled ' \
'{}/rancher/rancher:{} --no-cacerts --trace'.format(
privileged, bastion_node.host_name, bastion_node.host_name,
RANCHER_SERVER_VERSION)
else:
deploy_rancher_command = \
'sudo docker run -d {} --restart=unless-stopped ' \
'-p 80:80 -p 443:443 ' \
'-e CATTLE_SYSTEM_DEFAULT_REGISTRY={} ' \
'-e CATTLE_SYSTEM_CATALOG=bundled {}/rancher/rancher:{} --trace'.format(
privileged, bastion_node.host_name, bastion_node.host_name,
RANCHER_SERVER_VERSION)
deploy_result = run_command_on_airgap_node(bastion_node, ag_node,
deploy_rancher_command,
log_out=True)
assert "Downloaded newer image for {}/rancher/rancher:{}".format(
bastion_node.host_name, RANCHER_SERVER_VERSION) in deploy_result[1]
return ag_node
def run_docker_command_on_airgap_node(bastion_node, ag_node, cmd,
log_out=False):
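    """
    Run a docker command on an airgapped node via the bastion, prefixing it
    with a docker login against the bastion's registry.
    """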
docker_login_command = "docker login {} -u {} -p {}".format(
bastion_node.host_name,
PRIVATE_REGISTRY_USERNAME, PRIVATE_REGISTRY_PASSWORD)
if cmd.startswith("sudo"):
docker_login_command = "sudo " + docker_login_command
ag_command = \
'ssh -i "{}.pem" -o StrictHostKeyChecking=no {}@{} ' \
'"{} && {}"'.format(
bastion_node.ssh_key_name, AWS_USER, ag_node.private_ip_address,
docker_login_command, cmd)
result = bastion_node.execute_command(ag_command)
if log_out:
print("Running command: {}".format(ag_command))
print("Result: {}".format(result))
return result
def run_command_on_airgap_node(bastion_node, ag_node, cmd, log_out=False):
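    """
    SSH from the bastion to the airgapped node and run cmd there; docker
    commands are routed through run_docker_command_on_airgap_node so that a
    registry login happens first.
    """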
if cmd.startswith("docker") or cmd.startswith("sudo docker"):
return run_docker_command_on_airgap_node(
bastion_node, ag_node, cmd, log_out)
ag_command = \
'ssh -i "{}.pem" -o StrictHostKeyChecking=no {}@{} ' \
'"{}"'.format(
bastion_node.ssh_key_name, AWS_USER,
ag_node.private_ip_address, cmd)
result = bastion_node.execute_command(ag_command)
if log_out:
print("Running command: {}".format(ag_command))
print("Result: {}".format(result))
return result
def wait_for_airgap_pods_ready(bastion_node, ag_nodes,
kubectl='kubectl', kubeconfig=None):
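    """
    Poll "get nodes" and "get pods -A" on the first airgapped node until all
    nodes are Ready and all pods are Running or Completed, failing after
    DEFAULT_CLUSTER_STATE_TIMEOUT.
    """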
if kubeconfig:
node_cmd = "{} get nodes --kubeconfig {}".format(kubectl, kubeconfig)
command = "{} get pods -A --kubeconfig {}".format(kubectl, kubeconfig)
else:
node_cmd = "{} get nodes".format(kubectl)
command = "{} get pods -A".format(kubectl)
start = time.time()
wait_for_pods_to_be_ready = True
while wait_for_pods_to_be_ready:
unready_pods = []
unready_nodes = []
if time.time() - start > DEFAULT_CLUSTER_STATE_TIMEOUT:
raise AssertionError("Timed out waiting for cluster to be ready")
time.sleep(10)
nodes = run_command_on_airgap_node(bastion_node, ag_nodes[0], node_cmd)
nodes_arr = nodes[0].strip().split("\n")[1:]
for node in nodes_arr:
if "NotReady" in node:
print("Waiting for node: {}".format(node))
unready_nodes.append(node)
if unready_nodes or not nodes_arr:
continue
pods = run_command_on_airgap_node(bastion_node, ag_nodes[0], command)
pods_arr = pods[0].strip().split("\n")[1:]
for pod in pods_arr:
if "Completed" not in pod and "Running" not in pod:
print("Waiting for pod: {}".format(pod))
unready_pods.append(pod)
if unready_pods or not pods_arr:
wait_for_pods_to_be_ready = True
else:
wait_for_pods_to_be_ready = False
def create_nlb_and_add_targets(aws_nodes):
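    """
    Create an internet-facing and an internal network load balancer, upsert
    the Route53 records, create target groups and listeners for ports 80 and
    443, register the given nodes and wait for the targets to start health
    checks. Returns the public DNS name to use for the Rancher hostname.
    """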
# Create internet-facing nlb and grab ARN & dns name
lb = AmazonWebServices().create_network_lb(name=AG_HOST_NAME + "-nlb")
lb_arn = lb["LoadBalancers"][0]["LoadBalancerArn"]
public_dns = lb["LoadBalancers"][0]["DNSName"]
# Create internal nlb and grab ARN & dns name
internal_lb = AmazonWebServices().create_network_lb(
name=AG_HOST_NAME + "-internal-nlb", scheme='internal')
internal_lb_arn = internal_lb["LoadBalancers"][0]["LoadBalancerArn"]
internal_lb_dns = internal_lb["LoadBalancers"][0]["DNSName"]
# Upsert the route53 record -- if it exists, update, if not, insert
AmazonWebServices().upsert_route_53_record_cname(
RANCHER_AG_INTERNAL_HOSTNAME, internal_lb_dns)
if RANCHER_HA_CERT_OPTION == 'byo-valid':
AmazonWebServices().upsert_route_53_record_cname(
RANCHER_AG_HOSTNAME, public_dns)
public_dns = RANCHER_AG_HOSTNAME
# Create the target groups
tg80 = AmazonWebServices(). \
create_ha_target_group(80, AG_HOST_NAME + "-tg-80")
tg443 = AmazonWebServices(). \
create_ha_target_group(443, AG_HOST_NAME + "-tg-443")
tg80_arn = tg80["TargetGroups"][0]["TargetGroupArn"]
tg443_arn = tg443["TargetGroups"][0]["TargetGroupArn"]
# Create the internal target groups
internal_tg80 = AmazonWebServices(). \
create_ha_target_group(80, AG_HOST_NAME + "-internal-tg-80")
internal_tg443 = AmazonWebServices(). \
create_ha_target_group(443, AG_HOST_NAME + "-internal-tg-443")
internal_tg80_arn = internal_tg80["TargetGroups"][0]["TargetGroupArn"]
internal_tg443_arn = internal_tg443["TargetGroups"][0]["TargetGroupArn"]
# Create listeners for the load balancers, to forward to the target groups
AmazonWebServices().create_ha_nlb_listener(
loadBalancerARN=lb_arn, port=80, targetGroupARN=tg80_arn)
AmazonWebServices().create_ha_nlb_listener(
loadBalancerARN=lb_arn, port=443, targetGroupARN=tg443_arn)
AmazonWebServices().create_ha_nlb_listener(
loadBalancerARN=internal_lb_arn, port=80,
targetGroupARN=internal_tg80_arn)
AmazonWebServices().create_ha_nlb_listener(
loadBalancerARN=internal_lb_arn, port=443,
targetGroupARN=internal_tg443_arn)
targets = []
for aws_node in aws_nodes:
targets.append(aws_node.provider_node_id)
# Register the nodes to the internet-facing targets
targets_list = [dict(Id=target_id, Port=80) for target_id in targets]
AmazonWebServices().register_targets(targets_list, tg80_arn)
targets_list = [dict(Id=target_id, Port=443) for target_id in targets]
AmazonWebServices().register_targets(targets_list, tg443_arn)
# Wait up to approx. 5 minutes for targets to begin health checks
for i in range(300):
health80 = AmazonWebServices().describe_target_health(
tg80_arn)['TargetHealthDescriptions'][0]['TargetHealth']['State']
health443 = AmazonWebServices().describe_target_health(
tg443_arn)['TargetHealthDescriptions'][0]['TargetHealth']['State']
if health80 in ['initial', 'healthy'] \
and health443 in ['initial', 'healthy']:
break
time.sleep(1)
# Register the nodes to the internal targets
targets_list = [dict(Id=target_id, Port=80) for target_id in targets]
AmazonWebServices().register_targets(targets_list, internal_tg80_arn)
targets_list = [dict(Id=target_id, Port=443) for target_id in targets]
AmazonWebServices().register_targets(targets_list, internal_tg443_arn)
# Wait up to approx. 5 minutes for targets to begin health checks
for i in range(300):
try:
health80 = AmazonWebServices().describe_target_health(
internal_tg80_arn)[
'TargetHealthDescriptions'][0]['TargetHealth']['State']
health443 = AmazonWebServices().describe_target_health(
internal_tg443_arn)[
'TargetHealthDescriptions'][0]['TargetHealth']['State']
if health80 in ['initial', 'healthy'] \
and health443 in ['initial', 'healthy']:
break
except Exception:
print("Target group healthchecks unavailable...")
time.sleep(1)
return public_dns
def get_bastion_node(provider_id):
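    """
    Look up an existing bastion node in AWS by its provider ID and fail the
    test run if no valid node is found.
    """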
bastion_node = AmazonWebServices().get_node(provider_id, ssh_access=True)
if bastion_node is None:
pytest.fail("Did not provide a valid Provider ID for the bastion node")
return bastion_node
def setup_ssh_key(bastion_node):
# Copy SSH Key to Bastion and local dir and give it proper permissions
write_key_command = "cat <<EOT >> {}.pem\n{}\nEOT".format(
bastion_node.ssh_key_name, bastion_node.ssh_key)
bastion_node.execute_command(write_key_command)
local_write_key_command = \
"mkdir -p {} && cat <<EOT >> {}/{}.pem\n{}\nEOT".format(
SSH_KEY_DIR, SSH_KEY_DIR,
bastion_node.ssh_key_name, bastion_node.ssh_key)
run_command(local_write_key_command, log_out=False)
set_key_permissions_command = "chmod 400 {}.pem".format(
bastion_node.ssh_key_name)
bastion_node.execute_command(set_key_permissions_command)
local_set_key_permissions_command = "chmod 400 {}/{}.pem".format(
SSH_KEY_DIR, bastion_node.ssh_key_name)
run_command(local_set_key_permissions_command, log_out=False)
@pytest.fixture()
def check_hostname_length():
print("Host Name: {}".format(AG_HOST_NAME))
assert len(AG_HOST_NAME) < 17, "Provide hostname that is 16 chars or less"
| 33,676 | 42.623057 | 84 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_multi_cluster_app.py
|
from .common import create_catalog_external_id
from .common import create_project_and_ns
from .common import get_admin_client
from .common import get_defaut_question_answers
from .common import get_project_client_for_token
from .common import get_user_client
from .common import get_user_client_and_cluster
from .test_rke_cluster_provisioning import create_and_validate_custom_host
from .common import random_test_name
from .common import random_name
from .common import USER_TOKEN
from .common import validate_app_deletion
from .common import validate_response_app_endpoint
from .common import wait_for_app_to_active
from .common import wait_for_catalog_active
from .common import wait_for_mcapp_to_active
from .common import delete_node
import pytest
import time
project = {}
project_detail = {"c0_id": None, "c1_id": None, "c2_id": None,
"p0_id": None, "p1_id": None, "p2_id": None,
"p_client0": None, "namespace0": None,
"cluster0": None, "project0": None,
"p_client1": None, "namespace1": None,
"cluster1": None, "project1": None,
"p_client2": None, "namespace2": None,
"cluster2": None, "project2": None}
global_client = {"cluster_count": False}
PROJECT_ROLE = ["project-member"]
CATALOG_URL = "https://github.com/rancher/integration-test-charts.git"
BRANCH = "validation-tests"
CATALOG_NAME = random_test_name("test-catalog")
WORDPRESS_TEMPLATE_VID_738 = \
"cattle-global-data:" + CATALOG_NAME + "-wordpress-7.3.8"
MYSQL_TEMPLATE_VID_131 = "cattle-global-data:" + CATALOG_NAME + "-mysql-1.3.1"
MYSQL_TEMPLATE_VID_132 = "cattle-global-data:" + CATALOG_NAME + "-mysql-1.3.2"
GRAFANA_TEMPLATE_VID = "cattle-global-data:" + CATALOG_NAME + "-grafana-3.8.6"
WORDPRESS_EXTID = create_catalog_external_id(CATALOG_NAME,
"wordpress", "7.3.8")
MYSQL_EXTERNALID_131 = create_catalog_external_id(CATALOG_NAME,
"mysql", "1.3.1")
MYSQL_EXTERNALID_132 = create_catalog_external_id(CATALOG_NAME,
"mysql", "1.3.2")
GRAFANA_EXTERNALID = create_catalog_external_id(CATALOG_NAME,
"grafana", "3.8.6")
ROLLING_UPGRADE_STRATEGY = {
'rollingUpdate': {
'batchSize': 1,
'interval': 20,
'type': '/v3/schemas/rollingUpdate'},
'type': '/v3/schemas/upgradeStrategy'}
skip_test_rolling_update = pytest.mark.skip(
    reason="Skipping this test always "
           "as it is currently not in scope for automation")
def test_multi_cluster_app_create():
client = get_user_client()
assert_if_valid_cluster_count()
targets = []
for project_id in project:
targets.append({"projectId": project_id, "type": "target"})
answer_values = get_defaut_question_answers(client, WORDPRESS_EXTID)
mcapp = client.create_multiClusterApp(
templateVersionId=WORDPRESS_TEMPLATE_VID_738,
targets=targets,
roles=PROJECT_ROLE,
name=random_name(),
answers=[{"values": answer_values}])
mcapp = wait_for_mcapp_to_active(client, mcapp)
validate_multi_cluster_app_cluster_wordpress(mcapp)
client.delete(mcapp)
def test_multi_cluster_app_edit_template_upgrade():
client = get_user_client()
assert_if_valid_cluster_count()
targets = []
for project_id in project:
targets.append({"projectId": project_id, "type": "target"})
answer_values = \
get_defaut_question_answers(client, MYSQL_EXTERNALID_131)
mcapp = client.create_multiClusterApp(
templateVersionId=MYSQL_TEMPLATE_VID_131,
targets=targets,
roles=PROJECT_ROLE,
name=random_name(),
answers=[{"values": answer_values}])
mcapp = wait_for_mcapp_to_active(client, mcapp)
validate_multi_cluster_app_cluster(mcapp)
answer_values_new = get_defaut_question_answers(client,
MYSQL_EXTERNALID_132)
mcapp = client.update(mcapp,
roles=PROJECT_ROLE,
templateVersionId=MYSQL_TEMPLATE_VID_132,
answers=[{"values": answer_values_new}])
mcapp = client.reload(mcapp)
mcapp = wait_for_mcapp_to_active(client, mcapp)
validate_multi_cluster_app_cluster(mcapp)
client.delete(mcapp)
def test_multi_cluster_app_delete():
assert_if_valid_cluster_count()
targets = []
for project_id in project:
targets.append({"projectId": project_id, "type": "target"})
client = get_user_client()
answer_values = get_defaut_question_answers(client, MYSQL_EXTERNALID_131)
mcapp = client.create_multiClusterApp(
templateVersionId=MYSQL_TEMPLATE_VID_131,
targets=targets,
roles=PROJECT_ROLE,
name=random_name(),
answers=[{"values": answer_values}])
mcapp = wait_for_mcapp_to_active(client, mcapp)
validate_multi_cluster_app_cluster(mcapp)
delete_multi_cluster_app(mcapp, True)
def test_multi_cluster_app_template_rollback():
assert_if_valid_cluster_count()
targets = []
for projectid in project:
targets.append({"projectId": projectid, "type": "target"})
client = get_user_client()
answer_values = get_defaut_question_answers(client, MYSQL_EXTERNALID_131)
mcapp = client.create_multiClusterApp(
templateVersionId=MYSQL_TEMPLATE_VID_131,
targets=targets,
roles=PROJECT_ROLE,
name=random_name(),
answers=[{"values": answer_values}])
mcapp = wait_for_mcapp_to_active(client, mcapp)
validate_multi_cluster_app_cluster(mcapp)
first_id = mcapp["status"]["revisionId"]
assert mcapp.templateVersionId == MYSQL_TEMPLATE_VID_131
answer_values_new = get_defaut_question_answers(
client, MYSQL_EXTERNALID_132)
mcapp = client.update(mcapp,
roles=PROJECT_ROLE,
templateVersionId=MYSQL_TEMPLATE_VID_132,
answers=[{"values": answer_values_new}])
mcapp = client.reload(mcapp)
mcapp = wait_for_mcapp_to_active(client, mcapp)
validate_multi_cluster_app_cluster(mcapp)
assert mcapp.templateVersionId == MYSQL_TEMPLATE_VID_132
mcapp.rollback(revisionId=first_id)
mcapp = client.reload(mcapp)
mcapp = wait_for_mcapp_to_active(client, mcapp)
validate_multi_cluster_app_cluster(mcapp)
assert mcapp.templateVersionId == MYSQL_TEMPLATE_VID_131
client.delete(mcapp)
def test_multi_cluster_upgrade_and_add_target():
assert_if_valid_cluster_count()
project_id = project_detail["p0_id"]
targets = [{"projectId": project_id, "type": "target"}]
project_id_2 = project_detail["p1_id"]
client = get_user_client()
answer_values = get_defaut_question_answers(client, MYSQL_EXTERNALID_131)
mcapp = client.create_multiClusterApp(
templateVersionId=MYSQL_TEMPLATE_VID_131,
targets=targets,
roles=PROJECT_ROLE,
name=random_name(),
answers=[{"values": answer_values}])
mcapp = wait_for_mcapp_to_active(client, mcapp)
validate_multi_cluster_app_cluster(mcapp)
uuid = mcapp.uuid
name = mcapp.name
assert len(client.list_multiClusterApp(
uuid=uuid, name=name).data[0]["targets"]) == 1, \
"did not start with 1 target"
mcapp.addProjects(projects=[project_id_2])
mcapp = client.reload(mcapp)
mcapp = wait_for_mcapp_to_active(client, mcapp)
assert len(client.list_multiClusterApp(
uuid=uuid, name=name).data[0]["targets"]) == 2, "did not add target"
validate_multi_cluster_app_cluster(mcapp)
client.delete(mcapp)
def test_multi_cluster_upgrade_and_delete_target():
assert_if_valid_cluster_count()
project_id = project_detail["p0_id"]
targets = []
for project_id in project:
targets.append({"projectId": project_id, "type": "target"})
client = get_user_client()
answer_values = get_defaut_question_answers(client, MYSQL_EXTERNALID_131)
mcapp = client.create_multiClusterApp(
templateVersionId=MYSQL_TEMPLATE_VID_131,
targets=targets,
roles=PROJECT_ROLE,
name=random_name(),
answers=[{"values": answer_values}])
mcapp = wait_for_mcapp_to_active(client, mcapp)
validate_multi_cluster_app_cluster(mcapp)
uuid = mcapp.uuid
name = mcapp.name
assert len(client.list_multiClusterApp(
uuid=uuid, name=name).data[0]["targets"]) == 2, \
"did not start with 2 targets"
project_client = project_detail["p_client0"]
app = mcapp.targets[0].projectId.split(":")
app1id = app[1] + ":" + mcapp.targets[0].appId
client.action(obj=mcapp, action_name="removeProjects",
projects=[project_id])
mcapp = client.reload(mcapp)
mcapp = wait_for_mcapp_to_active(client, mcapp)
validate_multi_cluster_app_cluster(mcapp)
assert len(mcapp["targets"]) == 1, "did not delete target"
validate_app_deletion(project_client, app1id)
client.delete(mcapp)
def test_multi_cluster_role_change():
assert_if_valid_cluster_count()
targets = []
for projectid in project:
targets.append({"projectId": projectid, "type": "target"})
client = get_user_client()
original_role = ["project-member"]
answer_values = get_defaut_question_answers(client, GRAFANA_EXTERNALID)
mcapp = client.create_multiClusterApp(
templateVersionId=GRAFANA_TEMPLATE_VID,
targets=targets,
roles=original_role,
name=random_name(),
answers=[{"values": answer_values}])
try:
mcapp = wait_for_mcapp_to_active(client, mcapp, 10)
except AssertionError:
print("expected failure as project member")
pass # expected fail
mcapp = client.update(mcapp, roles=["cluster-owner"])
    mcapp = client.reload(mcapp)
mcapp = wait_for_mcapp_to_active(client, mcapp)
validate_multi_cluster_app_cluster(mcapp)
client.delete(mcapp)
def test_multi_cluster_project_answer_override():
assert_if_valid_cluster_count()
targets = []
for projectid in project:
targets.append({"projectId": projectid, "type": "target"})
client = get_user_client()
answer_values = get_defaut_question_answers(client, MYSQL_EXTERNALID_131)
mcapp = client.create_multiClusterApp(
templateVersionId=MYSQL_TEMPLATE_VID_131,
targets=targets,
roles=PROJECT_ROLE,
name=random_name(),
answers=[{"values": answer_values}])
mcapp = wait_for_mcapp_to_active(client, mcapp)
validate_multi_cluster_app_cluster(mcapp)
answers_override = {
"clusterId": None,
"projectId": project_detail["p0_id"],
"type": "/v3/schemas/answer",
"values": {
"mysqlUser": "test_override"}
}
mysql_override = []
mysql_override.extend([{"values": answer_values}, answers_override])
mcapp = client.update(mcapp,
roles=PROJECT_ROLE,
answers=mysql_override)
mcapp = client.reload(mcapp)
mcapp = wait_for_mcapp_to_active(client, mcapp)
validate_multi_cluster_app_cluster(mcapp)
projectId_answer_override = project_detail["p0_id"]
validate_answer_override(mcapp,
projectId_answer_override,
answers_override,
False)
client.delete(mcapp)
def test_multi_cluster_cluster_answer_override():
assert_if_valid_cluster_count()
client = get_user_client()
cluster1 = project_detail["cluster1"]
p3, ns3 = create_project_and_ns(
USER_TOKEN, cluster1, random_test_name("mcapp-3"))
p_client2 = get_project_client_for_token(p3, USER_TOKEN)
project_detail["c2_id"] = cluster1.id
project_detail["namespace2"] = ns3
project_detail["p2_id"] = p3.id
project_detail["p_client2"] = p_client2
project_detail["cluster2"] = cluster1
project_detail["project2"] = p3
project[p3.id] = project_detail
client = global_client["client"]
targets = []
for projectid in project:
targets.append({"projectId": projectid, "type": "target"})
answer_values = get_defaut_question_answers(client, MYSQL_EXTERNALID_131)
mcapp = client.create_multiClusterApp(
templateVersionId=MYSQL_TEMPLATE_VID_131,
targets=targets,
roles=PROJECT_ROLE,
name=random_name(),
answers=[{"values": answer_values}])
mcapp = wait_for_mcapp_to_active(client, mcapp)
validate_multi_cluster_app_cluster(mcapp)
answers_override_cluster = {
"clusterId": project_detail["c0_id"],
"projectId": None,
"type": "/v3/schemas/answer",
"values": {
"mysqlUser": "test_override"}
}
mysql_override_cluster = []
mysql_override_cluster.extend([{"values": answer_values},
answers_override_cluster])
clusterId_answer_override = project_detail["c0_id"]
mcapp = client.update(mcapp,
roles=PROJECT_ROLE,
answers=mysql_override_cluster)
mcapp = client.reload(mcapp)
mcapp = wait_for_mcapp_to_active(client, mcapp)
validate_multi_cluster_app_cluster(mcapp)
validate_answer_override(mcapp,
clusterId_answer_override,
answers_override_cluster)
client.delete(mcapp)
def test_multi_cluster_all_answer_override():
assert_if_valid_cluster_count()
targets = []
for projectid in project:
targets.append({"projectId": projectid, "type": "target"})
client = get_user_client()
answer_values = get_defaut_question_answers(client, MYSQL_EXTERNALID_131)
mcapp = client.create_multiClusterApp(
templateVersionId=MYSQL_TEMPLATE_VID_131,
targets=targets,
roles=PROJECT_ROLE,
name=random_name(),
answers=[{"values": answer_values}])
mcapp = wait_for_mcapp_to_active(client, mcapp)
validate_multi_cluster_app_cluster(mcapp)
new_answers = {"values": answer_values}
new_answers["values"]["mysqlUser"] = "root"
mcapp = client.update(mcapp,
roles=PROJECT_ROLE,
answers=[new_answers])
mcapp = client.reload(mcapp)
mcapp = wait_for_mcapp_to_active(client, mcapp)
validate_multi_cluster_app_cluster(mcapp)
validate_all_answer_override_mca(mcapp)
client.delete(mcapp)
@skip_test_rolling_update
def test_multi_cluster_rolling_upgrade():
assert_if_valid_cluster_count()
targets = []
for projectid in project:
targets.append({"projectId": projectid, "type": "target"})
client = get_user_client()
answer_values = get_defaut_question_answers(client, MYSQL_EXTERNALID_131)
mcapp = client.create_multiClusterApp(
templateVersionId=MYSQL_TEMPLATE_VID_131,
targets=targets,
roles=PROJECT_ROLE,
name=random_name(),
answers=[{"values": answer_values}],
upgradeStrategy=ROLLING_UPGRADE_STRATEGY)
mcapp = wait_for_mcapp_to_active(client, mcapp)
new_answers = {"values": answer_values}
new_answers["values"]["mysqlUser"] = "admin1234"
mcapp = client.update(mcapp,
roles=["cluster-owner"],
answers=[new_answers])
mcapp = client.reload(mcapp)
app_info = {"p_client": None, "app_id": None}
app_info_2 = {"p_client": None, "app_id": None}
start = time.time()
end = time.time()
time.sleep(5)
app_state = []
for i in range(0, len(mcapp.targets)):
app_id = mcapp.targets[i].appId
assert app_id is not None, "app_id is None"
project_client = project_detail["p_client" + str(i)]
app_detail = project_client.list_app(id=app_id).data[0]
app_state.append(app_detail.state)
if app_detail.state == "active":
app_info["p_client"] = project_client
app_info["app_id"] = app_id
else:
app_info_2["p_client"] = project_client
app_info_2["app_id"] = app_id
assert app_state.count("active") == 1, "Only one app should be upgrading"
print("app_state: ", app_state)
# check interval time is 20 seconds
while True:
app = app_info["p_client"].list_app(id=app_info["app_id"]).data[0]
app2 = app_info_2["p_client"].list_app(id=app_info_2["app_id"]).data[0]
if app2.state == "active":
start_1 = time.time()
if app.state != "active":
end = time.time()
break
print("Start: ", start)
print("Start_1: ", start_1)
print("End: ", end)
print(end - start)
print(end - start_1)
validate_multi_cluster_app_cluster(mcapp)
client.delete(mcapp)
@pytest.fixture(scope='module', autouse=True)
def create_project_client(request):
node_roles = [["controlplane", "etcd", "worker"],
["worker"], ["worker"]]
cluster_list = []
cluster, aws_nodes = create_and_validate_custom_host(node_roles, True)
client, cluster_existing = get_user_client_and_cluster()
admin_client = get_admin_client()
cluster_list.append(cluster_existing)
cluster_list.append(cluster)
if len(cluster_list) > 1:
global_client["cluster_count"] = True
assert_if_valid_cluster_count()
p1, ns1 = create_project_and_ns(
USER_TOKEN, cluster_list[0], random_test_name("mcapp-1"))
p_client1 = get_project_client_for_token(p1, USER_TOKEN)
p2, ns2 = create_project_and_ns(
USER_TOKEN, cluster_list[1], random_test_name("mcapp-2"))
p_client2 = get_project_client_for_token(p2, USER_TOKEN)
project_detail["c0_id"] = cluster_list[0].id
project_detail["p0_id"] = p1.id
project_detail["namespace0"] = ns1
project_detail["p_client0"] = p_client1
project_detail["cluster0"] = cluster_list[0]
project_detail["project0"] = p1
project[p1.id] = project_detail
project_detail["c1_id"] = cluster_list[1].id
project_detail["namespace1"] = ns2
project_detail["p1_id"] = p2.id
project_detail["p_client1"] = p_client2
project_detail["cluster1"] = cluster_list[1]
project_detail["project1"] = p2
project[p2.id] = project_detail
global_client["client"] = client
catalog = admin_client.create_catalog(
name=CATALOG_NAME,
baseType="catalog",
branch=BRANCH,
kind="helm",
url=CATALOG_URL)
catalog = wait_for_catalog_active(admin_client, catalog)
def fin():
admin_client.delete(catalog)
admin_client.delete(p1)
admin_client.delete(p2)
admin_client.delete(project_detail["project2"])
admin_client.delete(cluster)
if aws_nodes is not None:
delete_node(aws_nodes)
request.addfinalizer(fin)
def assert_if_valid_cluster_count():
assert global_client["cluster_count"], \
"Setup Failure. Tests require at least 2 clusters"
def validate_multi_cluster_app_cluster_wordpress(multiclusterapp):
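    """
    For every target of the multi-cluster app, wait for the child app to
    become active, verify its template version and hit the app endpoint.
    """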
for i in range(0, len(multiclusterapp.targets)):
app_id = multiclusterapp.targets[i].appId
assert app_id is not None, "app_id is None"
project_client = project_detail["p_client"+str(i)]
wait_for_app_to_active(project_client, app_id)
validate_app_version(project_client, multiclusterapp, app_id)
validate_response_app_endpoint(project_client, app_id)
def validate_multi_cluster_app_cluster(multiclusterapp):
for i in range(0, len(multiclusterapp.targets)):
app_id = multiclusterapp.targets[i].appId
assert app_id is not None, "app_id is None"
project_client = project_detail["p_client"+str(i)]
wait_for_app_to_active(project_client, app_id)
validate_app_version(project_client, multiclusterapp, app_id)
def delete_multi_cluster_app(multiclusterapp, validation=False):
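    """
    Delete the multi-cluster app and, when validation is True, assert that
    the mcapp is gone and that the child apps are removed from the target
    projects.
    """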
client = global_client["client"]
uuid = multiclusterapp.uuid
name = multiclusterapp.name
client.delete(multiclusterapp)
if validation:
mcapps = client.list_multiClusterApp(uuid=uuid, name=name).data
assert len(mcapps) == 0, "Multi Cluster App is not deleted"
for i in range(1, len(multiclusterapp.targets)):
app_id = multiclusterapp.targets[i].appId
assert app_id is not None, "app_id is None"
project_client = project_detail["p_client" + str(i)]
validate_app_deletion(project_client, app_id)
def validate_app_version(project_client, multiclusterapp, app_id):
temp_version = multiclusterapp.templateVersionId
app = temp_version.split(":")[1].split("-")
catalog_name = app[0] + "-" + app[1] + "-" + app[2]
mcapp_template_version = "catalog://?catalog=" + catalog_name + \
"&template=" + app[3] + "&version=" + app[4]
app_template_version = \
project_client.list_app(name=app_id).data[0].externalId
    assert mcapp_template_version == app_template_version, \
        "App template version differs from the multi-cluster app's"
def return_application_status_and_upgrade(client1, app_id1, client2, app_id2):
app_data1 = client1.list_app(id=app_id1).data
application1 = app_data1[0]
app_data2 = client2.list_app(id=app_id2).data
application2 = app_data2[0]
a = application1.state == "active" \
and application1.answers["mysqlUser"] == "admin1234"
b = application2.state == "active" \
and application2.answers["mysqlUser"] == "admin1234"
    return a and not b
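# A minimal usage sketch for the helper above (the ids are hypothetical and
# wait_for is assumed to be the polling helper imported from common): poll
# until the first app has picked up the upgraded answers while the second
# app has not been upgraded yet.
def _example_wait_for_staggered_upgrade(client1, app_id1, client2, app_id2):
    wait_for(lambda: return_application_status_and_upgrade(
        client1, app_id1, client2, app_id2))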
def validate_app_upgrade_mca(multiclusterapp):
for i in range(0, len(multiclusterapp.targets)):
project_client = project_detail["p_client" + str(i)]
app = multiclusterapp.targets[0].projectId.split(":")
appid = app[1] + ":" + multiclusterapp.targets[i].appId
temp_version = multiclusterapp.templateVersionId
app = temp_version.split(":")[1].split("-")
mcapp_template_version = "catalog://?catalog=" + app[0] + \
"&template=" + app[1] + "&version=" \
+ app[2]
app_template_version = \
project_client.list_app(id=appid).data[0].externalId
        assert mcapp_template_version == app_template_version, \
            "App template version differs from the multi-cluster app's"
def validate_deletion_mca(multiclusterapp):
for i in range(0, len(multiclusterapp.targets)):
app_id = multiclusterapp.targets[i].appId
assert app_id is not None, "app_id is None"
project_client = project_detail["p_client"+str(i)]
app = multiclusterapp.targets[i].projectId.split(":")
app1id = app[1] + ":" + multiclusterapp.targets[i].appId
validate_app_deletion(project_client, app1id)
def validate_all_answer_override_mca(multiclusterapp):
for i in range(0, len(multiclusterapp.targets)):
project_client = project_detail["p_client" + str(i)]
app = multiclusterapp.targets[0].projectId.split(":")
appid = app[1] + ":" + multiclusterapp.targets[i].appId
hold = multiclusterapp['answers'][0]
val = hold["values"]
app_answers = \
project_client.list_app(id=appid).data[0].answers
assert str(val) == str(app_answers), \
"App answers are different than the Multi cluster answers"
def validate_answer_override(multiclusterapp, id,
answers_override, cluster=True):
for i in range(0, len(multiclusterapp.targets)):
project_client = project_detail["p_client"+str(i)]
app_id = multiclusterapp.targets[i].appId
target_project_id = multiclusterapp.targets[i].projectId
target_clusterId = target_project_id.split(":")[0]
app_answers = project_client.list_app(id=app_id).data[0].answers
if not cluster:
if target_project_id == id:
assert answers_override["values"]["mysqlUser"] == \
app_answers.get("mysqlUser"), \
"Answers are not available on the expected project"
else:
assert app_answers.get("mysqlUser") == "admin", \
"answers should not have changed"
else:
if target_clusterId == id:
assert answers_override["values"]["mysqlUser"] == \
app_answers.get("mysqlUser"), \
"Answers are not available on the expected project"
else:
assert app_answers.get("mysqlUser") == "admin", \
"answers should not have changed"
| 24,783 | 39.965289 | 79 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_registry.py
|
"""
This file contains tests related to adding registries and
deploying workloads that use those registries.
Test requirements:
The environment variables below need to be set:
CATTLE_TEST_URL - url to rancher server
ADMIN_TOKEN - Admin token from rancher
USER_TOKEN - User token from rancher
RANCHER_CLUSTER_NAME - Cluster name to run test on
RANCHER_REGISTRY - quay.io, dockerhub, custom etc
RANCHER_TEST_CLIENT_IMAGE - Path to image eg. quay.io/myimage/ubuntuimage
RANCHER_TEST_RBAC - Boolean (Optional), To run rbac tests
"""
from .common import * # NOQA
CLUSTER_NAME = os.environ.get("CLUSTER_NAME", "")
namespace = {"p_client": None, "ns": None, "cluster": None, "project": None}
REGISTRY_USER_NAME = os.environ.get('RANCHER_REGISTRY_USER_NAME', "None")
REGISTRY_PASSWORD = os.environ.get('RANCHER_REGISTRY_PASSWORD', "None")
TEST_CLIENT_IMAGE = os.environ.get('RANCHER_TEST_CLIENT_IMAGE', "None")
REGISTRY = os.environ.get('RANCHER_REGISTRY', "None")
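# A small optional sanity sketch for the environment described in the module
# docstring; it only mirrors the variables read above and is not used by the
# tests themselves.
def _registry_env_is_configured():
    required = (REGISTRY, REGISTRY_USER_NAME, REGISTRY_PASSWORD,
                TEST_CLIENT_IMAGE)
    return all(value not in ("", "None", None) for value in required)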
rbac_role_list = [
CLUSTER_OWNER,
CLUSTER_MEMBER,
PROJECT_OWNER,
PROJECT_MEMBER,
PROJECT_READ_ONLY
]
def test_create_registry_single_namespace():
"""
    This test creates a registry scoped to a single namespace.
    Validates that a workload using the registry image comes up
    active in that namespace.
"""
p_client = namespace["p_client"]
ns = namespace["ns"]
registry = create_registry_validate_workload(p_client, ns)
delete_registry(p_client, registry)
def test_create_registry_all_namespace():
p_client = namespace["p_client"]
c_client = namespace["c_client"]
cluster = namespace["cluster"]
project = namespace["project"]
ns = namespace["ns"]
registry = create_registry_validate_workload(p_client, ns, allns=True)
# Create and validate workload in a new namespace
new_ns = create_ns(c_client, cluster, project)
create_validate_workload(p_client, new_ns)
delete_registry(p_client, registry)
def test_delete_registry_all_namespace():
p_client = namespace["p_client"]
c_client = namespace["c_client"]
cluster = namespace["cluster"]
project = namespace["project"]
ns = namespace["ns"]
new_ns = create_ns(c_client, cluster, project)
registry = create_registry_validate_workload(p_client, ns, allns=True)
delete_registry(p_client, registry)
print("Verify workloads cannot be created in all the namespaces")
create_validate_workload_with_invalid_registry(p_client, ns)
create_validate_workload_with_invalid_registry(p_client, new_ns)
def test_delete_registry_single_namespace():
p_client = namespace["p_client"]
ns = namespace["ns"]
registry = create_registry_validate_workload(p_client, ns)
delete_registry(p_client, registry)
print("Verify workload cannot be created in the namespace after registry")
"deletion"
create_validate_workload_with_invalid_registry(p_client, ns)
def test_edit_registry_single_namespace():
p_client = namespace["p_client"]
ns = namespace["ns"]
name = random_test_name("registry")
# Create registry with invalid username and password
registries = {REGISTRY: {"username": "testabc",
"password": "abcdef"}}
registry = p_client.create_namespacedDockerCredential(
registries=registries, name=name,
namespaceId=ns.id)
create_validate_workload_with_invalid_registry(p_client, ns)
# Update registry with valid username and password
new_registries = {REGISTRY: {"username": REGISTRY_USER_NAME,
"password": REGISTRY_PASSWORD}}
p_client.update(registry, name=registry.name,
namespaceId=ns['name'],
registries=new_registries)
# Validate workload after registry update
create_validate_workload(p_client, ns)
delete_registry(p_client, registry)
def test_edit_registry_all_namespace():
p_client = namespace["p_client"]
c_client = namespace["c_client"]
cluster = namespace["cluster"]
project = namespace["project"]
ns = namespace["ns"]
name = random_test_name("registry")
# Create registry with invalid username and password
registries = {REGISTRY: {"username": "testabc",
"password": "abcdef"}}
registry = p_client.create_dockerCredential(
registries=registries, name=name)
create_validate_workload_with_invalid_registry(p_client, ns)
# Update registry with correct username and password
new_registries = {REGISTRY: {"username": REGISTRY_USER_NAME,
"password": REGISTRY_PASSWORD}}
p_client.update(registry, name=registry.name,
registries=new_registries)
new_ns = create_ns(c_client, cluster, project)
# Validate workload in all namespaces after registry update
create_validate_workload(p_client, ns)
create_validate_workload(p_client, new_ns)
delete_registry(p_client, registry)
def test_registry_cross_namespace_access():
"""
    This test creates two namespaces and creates a registry for the first
    namespace. It creates a workload in each namespace.
    Validates that the workload created in the first namespace comes up
    healthy, while the workload in the second namespace does not.
"""
p_client = namespace["p_client"]
c_client = namespace["c_client"]
cluster = namespace["cluster"]
project = namespace["project"]
ns_1 = namespace["ns"]
ns_2 = create_ns(c_client, cluster, project)
registry = create_registry_validate_workload(p_client, ns_1)
create_validate_workload_with_invalid_registry(p_client, ns_2)
delete_registry(p_client, registry)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_registry_create_single_namespace(role):
"""
    Creates a registry scoped to a single namespace as a user with the
    given role.
Runs only if RANCHER_TEST_RBAC is True in env. variable
@param role: User role in rancher eg. project owner, project member etc
"""
token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
p_client = get_project_client_for_token(project, token)
if role in (CLUSTER_MEMBER, PROJECT_READ_ONLY):
with pytest.raises(ApiError) as e:
registry = create_registry_validate_workload(p_client, ns)
assert e.value.error.status == 403
assert e.value.error.code == 'PermissionDenied'
else:
registry = create_registry_validate_workload(p_client, ns)
delete_registry(p_client, registry)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_registry_create_all_namespace(role):
"""
Runs only if RANCHER_TEST_RBAC is True in env. variable.
    Creates a registry scoped to all namespaces as a user with the
    given role.
@param role: User role in rancher eg. project owner, project member etc
"""
c_client = namespace["c_client"]
cluster = namespace["cluster"]
token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
p_client = get_project_client_for_token(project, token)
if role in (CLUSTER_MEMBER, PROJECT_READ_ONLY):
with pytest.raises(ApiError) as e:
registry = \
create_registry_validate_workload(p_client, ns, allns=True)
assert e.value.error.status == 403
assert e.value.error.code == 'PermissionDenied'
else:
registry = create_registry_validate_workload(p_client, ns, allns=True)
# Create and validate workload in a new namespace
new_ns = create_ns(c_client, cluster, project)
create_validate_workload(p_client, new_ns)
delete_registry(p_client, registry)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_registry_delete_single_namespace(role):
"""
Runs only if RANCHER_TEST_RBAC is True in env. variable.
    Creates a registry scoped to a single namespace, deploys a workload
    and deletes the registry afterwards.
    Validates that the workload can no longer pull the image once the
    registry is deleted.
@param role: User role in rancher eg. project owner, project member etc
"""
c_owner_token = rbac_get_user_token_by_role(rbac_role_list[0])
project = rbac_get_project()
p_client_for_c_owner = get_project_client_for_token(project, c_owner_token)
token = rbac_get_user_token_by_role(role)
ns = rbac_get_namespace()
p_client = get_project_client_for_token(project, token)
registry = create_registry_validate_workload(p_client_for_c_owner, ns)
if role in (CLUSTER_MEMBER, PROJECT_READ_ONLY):
with pytest.raises(ApiError) as e:
delete_registry(p_client, registry)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
delete_registry(p_client_for_c_owner, registry)
else:
delete_registry(p_client, registry)
print("Verify workload cannot be created in the namespace after "
"registry deletion")
create_validate_workload_with_invalid_registry(p_client, ns)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_registry_delete_all_namespace(role):
"""
Runs only if RANCHER_TEST_RBAC is True in env. variable.
    Creates a registry scoped to all namespaces, deploys a workload
    and deletes the registry afterwards.
    Validates that workloads can no longer pull the image once the
    registry is deleted.
@param role: User role in rancher eg. project owner, project member etc
"""
c_client = namespace["c_client"]
cluster = namespace["cluster"]
c_owner_token = rbac_get_user_token_by_role(rbac_role_list[0])
project = rbac_get_project()
p_client_for_c_owner = get_project_client_for_token(project, c_owner_token)
token = rbac_get_user_token_by_role(role)
p_client = get_project_client_for_token(project, token)
ns = rbac_get_namespace()
new_ns = create_ns(c_client, cluster, project)
registry = \
create_registry_validate_workload(p_client_for_c_owner, ns)
if role in (CLUSTER_MEMBER, PROJECT_READ_ONLY):
with pytest.raises(ApiError) as e:
delete_registry(p_client, registry)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
delete_registry(p_client_for_c_owner, registry)
else:
delete_registry(p_client, registry)
print("Verify workloads cannot be created in all the namespaces")
create_validate_workload_with_invalid_registry(p_client, ns)
create_validate_workload_with_invalid_registry(p_client, new_ns)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_registry_edit_single_namespace(role):
"""
Runs only if RANCHER_TEST_RBAC is True in env. variable.
    Creates a registry with invalid credentials for a single namespace,
    deploys a workload that uses it and validates that the workload
    does not come up.
    Updates the registry with the correct credentials and validates
    the workload.
@param role: User role in rancher eg. project owner, project member etc
"""
c_owner_token = rbac_get_user_token_by_role(rbac_role_list[0])
project = rbac_get_project()
p_client_for_c_owner = get_project_client_for_token(project, c_owner_token)
token = rbac_get_user_token_by_role(role)
ns = rbac_get_namespace()
p_client = get_project_client_for_token(project, token)
name = random_test_name("registry")
# registry with invalid username and password
registries = {REGISTRY: {"username": "testabc",
"password": "abcdef"}}
# registry with valid username and password
new_registries = {REGISTRY: {"username": REGISTRY_USER_NAME,
"password": REGISTRY_PASSWORD}}
# Create registry with wrong credentials
registry = p_client_for_c_owner.create_namespacedDockerCredential(
registries=registries, name=name, namespaceId=ns.id)
if role in (CLUSTER_MEMBER, PROJECT_READ_ONLY):
with pytest.raises(ApiError) as e:
p_client.update(registry, name=registry.name,
namespaceId=ns['name'],
registries=new_registries)
assert e.value.error.status == 404
assert e.value.error.code == 'NotFound'
delete_registry(p_client_for_c_owner, registry)
else:
create_validate_workload_with_invalid_registry(p_client, ns)
# Update registry with valid username and password
p_client.update(registry, name=registry.name,
namespaceId=ns['name'],
registries=new_registries)
# Validate workload after registry update
create_validate_workload(p_client, ns)
delete_registry(p_client, registry)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_registry_edit_all_namespace(role):
"""
Runs only if RANCHER_TEST_RBAC is True in env. variable.
    Creates a registry with invalid credentials scoped to all namespaces,
    deploys a workload that uses it and validates that the workload
    does not come up.
    Updates the registry with the correct credentials and validates
    the workload.
    @param role: User role in rancher eg. project owner, project member etc
"""
c_client = namespace["c_client"]
cluster = namespace["cluster"]
c_owner_token = rbac_get_user_token_by_role(rbac_role_list[0])
project = rbac_get_project()
p_client_for_c_owner = get_project_client_for_token(project, c_owner_token)
token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
p_client = get_project_client_for_token(project, token)
ns = rbac_get_namespace()
name = random_test_name("registry")
registries = {REGISTRY: {"username": "testabc",
"password": "abcdef"}}
new_registries = {REGISTRY: {"username": REGISTRY_USER_NAME,
"password": REGISTRY_PASSWORD}}
# Create registry with invalid username and password
registry = p_client_for_c_owner.create_dockerCredential(
registries=registries, name=name)
if role in (CLUSTER_MEMBER, PROJECT_READ_ONLY):
with pytest.raises(ApiError) as e:
p_client.update(registry, name=registry.name,
registries=new_registries)
assert e.value.error.status == 404
assert e.value.error.code == 'NotFound'
delete_registry(p_client_for_c_owner, registry)
else:
create_validate_workload_with_invalid_registry(p_client, ns)
# Update registry with correct username and password
p_client.update(registry, name=registry.name,
registries=new_registries)
new_ns = create_ns(c_client, cluster, project)
# Validate workload in all namespaces after registry update
create_validate_workload(p_client, ns)
create_validate_workload(p_client, new_ns)
delete_registry(p_client_for_c_owner, registry)
@if_test_rbac
def test_rbac_registry_cross_project_access():
"""
    Gets project1 and namespace1 as the cluster owner.
    Creates project2 and namespace2 using the same user.
    Creates a registry in project1 and verifies that a workload in
    project2 cannot use it.
"""
cluster = namespace["cluster"]
token = rbac_get_user_token_by_role(CLUSTER_OWNER)
project = rbac_get_project()
ns = rbac_get_namespace()
p_client = get_project_client_for_token(project, token)
project_2, ns_2 = create_project_and_ns(token, cluster, "testproject2")
p2_client = get_project_client_for_token(project_2, token)
# Create registry in project 1
registry = create_registry_validate_workload(p_client, ns, allns=True)
# Create workload in project 2 and validate
create_validate_workload_with_invalid_registry(p2_client, ns_2)
delete_registry(p_client, registry)
p_client.delete(project_2)
def delete_registry(client, registry):
c_client = namespace["c_client"]
project = namespace["project"]
print("Project ID")
print(project.id)
registryname = registry.name
client.delete(registry)
    # Sleep to allow time for the registry to be deleted
    time.sleep(5)
print("Registry list after deleting registry")
registrydict = client.list_dockerCredential(name=registryname).data
print(registrydict)
assert len(registrydict) == 0, "Unable to delete registry"
namespacedict = c_client.list_namespace(projectId=project.id).data
print("List of namespaces")
print(namespacedict)
len_namespace = len(namespacedict)
namespaceData = namespacedict
# Registry is essentially a secret, deleting the registry should delete
# the secret. Verify secret is deleted by "kubectl get secret" command
# for each of the namespaces
for i in range(0, len_namespace):
ns_name = namespaceData[i]['name']
print(i, ns_name)
command = " get secret " + registryname + " --namespace=" + ns_name
print("Command to obtain the secret")
print(command)
result = execute_kubectl_cmd(command, json_out=False, stderr=True)
print(result)
print("Verify that the secret does not exist "
"and the error code returned is non zero ")
assert result != 0, "Error code is 0!"
def create_registry_validate_workload(p_client, ns=None, allns=False):
name = random_test_name("registry")
print(REGISTRY_USER_NAME)
print(REGISTRY_PASSWORD)
registries = {REGISTRY: {"username": REGISTRY_USER_NAME,
"password": REGISTRY_PASSWORD}}
if allns:
registry = p_client.create_dockerCredential(
registries=registries, name=name)
else:
registry = p_client.create_namespacedDockerCredential(
registries=registries, name=name,
namespaceId=ns.id)
create_validate_workload(p_client, ns)
return registry
def create_workload(p_client, ns):
workload_name = random_test_name("newtestwk")
con = [{"name": "test",
"image": TEST_CLIENT_IMAGE,
"runAsNonRoot": False,
"stdin": True,
"imagePullPolicy": "Always"
}]
workload = p_client.create_workload(name=workload_name,
containers=con,
namespaceId=ns.id)
return workload
def create_validate_workload(p_client, ns):
workload = create_workload(p_client, ns)
workload = p_client.reload(workload)
validate_workload(p_client, workload, "deployment", ns.name)
p_client.delete(workload)
def create_validate_workload_with_invalid_registry(p_client, ns):
workload = create_workload(p_client, ns)
# Validate workload fails to come up active
validate_wl_fail_to_pullimage(p_client, workload)
workload = p_client.reload(workload)
print(workload)
assert workload.state != "active", "Invalid workload came up active!"
p_client.delete(workload)
def validate_wl_fail_to_pullimage(client, workload, timeout=DEFAULT_TIMEOUT):
"""
    This method checks whether the workload is failing to pull its image
@param client: Project client object
@param workload: Workload to test on
@param timeout: Max time of waiting for failure
"""
time.sleep(2)
start = time.time()
pods = client.list_pod(workloadId=workload.id).data
assert len(pods) != 0, "No pods in workload - {}".format(workload)
message = pods[0].containers[0].transitioningMessage
while 'ImagePullBackOff:' not in message:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for workload to get "
"'ImagePullBackOff' error")
time.sleep(1)
pods = client.list_pod(workloadId=workload.id).data
assert len(pods) != 0, "No pods in workload - {}".format(workload)
message = pods[0].containers[0].transitioningMessage
print("{} - fails to pull image".format(workload))
@pytest.fixture(scope='module', autouse=True)
def create_project_client(request):
client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
p, ns = create_project_and_ns(USER_TOKEN, cluster, "testregistry")
p_client = get_project_client_for_token(p, USER_TOKEN)
c_client = get_cluster_client_for_token(cluster, USER_TOKEN)
namespace["p_client"] = p_client
namespace["ns"] = ns
namespace["cluster"] = cluster
namespace["project"] = p
namespace["c_client"] = c_client
def fin():
client = get_user_client()
client.delete(namespace["project"])
request.addfinalizer(fin)
| 20,831 | 37.223853 | 79 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_create_ha.py
|
from python_terraform import * # NOQA
from pkg_resources import packaging
from .common import * # NOQA
from .test_boto_create_eks import get_eks_kubeconfig
from .test_import_k3s_cluster import create_multiple_control_cluster
from .test_rke_cluster_provisioning import rke_config
# RANCHER_HA_KUBECONFIG and RANCHER_HA_HOSTNAME are provided
# when installing Rancher into a k3s setup
RANCHER_HA_KUBECONFIG = os.environ.get("RANCHER_HA_KUBECONFIG")
RANCHER_HA_HOSTNAME = os.environ.get(
"RANCHER_HA_HOSTNAME", RANCHER_HOSTNAME_PREFIX + ".qa.rancher.space")
resource_prefix = RANCHER_HA_HOSTNAME.split(".qa.rancher.space")[0]
RANCHER_SERVER_URL = "https://" + RANCHER_HA_HOSTNAME
RANCHER_CHART_VERSION = os.environ.get("RANCHER_CHART_VERSION")
RANCHER_HELM_EXTRA_SETTINGS = os.environ.get("RANCHER_HELM_EXTRA_SETTINGS")
RANCHER_IMAGE_TAG = os.environ.get("RANCHER_IMAGE_TAG")
RANCHER_HELM_REPO = os.environ.get("RANCHER_HELM_REPO", "latest")
RANCHER_LETSENCRYPT_EMAIL = os.environ.get("RANCHER_LETSENCRYPT_EMAIL")
# Here is the list of cert types for HA install
# [rancher-self-signed, byo-valid, byo-self-signed, letsencrypt]
RANCHER_HA_CERT_OPTION = os.environ.get("RANCHER_HA_CERT_OPTION",
"rancher-self-signed")
RANCHER_VALID_TLS_CERT = os.environ.get("RANCHER_VALID_TLS_CERT")
RANCHER_VALID_TLS_KEY = os.environ.get("RANCHER_VALID_TLS_KEY")
RANCHER_BYO_TLS_CERT = os.environ.get("RANCHER_BYO_TLS_CERT")
RANCHER_BYO_TLS_KEY = os.environ.get("RANCHER_BYO_TLS_KEY")
RANCHER_PRIVATE_CA_CERT = os.environ.get("RANCHER_PRIVATE_CA_CERT")
RANCHER_LOCAL_CLUSTER_TYPE = os.environ.get("RANCHER_LOCAL_CLUSTER_TYPE")
RANCHER_ADD_CUSTOM_CLUSTER = os.environ.get("RANCHER_ADD_CUSTOM_CLUSTER",
"True")
KUBERNETES_VERSION = os.environ.get("RANCHER_LOCAL_KUBERNETES_VERSION", "")
RANCHER_K3S_VERSION = os.environ.get("RANCHER_K3S_VERSION", "")
kubeconfig_path = DATA_SUBDIR + "/kube_config_cluster-ha-filled.yml"
export_cmd = "export KUBECONFIG=" + kubeconfig_path
def test_remove_rancher_ha():
assert CATTLE_TEST_URL.endswith(".qa.rancher.space"), \
"the CATTLE_TEST_URL need to end with .qa.rancher.space"
if not check_if_ok(CATTLE_TEST_URL):
print("skip deleting clusters within the setup")
else:
print("the CATTLE_TEST_URL is accessible")
admin_token = get_user_token("admin", ADMIN_PASSWORD)
client = get_client_for_token(admin_token)
# delete clusters except the local cluster
clusters = client.list_cluster(id_ne="local").data
print("deleting the following clusters: {}"
.format([cluster.name for cluster in clusters]))
for cluster in clusters:
print("deleting the following cluster : {}".format(cluster.name))
delete_cluster(client, cluster)
resource_prefix = \
CATTLE_TEST_URL.split(".qa.rancher.space")[0].split("//")[1]
delete_resource_in_AWS_by_prefix(resource_prefix)
def test_install_rancher_ha(precheck_certificate_options):
cm_install = True
extra_settings = []
if "byo-" in RANCHER_HA_CERT_OPTION:
cm_install = False
print("The hostname is: {}".format(RANCHER_HA_HOSTNAME))
# prepare an RKE cluster and other resources
# if no kubeconfig file is provided
if RANCHER_HA_KUBECONFIG is None:
if RANCHER_LOCAL_CLUSTER_TYPE == "RKE":
print("RKE cluster is provisioning for the local cluster")
nodes = create_resources()
config_path = create_rke_cluster_config(nodes)
create_rke_cluster(config_path)
elif RANCHER_LOCAL_CLUSTER_TYPE == "K3S":
print("K3S cluster is provisioning for the local cluster")
k3s_kubeconfig_path = \
create_multiple_control_cluster()
cmd = "cp {0} {1}".format(k3s_kubeconfig_path, kubeconfig_path)
run_command_with_stderr(cmd)
elif RANCHER_LOCAL_CLUSTER_TYPE == "EKS":
create_resources_eks()
eks_kubeconfig_path = get_eks_kubeconfig(resource_prefix +
"-ekscluster")
cmd = "cp {0} {1}".format(eks_kubeconfig_path, kubeconfig_path)
run_command_with_stderr(cmd)
install_eks_ingress()
extra_settings.append(
"--set ingress."
"extraAnnotations.\"kubernetes\\.io/ingress\\.class\"=nginx"
)
elif RANCHER_LOCAL_CLUSTER_TYPE == "AKS":
create_aks_cluster()
install_aks_ingress()
extra_settings.append(
"--set ingress."
"extraAnnotations.\"kubernetes\\.io/ingress\\.class\"=nginx"
)
else:
write_kubeconfig()
# wait until the cluster is ready
def valid_response():
output = run_command_with_stderr(export_cmd + " && kubectl get nodes")
return "Ready" in output.decode()
try:
wait_for(valid_response)
except Exception as e:
print("Error: {0}".format(e))
assert False, "check the logs in console for details"
print_kubeconfig()
if RANCHER_LOCAL_CLUSTER_TYPE == "RKE":
check_rke_ingress_rollout()
if cm_install:
install_cert_manager()
add_repo_create_namespace()
# Here we use helm to install the Rancher chart
install_rancher(extra_settings=extra_settings)
if RANCHER_LOCAL_CLUSTER_TYPE == "EKS":
# For EKS we need to wait for EKS to generate the nlb and then configure
# a Route53 record with the ingress address value
set_route53_with_eks_ingress()
if RANCHER_LOCAL_CLUSTER_TYPE == "AKS":
set_route53_with_aks_ingress()
wait_for_status_code(url=RANCHER_SERVER_URL + "/v3", expected_code=401)
auth_url = \
RANCHER_SERVER_URL + "/v3-public/localproviders/local?action=login"
wait_for_status_code(url=auth_url, expected_code=200)
admin_client = set_url_and_password(RANCHER_SERVER_URL)
cluster = get_cluster_by_name(admin_client, "local")
validate_cluster_state(admin_client, cluster, False)
if RANCHER_ADD_CUSTOM_CLUSTER.upper() == "TRUE":
print("creating an custom cluster")
create_custom_cluster(admin_client)
def create_custom_cluster(admin_client):
auth_url = RANCHER_SERVER_URL + \
"/v3-public/localproviders/local?action=login"
wait_for_status_code(url=auth_url, expected_code=200)
user, user_token = create_user(admin_client, auth_url)
aws_nodes = \
AmazonWebServices().create_multiple_nodes(
5, random_test_name(resource_prefix + "-custom"))
node_roles = [["controlplane"], ["etcd"],
["worker"], ["worker"], ["worker"]]
client = rancher.Client(url=RANCHER_SERVER_URL + "/v3",
token=user_token, verify=False)
cluster = client.create_cluster(
name=random_name(),
driver="rancherKubernetesEngine",
rancherKubernetesEngineConfig=rke_config)
assert cluster.state == "provisioning"
i = 0
for aws_node in aws_nodes:
docker_run_cmd = \
get_custom_host_registration_cmd(
client, cluster, node_roles[i], aws_node)
aws_node.execute_command(docker_run_cmd)
i += 1
validate_cluster(client, cluster, userToken=user_token)
def test_upgrade_rancher_ha(precheck_upgrade_options):
write_kubeconfig()
add_repo_create_namespace()
install_rancher(upgrade=True)
def create_resources_eks():
cluster_name = resource_prefix + "-ekscluster"
AmazonWebServices().create_eks_cluster(cluster_name)
AmazonWebServices().wait_for_eks_cluster_state(cluster_name, "ACTIVE")
def create_resources():
# Create nlb and grab ARN & dns name
lb = AmazonWebServices().create_network_lb(name=resource_prefix + "-nlb")
lbArn = lb["LoadBalancers"][0]["LoadBalancerArn"]
lbDns = lb["LoadBalancers"][0]["DNSName"]
# Upsert the route53 record -- if it exists, update, if not, insert
AmazonWebServices().upsert_route_53_record_cname(RANCHER_HA_HOSTNAME,
lbDns)
# Create the target groups
tg80 = AmazonWebServices(). \
create_ha_target_group(80, resource_prefix + "-tg-80")
tg443 = AmazonWebServices(). \
create_ha_target_group(443, resource_prefix + "-tg-443")
tg80Arn = tg80["TargetGroups"][0]["TargetGroupArn"]
tg443Arn = tg443["TargetGroups"][0]["TargetGroupArn"]
# Create listeners for the load balancer, to forward to the target groups
AmazonWebServices().create_ha_nlb_listener(loadBalancerARN=lbArn,
port=80,
targetGroupARN=tg80Arn)
AmazonWebServices().create_ha_nlb_listener(loadBalancerARN=lbArn,
port=443,
targetGroupARN=tg443Arn)
targets = []
aws_nodes = AmazonWebServices().\
create_multiple_nodes(3, resource_prefix + "-server")
assert len(aws_nodes) == 3
for aws_node in aws_nodes:
print(aws_node.public_ip_address)
targets.append(aws_node.provider_node_id)
# Register the nodes to the target groups
targets_list = [dict(Id=target_id, Port=80) for target_id in targets]
AmazonWebServices().register_targets(targets_list, tg80Arn)
targets_list = [dict(Id=target_id, Port=443) for target_id in targets]
AmazonWebServices().register_targets(targets_list, tg443Arn)
return aws_nodes
def install_cert_manager():
manifests = "https://github.com/jetstack/cert-manager/releases/download/" \
"{0}/cert-manager.crds.yaml".format(CERT_MANAGER_VERSION)
cm_repo = "https://charts.jetstack.io"
run_command_with_stderr(export_cmd + " && kubectl apply -f " + manifests)
run_command_with_stderr("helm_v3 repo add jetstack " + cm_repo)
run_command_with_stderr("helm_v3 repo update")
run_command_with_stderr(export_cmd + " && " +
"kubectl create namespace cert-manager")
run_command_with_stderr(export_cmd + " && " +
"helm_v3 install cert-manager "
"jetstack/cert-manager "
"--namespace cert-manager "
"--version {0}".format(CERT_MANAGER_VERSION))
time.sleep(120)
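# A hedged alternative to the fixed two-minute sleep above: wait for the
# cert-manager deployments to finish rolling out instead. This is only a
# sketch and assumes the standard deployment names of the jetstack chart.
def _wait_for_cert_manager_rollout():
    for deploy in ("cert-manager", "cert-manager-cainjector",
                   "cert-manager-webhook"):
        run_command_with_stderr(
            export_cmd + " && kubectl -n cert-manager rollout status "
            "deploy/" + deploy + " --timeout=180s")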
def install_eks_ingress():
run_command_with_stderr(export_cmd + " && kubectl apply -f " +
DATA_SUBDIR + "/eks_nlb.yml")
def set_route53_with_eks_ingress():
kubectl_ingress = "kubectl get ingress -n cattle-system -o " \
"jsonpath=\"" \
"{.items[0].status.loadBalancer.ingress[0].hostname}\""
ingress_address = run_command_with_stderr(export_cmd
+ " && " +
kubectl_ingress).decode()
AmazonWebServices().upsert_route_53_record_cname(RANCHER_HA_HOSTNAME,
ingress_address)
time.sleep(60)
def set_route53_with_aks_ingress():
kubectl_ingress = "kubectl get svc -n ingress-nginx " \
"ingress-nginx-controller -o " \
"jsonpath=\"" \
"{.status.loadBalancer.ingress[0].ip}\""
time.sleep(10)
ingress_address = run_command_with_stderr(export_cmd
+ " && " +
kubectl_ingress).decode()
print("AKS INGRESS ADDRESS:")
print(ingress_address)
AmazonWebServices().upsert_route_53_record_cname(RANCHER_HA_HOSTNAME,
ingress_address,
record_type='A')
time.sleep(60)
def add_repo_create_namespace(repo=RANCHER_HELM_REPO):
repo_name = "rancher-" + repo
repo_url = "https://releases.rancher.com/server-charts/" + repo
run_command_with_stderr("helm_v3 repo add " + repo_name + " " + repo_url)
run_command_with_stderr("helm_v3 repo update")
run_command_with_stderr(export_cmd + " && " +
"kubectl create namespace cattle-system")
def install_rancher(type=RANCHER_HA_CERT_OPTION, repo=RANCHER_HELM_REPO,
upgrade=False, extra_settings=None):
operation = "install"
if upgrade:
operation = "upgrade"
helm_rancher_cmd = \
export_cmd + " && helm_v3 " + operation + " rancher " + \
"rancher-" + repo + "/rancher " + \
"--version " + RANCHER_CHART_VERSION + " " + \
"--namespace cattle-system " + \
"--set hostname=" + RANCHER_HA_HOSTNAME
if type == 'letsencrypt':
helm_rancher_cmd = \
helm_rancher_cmd + \
" --set ingress.tls.source=letsEncrypt " + \
"--set letsEncrypt.email=" + \
RANCHER_LETSENCRYPT_EMAIL
elif type == 'byo-self-signed':
helm_rancher_cmd = \
helm_rancher_cmd + \
" --set ingress.tls.source=secret " + \
"--set privateCA=true"
elif type == 'byo-valid':
helm_rancher_cmd = \
helm_rancher_cmd + \
" --set ingress.tls.source=secret"
if RANCHER_IMAGE_TAG != "" and RANCHER_IMAGE_TAG is not None:
helm_rancher_cmd = \
helm_rancher_cmd + \
" --set rancherImageTag=" + RANCHER_IMAGE_TAG
if operation == "install":
if type == "byo-self-signed":
create_tls_secrets(valid_cert=False)
elif type == "byo-valid":
create_tls_secrets(valid_cert=True)
    if RANCHER_HELM_EXTRA_SETTINGS:
        if extra_settings is None:
            extra_settings = []
        extra_settings.append(RANCHER_HELM_EXTRA_SETTINGS)
if extra_settings:
for setting in extra_settings:
helm_rancher_cmd = helm_rancher_cmd + " " + setting
run_command_with_stderr(helm_rancher_cmd)
time.sleep(120)
# set trace logging
set_trace_cmd = "kubectl -n cattle-system get pods -l app=rancher " + \
"--no-headers -o custom-columns=name:.metadata.name | " + \
"while read rancherpod; do kubectl -n cattle-system " + \
"exec $rancherpod -c rancher -- loglevel --set trace; done"
run_command_with_stderr(set_trace_cmd)
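# A hedged alternative to the fixed sleep after the helm command above: wait
# for the rancher deployment rollout to finish. Only a sketch; it assumes the
# chart's default deployment name "rancher" in the cattle-system namespace.
def _wait_for_rancher_rollout():
    run_command_with_stderr(
        export_cmd + " && kubectl -n cattle-system rollout status "
        "deploy/rancher --timeout=300s")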
def create_tls_secrets(valid_cert):
cert_path = DATA_SUBDIR + "/tls.crt"
key_path = DATA_SUBDIR + "/tls.key"
ca_path = DATA_SUBDIR + "/cacerts.pem"
if valid_cert:
# write files from env var
write_encoded_certs(cert_path, RANCHER_VALID_TLS_CERT)
write_encoded_certs(key_path, RANCHER_VALID_TLS_KEY)
else:
write_encoded_certs(cert_path, RANCHER_BYO_TLS_CERT)
write_encoded_certs(key_path, RANCHER_BYO_TLS_KEY)
write_encoded_certs(ca_path, RANCHER_PRIVATE_CA_CERT)
tls_command = export_cmd + " && kubectl -n cattle-system " \
"create secret tls tls-rancher-ingress " \
"--cert=" + cert_path + " --key=" + key_path
ca_command = export_cmd + " && kubectl -n cattle-system " \
"create secret generic tls-ca " \
"--from-file=" + ca_path
run_command_with_stderr(tls_command)
if not valid_cert:
run_command_with_stderr(ca_command)
def write_encoded_certs(path, contents):
file = open(path, "w")
file.write(base64.b64decode(contents).decode("utf-8"))
file.close()
def write_kubeconfig():
file = open(kubeconfig_path, "w")
file.write(base64.b64decode(RANCHER_HA_KUBECONFIG).decode("utf-8"))
file.close()
def set_url_and_password(rancher_url, server_url=None):
admin_token = set_url_password_token(rancher_url, server_url)
admin_client = rancher.Client(url=rancher_url + "/v3",
token=admin_token, verify=False)
auth_url = rancher_url + "/v3-public/localproviders/local?action=login"
user, user_token = create_user(admin_client, auth_url)
env_details = "env.CATTLE_TEST_URL='" + rancher_url + "'\n"
env_details += "env.ADMIN_TOKEN='" + admin_token + "'\n"
env_details += "env.USER_TOKEN='" + user_token + "'\n"
create_config_file(env_details)
return admin_client
def create_rke_cluster(config_path):
rke_cmd = "rke --version && rke up --config " + config_path
run_command_with_stderr(rke_cmd)
def check_rke_ingress_rollout():
rke_version = run_command_with_stderr('rke -v | cut -d " " -f 3')
rke_version = ''.join(rke_version.decode('utf-8').split())
print("RKE VERSION: " + rke_version)
k8s_version = run_command_with_stderr(export_cmd + " && " +
'kubectl version --short | grep -i server | cut -d " " -f 3')
k8s_version = ''.join(k8s_version.decode('utf-8').split())
print("KUBERNETES VERSION: " + k8s_version)
if packaging.version.parse(rke_version) > packaging.version.parse("v1.2"):
if packaging.version.parse(k8s_version) >= packaging.version.parse("v1.21"):
run_command_with_stderr(
export_cmd + " && " +
"kubectl -n ingress-nginx rollout status ds/nginx-ingress-controller")
run_command_with_stderr(
export_cmd + " && " +
"kubectl -n ingress-nginx wait --for=condition=complete job/ingress-nginx-admission-create")
run_command_with_stderr(
export_cmd + " && " +
"kubectl -n ingress-nginx wait --for=condition=complete job/ingress-nginx-admission-patch")
def print_kubeconfig():
kubeconfig_file = open(kubeconfig_path, "r")
kubeconfig_contents = kubeconfig_file.read()
kubeconfig_file.close()
kubeconfig_contents_encoded = base64.b64encode(
kubeconfig_contents.encode("utf-8")).decode("utf-8")
print("\n\n" + kubeconfig_contents + "\n\n")
print("\nBase64 encoded: \n\n" + kubeconfig_contents_encoded + "\n\n")
def create_rke_cluster_config(aws_nodes):
configfile = "cluster-ha.yml"
rkeconfig = readDataFile(DATA_SUBDIR, configfile)
rkeconfig = rkeconfig.replace("$ip1", aws_nodes[0].public_ip_address)
rkeconfig = rkeconfig.replace("$ip2", aws_nodes[1].public_ip_address)
rkeconfig = rkeconfig.replace("$ip3", aws_nodes[2].public_ip_address)
rkeconfig = rkeconfig.replace("$internalIp1",
aws_nodes[0].private_ip_address)
rkeconfig = rkeconfig.replace("$internalIp2",
aws_nodes[1].private_ip_address)
rkeconfig = rkeconfig.replace("$internalIp3",
aws_nodes[2].private_ip_address)
rkeconfig = rkeconfig.replace("$user1", aws_nodes[0].ssh_user)
rkeconfig = rkeconfig.replace("$user2", aws_nodes[1].ssh_user)
rkeconfig = rkeconfig.replace("$user3", aws_nodes[2].ssh_user)
rkeconfig = rkeconfig.replace("$AWS_SSH_KEY_NAME", AWS_SSH_KEY_NAME)
rkeconfig = rkeconfig.replace("$KUBERNETES_VERSION", KUBERNETES_VERSION)
print("cluster-ha-filled.yml: \n" + rkeconfig + "\n")
clusterfilepath = DATA_SUBDIR + "/" + "cluster-ha-filled.yml"
f = open(clusterfilepath, "w")
f.write(rkeconfig)
f.close()
return clusterfilepath
def create_aks_cluster():
tf_dir = DATA_SUBDIR + "/" + "terraform/aks"
aks_k8_s_version = os.environ.get('RANCHER_AKS_K8S_VERSION', '')
aks_location = os.environ.get('RANCHER_AKS_LOCATION', '')
client_id = os.environ.get('ARM_CLIENT_ID', '')
client_secret = os.environ.get('ARM_CLIENT_SECRET', '')
tf = Terraform(working_dir=tf_dir,
variables={'kubernetes_version': aks_k8_s_version,
'location': aks_location,
'client_id': client_id,
'client_secret': client_secret,
'cluster_name': resource_prefix})
print("Creating cluster")
tf.init()
print(tf.plan(out="aks_plan_server.out"))
print("\n\n")
print(tf.apply("--auto-approve"))
print("\n\n")
out_string = tf.output("kube_config", full_value=True)
with open(kubeconfig_path, "w") as kubefile:
kubefile.write(out_string)
def install_aks_ingress():
run_command_with_stderr(export_cmd + " && kubectl apply -f " +
DATA_SUBDIR + "/aks_nlb.yml")
@pytest.fixture(scope='module')
def precheck_certificate_options():
if RANCHER_HA_CERT_OPTION == 'byo-valid':
if RANCHER_VALID_TLS_CERT == '' or \
RANCHER_VALID_TLS_KEY == '' or \
RANCHER_VALID_TLS_CERT is None or \
RANCHER_VALID_TLS_KEY is None:
raise pytest.skip(
'Valid certificates not found in environment variables')
elif RANCHER_HA_CERT_OPTION == 'byo-self-signed':
if RANCHER_BYO_TLS_CERT == '' or \
RANCHER_BYO_TLS_KEY == '' or \
RANCHER_PRIVATE_CA_CERT == '' or \
RANCHER_BYO_TLS_CERT is None or \
RANCHER_BYO_TLS_KEY is None or \
RANCHER_PRIVATE_CA_CERT is None:
raise pytest.skip(
'Self signed certificates not found in environment variables')
elif RANCHER_HA_CERT_OPTION == 'letsencrypt':
if RANCHER_LETSENCRYPT_EMAIL == '' or \
RANCHER_LETSENCRYPT_EMAIL is None:
raise pytest.skip(
'LetsEncrypt email is not found in environment variables')
@pytest.fixture(scope='module')
def precheck_upgrade_options():
if RANCHER_HA_KUBECONFIG == '' or RANCHER_HA_KUBECONFIG is None:
raise pytest.skip('Kubeconfig is not found for upgrade!')
if RANCHER_HA_HOSTNAME == '' or RANCHER_HA_HOSTNAME is None:
raise pytest.skip('Hostname is not found for upgrade!')
| 21,944 | 40.5625 | 108 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_k8s_version_networkmodes.py
|
from lib.aws import AmazonWebServices
from .common import * # NOQA
k8s_version = "v1.10.1-rancher1"
rke_config = {"authentication": {"type": "authnConfig", "strategy": "x509"},
"ignoreDockerVersion": False,
"type": "rancherKubernetesEngineConfig"}
RANCHER_CLEANUP_CLUSTER = os.environ.get('RANCHER_CLEANUP_CLUSTER', "True")
NETWORK_PLUGIN = os.environ.get('NETWORK_PLUGIN', "canal")
def test_rke_custom_k8s_1_8_10():
validate_k8s_version("v1.8.10-rancher1-1", plugin=NETWORK_PLUGIN)
def test_rke_custom_k8s_1_8_11():
validate_k8s_version("v1.8.11-rancher1", plugin=NETWORK_PLUGIN)
def test_rke_custom_k8s_1_9_5():
validate_k8s_version("v1.9.5-rancher1-1", plugin=NETWORK_PLUGIN)
def test_rke_custom_k8s_1_9_7():
validate_k8s_version("v1.9.7-rancher1", plugin=NETWORK_PLUGIN)
def test_rke_custom_k8s_1_10_0():
validate_k8s_version("v1.10.0-rancher1-1", plugin=NETWORK_PLUGIN)
def test_rke_custom_k8s_1_10_1():
validate_k8s_version("v1.10.1-rancher1", plugin=NETWORK_PLUGIN)
def validate_k8s_version(k8s_version, plugin="canal"):
rke_config["kubernetesVersion"] = k8s_version
rke_config["network"] = {"type": "networkConfig", "plugin": plugin}
aws_nodes = \
AmazonWebServices().create_multiple_nodes(
8, random_test_name("testcustom"))
node_roles = [["controlplane"], ["controlplane"],
["etcd"], ["etcd"], ["etcd"],
["worker"], ["worker"], ["worker"]]
client = get_user_client()
cluster = client.create_cluster(name=random_name(),
driver="rancherKubernetesEngine",
rancherKubernetesEngineConfig=rke_config)
assert cluster.state == "active"
i = 0
for aws_node in aws_nodes:
docker_run_cmd = \
get_custom_host_registration_cmd(client, cluster,
node_roles[i], aws_node)
aws_node.execute_command(docker_run_cmd)
i += 1
cluster = validate_cluster(client, cluster)
if RANCHER_CLEANUP_CLUSTER == "True":
delete_cluster(client, cluster)
delete_node(aws_nodes)
def delete_node(aws_nodes):
for node in aws_nodes:
AmazonWebServices().delete_node(node)
| 2,291 | 33.208955 | 77 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/common.py
|
from ..common import * # NOQA
import inspect
import json
import os
import random
import subprocess
import ssl
import time
import requests
import ast
import paramiko
import rancher
import pytest
from urllib.parse import urlparse
from rancher import ApiError
from lib.aws import AmazonWebServices
from copy import deepcopy
from threading import Lock
from threading import Thread
import websocket
import base64
DEFAULT_CATALOG_TIMEOUT = 15
DEFAULT_MONITORING_TIMEOUT = 180
DEFAULT_CLUSTER_STATE_TIMEOUT = 320
DEFAULT_MULTI_CLUSTER_APP_TIMEOUT = 300
DEFAULT_APP_DELETION_TIMEOUT = 360
DEFAULT_APP_V2_TIMEOUT = 60
CATTLE_API_URL = CATTLE_TEST_URL + "/v3"
CATTLE_AUTH_URL = \
CATTLE_TEST_URL + "/v3-public/localproviders/local?action=login"
DNS_REGEX = "(https*://)(.*[^/])"
USER_PASSWORD = os.environ.get('USER_PASSWORD', "None")
ADMIN_PASSWORD = os.environ.get('ADMIN_PASSWORD', "None")
kube_fname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"k8s_kube_config")
MACHINE_TIMEOUT = float(os.environ.get('RANCHER_MACHINE_TIMEOUT', "1200"))
HARDENED_CLUSTER = ast.literal_eval(
os.environ.get('RANCHER_HARDENED_CLUSTER', "False"))
TEST_OS = os.environ.get('RANCHER_TEST_OS', "linux")
TEST_IMAGE = os.environ.get('RANCHER_TEST_IMAGE', "ranchertest/mytestcontainer")
TEST_IMAGE_PORT = os.environ.get('RANCHER_TEST_IMAGE_PORT', "80")
TEST_IMAGE_NGINX = os.environ.get('RANCHER_TEST_IMAGE_NGINX', "nginx")
TEST_IMAGE_OS_BASE = os.environ.get('RANCHER_TEST_IMAGE_OS_BASE', "ubuntu")
if TEST_OS == "windows":
DEFAULT_TIMEOUT = 300
skip_test_windows_os = pytest.mark.skipif(
TEST_OS == "windows",
reason='Tests Skipped for including Windows nodes cluster')
skip_test_hardened = pytest.mark.skipif(
HARDENED_CLUSTER,
reason='Tests Skipped due to being a hardened cluster')
UPDATE_KDM = ast.literal_eval(os.environ.get('RANCHER_UPDATE_KDM', "False"))
KDM_URL = os.environ.get("RANCHER_KDM_URL", "")
CLUSTER_NAME = os.environ.get("RANCHER_CLUSTER_NAME", "")
RANCHER_CLEANUP_CLUSTER = \
ast.literal_eval(os.environ.get('RANCHER_CLEANUP_CLUSTER', "True"))
env_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"rancher_env.config")
AWS_SSH_KEY_NAME = os.environ.get("AWS_SSH_KEY_NAME")
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
AWS_REGION = os.environ.get("AWS_REGION")
AWS_SUBNET = os.environ.get("AWS_SUBNET")
AWS_VPC = os.environ.get("AWS_VPC")
AWS_SG = os.environ.get("AWS_SG")
AWS_ZONE = os.environ.get("AWS_ZONE")
AWS_IAM_PROFILE = os.environ.get("AWS_IAM_PROFILE", "")
AWS_S3_BUCKET_NAME = os.environ.get("AWS_S3_BUCKET_NAME", "")
AWS_S3_BUCKET_FOLDER_NAME = os.environ.get("AWS_S3_BUCKET_FOLDER_NAME", "")
LINODE_ACCESSKEY = os.environ.get('RANCHER_LINODE_ACCESSKEY', "None")
NFS_SERVER_MOUNT_PATH = "/nfs"
TEST_RBAC = ast.literal_eval(os.environ.get('RANCHER_TEST_RBAC', "False"))
if_test_rbac = pytest.mark.skipif(TEST_RBAC is False,
reason='rbac tests are skipped')
TEST_ALL_SNAPSHOT = ast.literal_eval(
os.environ.get('RANCHER_TEST_ALL_SNAPSHOT', "False")
)
if_test_all_snapshot = \
pytest.mark.skipif(TEST_ALL_SNAPSHOT is False,
reason='Snapshots check tests are skipped')
DATA_SUBDIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'resource')
# As of release 2.4 default rke scan profile is "rke-cis-1.4"
CIS_SCAN_PROFILE = os.environ.get('RANCHER_CIS_SCAN_PROFILE', "rke-cis-1.4")
# here are all supported roles for RBAC testing
CLUSTER_MEMBER = "cluster-member"
CLUSTER_OWNER = "cluster-owner"
PROJECT_MEMBER = "project-member"
PROJECT_OWNER = "project-owner"
PROJECT_READ_ONLY = "read-only"
rbac_data = {
"project": None,
"namespace": None,
"workload": None,
"p_unshared": None,
"ns_unshared": None,
"wl_unshared": None,
"users": {
CLUSTER_OWNER: {},
CLUSTER_MEMBER: {},
PROJECT_OWNER: {},
PROJECT_MEMBER: {},
PROJECT_READ_ONLY: {},
}
}
auth_rbac_data = {
"project": None,
"namespace": None,
"users": {}
}
# here are the global role templates used for
# testing globalRoleBinding and groupRoleBinding
TEMPLATE_MANAGE_CATALOG = {
"newUserDefault": "false",
"rules": [
{
"type": "/v3/schemas/policyRule",
"apiGroups": [
"management.cattle.io"
],
"verbs": [
"*"
],
"resources": [
"catalogs",
"templates",
"templateversions"
]
}
],
"name": "gr-test-manage-catalog",
}
TEMPLATE_LIST_CLUSTER = {
"newUserDefault": "false",
"rules": [
{
"type": "/v3/schemas/policyRule",
"apiGroups": [
"management.cattle.io"
],
"verbs": [
"get",
"list",
"watch"
],
"resources": [
"clusters"
]
}
],
"name": "gr-test-list-cluster",
}
# this is used when testing users from an auth provider
AUTH_PROVIDER = os.environ.get('RANCHER_AUTH_PROVIDER', "")
if AUTH_PROVIDER not in ["activeDirectory", "freeIpa", "openLdap", ""]:
pytest.fail("Invalid RANCHER_AUTH_PROVIDER. Please provide one of: "
"activeDirectory, freeIpa, or openLdap (case sensitive).")
NESTED_GROUP_ENABLED = ast.literal_eval(
os.environ.get('RANCHER_NESTED_GROUP_ENABLED', "False"))
# Admin Auth username and the shared password for all auth users
AUTH_USER_PASSWORD = os.environ.get('RANCHER_AUTH_USER_PASSWORD', "")
# the link to log in as an auth user
LOGIN_AS_AUTH_USER_URL = \
CATTLE_TEST_URL + "/v3-public/" \
+ AUTH_PROVIDER + "Providers/" \
+ AUTH_PROVIDER.lower() + "?action=login"
CATTLE_AUTH_PRINCIPAL_URL = CATTLE_TEST_URL + "/v3/principals?action=search"
# This is used for nested group when a third-party Auth is enabled
nested_group = {
"auth_info": None,
"users": None,
"group_dic": None,
"groups": None
}
auth_requirements = not AUTH_PROVIDER or not AUTH_USER_PASSWORD
if_test_group_rbac = pytest.mark.skipif(
auth_requirements,
    reason='Group RBAC tests are skipped. '
'Required AUTH env variables '
'have not been set.'
)
# -----------------------------------------------------------------------------
# global variables from test_create_ha.py
test_run_id = "test" + str(random.randint(10000, 99999))
RANCHER_HOSTNAME_PREFIX = os.environ.get("RANCHER_HOSTNAME_PREFIX",
test_run_id)
CERT_MANAGER_VERSION = os.environ.get("RANCHER_CERT_MANAGER_VERSION", "v1.0.1")
# -----------------------------------------------------------------------------
# this is used for testing rbac v2
test_rbac_v2 = os.environ.get("RANCHER_TEST_RBAC_V2", "False")
if_test_rbac_v2 = pytest.mark.skipif(test_rbac_v2 != "True",
reason='test for rbac v2 is skipped')
def is_windows(os_type=TEST_OS):
return os_type == "windows"
def get_cluster_client_for_token_v1(cluster_id, token):
url = CATTLE_TEST_URL + "/k8s/clusters/" + cluster_id + "/v1/schemas"
return rancher.Client(url=url, token=token, verify=False)
def get_admin_client():
return rancher.Client(url=CATTLE_API_URL, token=ADMIN_TOKEN, verify=False)
def get_user_client():
return rancher.Client(url=CATTLE_API_URL, token=USER_TOKEN, verify=False)
def get_client_for_token(token, url=CATTLE_API_URL):
return rancher.Client(url=url, token=token, verify=False)
def get_project_client_for_token(project, token):
p_url = project.links['self'] + '/schemas'
p_client = rancher.Client(url=p_url, token=token, verify=False)
return p_client
def get_cluster_client_for_token(cluster, token):
c_url = cluster.links['self'] + '/schemas'
c_client = rancher.Client(url=c_url, token=token, verify=False)
return c_client
def up(cluster, token):
c_url = cluster.links['self'] + '/schemas'
c_client = rancher.Client(url=c_url, token=token, verify=False)
return c_client
def wait_state(client, obj, state, timeout=DEFAULT_TIMEOUT):
wait_for(lambda: client.reload(obj).state == state, timeout)
return client.reload(obj)
def wait_for_condition(client, resource, check_function, fail_handler=None,
timeout=DEFAULT_TIMEOUT):
start = time.time()
resource = client.reload(resource)
while not check_function(resource):
if time.time() - start > timeout:
exceptionMsg = 'Timeout waiting for ' + resource.baseType + \
' to satisfy condition: ' + \
inspect.getsource(check_function)
if fail_handler:
exceptionMsg = exceptionMsg + fail_handler(resource)
raise Exception(exceptionMsg)
time.sleep(.5)
resource = client.reload(resource)
return resource
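# A minimal usage sketch for wait_for_condition() above: the lambda waits for
# a cluster object to reach state "active", and the optional fail_handler
# appends the last observed state to the timeout message.
def _example_wait_for_cluster_active(client, cluster):
    return wait_for_condition(
        client, cluster,
        lambda resource: resource.state == "active",
        fail_handler=lambda resource: " last state: " + resource.state,
        timeout=DEFAULT_CLUSTER_STATE_TIMEOUT)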
def get_setting_value_by_name(name):
settings_url = CATTLE_API_URL + "/settings/" + name
head = {'Authorization': 'Bearer ' + ADMIN_TOKEN}
response = requests.get(settings_url, verify=False, headers=head)
return response.json()["value"]
# Return value is negative if v1 < v2, zero if v1 == v2 and positive if v1 > v2
def compare_versions(v1, v2):
if tuple(map(int, (v1.split(".")))) > tuple(map(int, (v2.split(".")))):
return 1
elif tuple(map(int, (v1.split(".")))) < tuple(map(int, (v2.split(".")))):
return -1
else:
return 0
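# A few illustrative checks for compare_versions() above; note that it
# expects plain dotted numeric versions and does not strip a leading "v".
def _compare_versions_examples():
    assert compare_versions("2.5.9", "2.6.0") == -1
    assert compare_versions("2.6.0", "2.6.0") == 0
    assert compare_versions("2.6.3", "2.6.0") == 1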
def create_project_and_ns(token, cluster, project_name=None, ns_name=None):
server_url = cluster.links['self'].split("/clusters")[0]
client = get_client_for_token(token, server_url)
p = create_project(client, cluster, project_name)
c_client = get_cluster_client_for_token(cluster, token)
ns = create_ns(c_client, cluster, p, ns_name)
return p, ns
def create_project(client, cluster, project_name=None):
if project_name is None:
project_name = random_name()
p = client.create_project(name=project_name,
clusterId=cluster.id)
time.sleep(5)
p = wait_until_available(client, p)
assert p.state == 'active'
return p
def create_project_with_pspt(client, cluster, pspt):
p = client.create_project(name=random_name(),
clusterId=cluster.id)
p = wait_until_available(client, p)
assert p.state == 'active'
return set_pspt_for_project(p, client, pspt)
def set_pspt_for_project(project, client, pspt):
project.setpodsecuritypolicytemplate(podSecurityPolicyTemplateId=pspt.id)
project = wait_until_available(client, project)
assert project.state == 'active'
return project
def create_ns(client, cluster, project, ns_name=None):
if ns_name is None:
ns_name = random_name()
ns = client.create_namespace(name=ns_name,
clusterId=cluster.id,
projectId=project.id)
wait_for_ns_to_become_active(client, ns)
ns = client.reload(ns)
assert ns.state == 'active'
return ns
def assign_members_to_cluster(client, user, cluster, role_template_id):
crtb = client.create_cluster_role_template_binding(
clusterId=cluster.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return crtb
def assign_members_to_project(client, user, project, role_template_id):
prtb = client.create_project_role_template_binding(
projectId=project.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return prtb
def change_member_role_in_cluster(client, user, crtb, role_template_id):
crtb = client.update(
crtb,
roleTemplateId=role_template_id,
userId=user.id)
return crtb
def change_member_role_in_project(client, user, prtb, role_template_id):
prtb = client.update(
prtb,
roleTemplateId=role_template_id,
userId=user.id)
return prtb
def create_kubeconfig(cluster, file_name=kube_fname):
generateKubeConfigOutput = cluster.generateKubeconfig()
print(generateKubeConfigOutput.config)
file = open(file_name, "w")
file.write(generateKubeConfigOutput.config)
file.close()
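# A minimal usage sketch: write the kubeconfig for a cluster once, then rely
# on execute_kubectl_cmd() (defined further below), which reads the same
# default kubeconfig file.
def _example_get_all_pods(cluster):
    create_kubeconfig(cluster)
    return execute_kubectl_cmd("get pods -A")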
def validate_psp_error_worklaod(p_client, workload, error_message):
workload = wait_for_wl_transitioning(p_client, workload)
assert workload.state == "updating"
assert workload.transitioning == "error"
print(workload.transitioningMessage)
assert error_message in workload.transitioningMessage
def validate_all_workload_image_from_rancher(project_client, ns, pod_count=1,
ignore_pod_count=False,
deployment_list=None,
daemonset_list=None,
cronjob_list=None, job_list=None):
if cronjob_list is None:
cronjob_list = []
if daemonset_list is None:
daemonset_list = []
if deployment_list is None:
deployment_list = []
if job_list is None:
job_list = []
workload_list = deployment_list + daemonset_list + cronjob_list + job_list
wls = [dep.name for dep in project_client.list_workload(namespaceId=ns.id).data]
assert len(workload_list) == len(wls), \
"Expected {} workload(s) to be present in {} namespace " \
"but there were {}".format(len(workload_list), ns.name, len(wls))
for workload_name in workload_list:
workloads = project_client.list_workload(name=workload_name,
namespaceId=ns.id).data
assert len(workloads) == workload_list.count(workload_name), \
"Expected {} workload(s) to be present with name {} " \
"but there were {}".format(workload_list.count(workload_name),
workload_name, len(workloads))
for workload in workloads:
for container in workload.containers:
assert str(container.image).startswith("rancher/")
if workload_name in deployment_list:
validate_workload(project_client, workload, "deployment",
ns.name, pod_count=pod_count,
ignore_pod_count=ignore_pod_count)
deployment_list.remove(workload_name)
if workload_name in daemonset_list:
validate_workload(project_client, workload, "daemonSet",
ns.name, pod_count=pod_count,
ignore_pod_count=ignore_pod_count)
daemonset_list.remove(workload_name)
if workload_name in cronjob_list:
validate_workload(project_client, workload, "cronJob",
ns.name, pod_count=pod_count,
ignore_pod_count=ignore_pod_count)
cronjob_list.remove(workload_name)
if workload_name in job_list:
validate_workload(project_client, workload, "job",
ns.name, pod_count=pod_count,
ignore_pod_count=ignore_pod_count)
job_list.remove(workload_name)
# Final assertion to ensure all expected workloads have been validated
    assert not deployment_list + daemonset_list + cronjob_list + job_list
def validate_workload(p_client, workload, type, ns_name, pod_count=1,
wait_for_cron_pods=60, ignore_pod_count=False):
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
# For cronjob, wait for the first pod to get created after
# scheduled wait time
if type == "cronJob":
time.sleep(wait_for_cron_pods)
if ignore_pod_count:
pods = p_client.list_pod(workloadId=workload.id).data
else:
pods = wait_for_pods_in_workload(p_client, workload, pod_count)
assert len(pods) == pod_count
pods = p_client.list_pod(workloadId=workload.id).data
assert len(pods) == pod_count
for pod in pods:
if type == "job":
job_type = True
expected_status = "Succeeded"
else:
job_type = False
expected_status = "Running"
p = wait_for_pod_to_running(p_client, pod, job_type=job_type)
assert p["status"]["phase"] == expected_status
wl_result = execute_kubectl_cmd(
"get " + type + " " + workload.name + " -n " + ns_name)
if type == "deployment" or type == "statefulSet":
assert wl_result["status"]["readyReplicas"] == len(pods)
if type == "daemonSet":
assert wl_result["status"]["currentNumberScheduled"] == len(pods)
if type == "cronJob":
assert len(wl_result["status"]["active"]) >= len(pods)
if type == "job":
assert wl_result["status"]["succeeded"] == len(pods)
def validate_workload_with_sidekicks(p_client, workload, type, ns_name,
pod_count=1):
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
pods = wait_for_pods_in_workload(p_client, workload, pod_count)
assert len(pods) == pod_count
for pod in pods:
wait_for_pod_to_running(p_client, pod)
wl_result = execute_kubectl_cmd(
"get " + type + " " + workload.name + " -n " + ns_name)
assert wl_result["status"]["readyReplicas"] == pod_count
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
execute_kubectl_cmd(get_pods)
pods_result = execute_kubectl_cmd(get_pods)
assert len(pods_result["items"]) == pod_count
for pod in pods_result["items"]:
assert pod["status"]["phase"] == "Running"
assert len(pod["status"]["containerStatuses"]) == 2
assert "running" in pod["status"]["containerStatuses"][0]["state"]
assert "running" in pod["status"]["containerStatuses"][1]["state"]
def validate_workload_paused(p_client, workload, expectedstatus):
workloadStatus = p_client.list_workload(uuid=workload.uuid).data[0].paused
assert workloadStatus == expectedstatus
def validate_pod_images(expectedimage, workload, ns_name):
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
for pod in pods["items"]:
assert pod["spec"]["containers"][0]["image"] == expectedimage
def validate_pods_are_running_by_id(expectedpods, workload, ns_name):
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
curpodnames = []
for pod in pods["items"]:
curpodnames.append(pod["metadata"]["name"])
for expectedpod in expectedpods["items"]:
assert expectedpod["metadata"]["name"] in curpodnames
def validate_workload_image(client, workload, expectedImage, ns):
workload = client.list_workload(uuid=workload.uuid).data[0]
assert workload.containers[0].image == expectedImage
validate_pod_images(expectedImage, workload, ns.name)
def execute_kubectl_cmd(cmd, json_out=True, stderr=False,
kubeconfig=kube_fname):
command = 'kubectl --kubeconfig {0} {1}'.format(
kubeconfig, cmd)
if json_out:
command += ' -o json'
print("run cmd: \t{0}".format(command))
if stderr:
result = run_command_with_stderr(command, False)
else:
result = run_command(command, False)
print("returns: \t{0}".format(result))
if json_out:
result = json.loads(result)
return result
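# Hedged usage sketch: execute_kubectl_cmd parses JSON output by default, so
# callers can index into the result; pass json_out=False for raw text.
def _example_get_pod_phases(ns_name):
    pods = execute_kubectl_cmd("get pods -n " + ns_name)
    return [p["status"]["phase"] for p in pods["items"]]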
def run_command(command, log_out=True):
if log_out:
print("run cmd: \t{0}".format(command))
try:
return subprocess.check_output(command, shell=True, text=True)
    except subprocess.CalledProcessError:
        return None
def run_command_with_stderr(command, log_out=True):
if log_out:
print("run cmd: \t{0}".format(command))
try:
output = subprocess.check_output(command, shell=True,
stderr=subprocess.PIPE)
returncode = 0
except subprocess.CalledProcessError as e:
output = e.stderr
returncode = e.returncode
if log_out:
print("return code: \t{0}".format(returncode))
if returncode != 0:
print("output: \t{0}".format(output))
return output
def wait_for_wl_to_active(client, workload, timeout=DEFAULT_TIMEOUT):
start = time.time()
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
while wl.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
return wl
def wait_for_ingress_to_active(client, ingress, timeout=DEFAULT_TIMEOUT):
start = time.time()
ingresses = client.list_ingress(uuid=ingress.uuid).data
assert len(ingresses) == 1
wl = ingresses[0]
while wl.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
ingresses = client.list_ingress(uuid=ingress.uuid).data
assert len(ingresses) == 1
wl = ingresses[0]
return wl
def wait_for_wl_transitioning(client, workload, timeout=DEFAULT_TIMEOUT,
state="error"):
start = time.time()
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
    while wl.transitioning != state:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for workload to transition to "
                "{}".format(state))
time.sleep(.5)
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
return wl
def wait_for_pod_to_running(client, pod, timeout=DEFAULT_TIMEOUT, job_type=False):
start = time.time()
pods = client.list_pod(uuid=pod.uuid).data
assert len(pods) == 1
p = pods[0]
if job_type:
expected_state = "succeeded"
else:
expected_state = "running"
    while p.state != expected_state:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for pod to reach the "
                "{} state".format(expected_state))
time.sleep(.5)
pods = client.list_pod(uuid=pod.uuid).data
assert len(pods) == 1
p = pods[0]
return p
def get_schedulable_nodes(cluster, client=None, os_type=TEST_OS):
if not client:
client = get_user_client()
nodes = client.list_node(clusterId=cluster.id).data
schedulable_nodes = []
for node in nodes:
if node.worker and (not node.unschedulable):
for key, val in node.labels.items():
# Either one of the labels should be present on the node
if key == 'kubernetes.io/os' or key == 'beta.kubernetes.io/os':
if val == os_type:
schedulable_nodes.append(node)
break
# Including master in list of nodes as master is also schedulable
if ('k3s' in cluster.version["gitVersion"] or 'rke2' in cluster.version["gitVersion"]) and node.controlPlane:
schedulable_nodes.append(node)
return schedulable_nodes
def get_etcd_nodes(cluster, client=None):
if not client:
client = get_user_client()
nodes = client.list_node(clusterId=cluster.id).data
etcd_nodes = []
for node in nodes:
if node.etcd:
etcd_nodes.append(node)
return etcd_nodes
def get_role_nodes(cluster, role, client=None):
etcd_nodes = []
control_nodes = []
worker_nodes = []
node_list = []
if not client:
client = get_user_client()
nodes = client.list_node(clusterId=cluster.id).data
for node in nodes:
if node.etcd:
etcd_nodes.append(node)
if node.controlPlane:
control_nodes.append(node)
if node.worker:
worker_nodes.append(node)
if role == "etcd":
node_list = etcd_nodes
if role == "control":
node_list = control_nodes
if role == "worker":
node_list = worker_nodes
return node_list
def validate_ingress(p_client, cluster, workloads, host, path,
insecure_redirect=False):
time.sleep(10)
curl_args = " "
    if insecure_redirect:
curl_args = " -L --insecure "
if len(host) > 0:
curl_args += " --header 'Host: " + host + "'"
nodes = get_schedulable_nodes(cluster, os_type="linux")
target_name_list = get_target_names(p_client, workloads)
for node in nodes:
host_ip = resolve_node_ip(node)
url = "http://" + host_ip + path
if not insecure_redirect:
wait_until_ok(url, timeout=300, headers={
"Host": host
})
cmd = curl_args + " " + url
validate_http_response(cmd, target_name_list)
def validate_ingress_using_endpoint(p_client, ingress, workloads,
timeout=300,
certcheck=False, is_insecure=False):
target_name_list = get_target_names(p_client, workloads)
start = time.time()
fqdn_available = False
url = None
while not fqdn_available:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for endpoint to be available")
time.sleep(.5)
ingress_list = p_client.list_ingress(uuid=ingress.uuid).data
assert len(ingress_list) == 1
ingress = ingress_list[0]
if hasattr(ingress, 'publicEndpoints'):
for public_endpoint in ingress.publicEndpoints:
if public_endpoint["hostname"].startswith(ingress.name) \
or certcheck:
fqdn_available = True
url = \
public_endpoint["protocol"].lower() + "://" + \
public_endpoint["hostname"]
if "path" in public_endpoint.keys():
url += public_endpoint["path"]
time.sleep(10)
validate_http_response(url, target_name_list, insecure=is_insecure)
def get_target_names(p_client, workloads):
pods = []
for workload in workloads:
pod_list = p_client.list_pod(workloadId=workload.id).data
pods.extend(pod_list)
target_name_list = []
for pod in pods:
target_name_list.append(pod.name)
print("target name list:" + str(target_name_list))
return target_name_list
def get_endpoint_url_for_workload(p_client, workload, timeout=600):
fqdn_available = False
url = ""
start = time.time()
while not fqdn_available:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for endpoint to be available")
time.sleep(.5)
workload_list = p_client.list_workload(uuid=workload.uuid).data
assert len(workload_list) == 1
workload = workload_list[0]
if hasattr(workload, 'publicEndpoints'):
assert len(workload.publicEndpoints) > 0
url = "http://"
url = url + workload.publicEndpoints[0]["addresses"][0] + ":"
url = url + str(workload.publicEndpoints[0]["port"])
fqdn_available = True
return url
def wait_until_lb_is_active(url, timeout=300):
start = time.time()
while check_for_no_access(url):
time.sleep(.5)
print("No access yet")
if time.time() - start > timeout:
raise Exception('Timed out waiting for LB to become active')
return
def check_for_no_access(url, verify=False):
try:
requests.get(url, verify=verify)
return False
except requests.ConnectionError:
print("Connection Error - " + url)
return True
def wait_until_active(url, timeout=120):
start = time.time()
while check_for_no_access(url):
time.sleep(.5)
print("No access yet")
if time.time() - start > timeout:
raise Exception('Timed out waiting for url '
'to become active')
return
def wait_until_ok(url, timeout=120, headers={}):
start = time.time()
while not check_if_ok(url, headers=headers):
time.sleep(.5)
if time.time() - start > timeout:
raise Exception(
'Timed out waiting for {0} to become ok'.format(url)
)
return
def wait_for_status_code(url, expected_code=200, timeout=DEFAULT_TIMEOUT):
start = time.time()
r = requests.get(url, verify=False)
while r.status_code != expected_code:
time.sleep(1)
r = requests.get(url, verify=False)
if time.time() - start > timeout:
raise Exception(
'Timed out waiting for status code {0}'
', actual code {1}'.format(
expected_code, r.status_code
)
)
return
def check_if_ok(url, verify=False, headers={}):
try:
res = requests.head(url, verify=verify, headers=headers)
if res.status_code == 200:
return True
return False
except requests.ConnectionError:
print("Connection Error - " + url)
return False
def validate_http_response(cmd, target_name_list, client_pod=None,
insecure=False):
if client_pod is None and cmd.startswith("http://"):
wait_until_active(cmd, 60)
target_hit_list = target_name_list[:]
count = 5 * len(target_name_list)
for i in range(1, count):
if len(target_hit_list) == 0:
break
if client_pod is None:
curl_cmd = "curl " + cmd
if insecure:
curl_cmd += "\t--insecure"
result = run_command(curl_cmd)
else:
if is_windows():
wget_cmd = 'powershell -NoLogo -NonInteractive -Command ' \
'"& {{ (Invoke-WebRequest -UseBasicParsing -Uri ' \
'{0}).Content }}"'.format(cmd)
else:
wget_cmd = "wget -qO- " + cmd
result = kubectl_pod_exec(client_pod, wget_cmd)
result = result.decode()
if result is not None:
result = result.rstrip()
assert result in target_name_list
if result in target_hit_list:
target_hit_list.remove(result)
print("After removing all, the rest is: ", target_hit_list)
assert len(target_hit_list) == 0
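# Hedged usage sketch: validate_http_response takes either a curl argument
# string / URL fetched from the test host, or, when client_pod is set, a URL
# fetched from inside that pod (mirroring the hostPort/nodePort helpers below).
def _example_check_endpoint_from_host(host_ip, source_port, target_names):
    curl_cmd = " http://" + host_ip + ":" + str(source_port) + "/name.html"
    validate_http_response(curl_cmd, target_names)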
def validate_cluster(client, cluster, intermediate_state="provisioning",
check_intermediate_state=True, skipIngresscheck=True,
nodes_not_in_active_state=[], k8s_version="",
userToken=USER_TOKEN, timeout=MACHINE_TIMEOUT):
    # Allow some time for the "cluster_owner" CRTB to take effect
time.sleep(5)
cluster = validate_cluster_state(
client, cluster,
check_intermediate_state=check_intermediate_state,
intermediate_state=intermediate_state,
nodes_not_in_active_state=nodes_not_in_active_state,
timeout=timeout)
create_kubeconfig(cluster)
if k8s_version != "":
check_cluster_version(cluster, k8s_version)
if hasattr(cluster, 'rancherKubernetesEngineConfig'):
check_cluster_state(len(get_role_nodes(cluster, "etcd", client)))
# check all workloads under the system project are active
# wait for workloads to be active
# time.sleep(DEFAULT_TIMEOUT)
print("checking if workloads under the system project are active")
sys_project = client.list_project(name='System',
clusterId=cluster.id).data[0]
sys_p_client = get_project_client_for_token(sys_project, userToken)
for wl in sys_p_client.list_workload().data:
"""to help run KDM job faster (when there are many clusters),
timeout=300 is set"""
wait_for_wl_to_active(sys_p_client, wl, timeout=300)
# Create Daemon set workload and have an Ingress with Workload
# rule pointing to this daemonSet
project, ns = create_project_and_ns(userToken, cluster)
p_client = get_project_client_for_token(project, userToken)
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
validate_workload(p_client, workload, "daemonSet", ns.name,
len(get_schedulable_nodes(cluster, client)))
if not skipIngresscheck:
pods = p_client.list_pod(workloadId=workload["id"]).data
scale = len(pods)
# test service discovery
validate_service_discovery(workload, scale, p_client, ns, pods)
host = "test" + str(random_int(10000, 99999)) + ".com"
path = "/name.html"
rule = {"host": host,
"paths":
[{"workloadIds": [workload.id],
"targetPort": TEST_IMAGE_PORT}]}
ingress = p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
wait_for_ingress_to_active(p_client, ingress)
validate_ingress(p_client, cluster, [workload], host, path)
return cluster
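# Hedged usage sketch: validating an already-provisioned cluster usually skips
# the intermediate-state check, since the cluster may already be "active".
def _example_validate_existing_cluster():
    client, cluster = get_user_client_and_cluster()
    return validate_cluster(client, cluster, check_intermediate_state=False)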
def check_cluster_version(cluster, version):
cluster_k8s_version = \
cluster.appliedSpec["rancherKubernetesEngineConfig"][
"kubernetesVersion"]
assert cluster_k8s_version == version, \
"cluster_k8s_version: " + cluster_k8s_version + \
" Expected: " + version
expected_k8s_version = version[:version.find("-rancher")]
k8s_version = execute_kubectl_cmd("version")
kubectl_k8s_version = k8s_version["serverVersion"]["gitVersion"]
assert kubectl_k8s_version == expected_k8s_version, \
"kubectl version: " + kubectl_k8s_version + \
" Expected: " + expected_k8s_version
def check_cluster_state(etcd_count):
css_resp = execute_kubectl_cmd("get cs")
css = css_resp["items"]
components = ["scheduler", "controller-manager"]
for i in range(0, etcd_count):
components.append("etcd-" + str(i))
print("components to check - " + str(components))
for cs in css:
component_name = cs["metadata"]["name"]
assert component_name in components
components.remove(component_name)
assert cs["conditions"][0]["status"] == "True"
assert cs["conditions"][0]["type"] == "Healthy"
assert len(components) == 0
def validate_dns_record(pod, record, expected, port=TEST_IMAGE_PORT):
# requires pod with `dig` available - TEST_IMAGE
host = '{0}.{1}.svc.cluster.local'.format(
record["name"], record["namespaceId"])
validate_dns_entry(pod, host, expected, port=port)
def validate_dns_entry(pod, host, expected, port=TEST_IMAGE_PORT):
if is_windows():
validate_dns_entry_windows(pod, host, expected)
return
# requires pod with `dig` available - TEST_IMAGE
if HARDENED_CLUSTER:
cmd = 'curl -vs {}:{} 2>&1'.format(host, port)
else:
cmd = 'ping -c 1 -W 1 {0}'.format(host)
cmd_output = kubectl_pod_exec(pod, cmd)
connectivity_validation_pass = False
for expected_value in expected:
if expected_value in str(cmd_output):
connectivity_validation_pass = True
break
assert connectivity_validation_pass is True
if HARDENED_CLUSTER:
assert " 200 OK" in str(cmd_output)
else:
assert " 0% packet loss" in str(cmd_output)
dig_cmd = 'dig {0} +short'.format(host)
dig_output = kubectl_pod_exec(pod, dig_cmd)
for expected_value in expected:
assert expected_value in str(dig_output)
def validate_dns_entry_windows(pod, host, expected):
def ping_check():
ping_cmd = 'ping -w 1 -n 1 {0}'.format(host)
ping_output = kubectl_pod_exec(pod, ping_cmd)
ping_validation_pass = False
for expected_value in expected:
if expected_value in str(ping_output):
ping_validation_pass = True
break
return ping_validation_pass and (" (0% loss)" in str(ping_output))
wait_for(callback=ping_check,
timeout_message="Failed to ping {0}".format(host))
def dig_check():
dig_cmd = 'powershell -NoLogo -NonInteractive -Command ' \
'"& {{ (Resolve-DnsName {0}).IPAddress }}"'.format(host)
dig_output = kubectl_pod_exec(pod, dig_cmd)
dig_validation_pass = True
for expected_value in expected:
if expected_value not in str(dig_output):
dig_validation_pass = False
break
return dig_validation_pass
wait_for(callback=dig_check,
timeout_message="Failed to resolve {0}".format(host))
def validate_dns_record_deleted(client, dns_record, timeout=DEFAULT_TIMEOUT):
"""
Checks whether dns_record got deleted successfully.
    Validates that the dns_record is no longer listed by the given client.
@param client: Object client use to create dns_record
@param dns_record: record object subjected to be deleted
@param timeout: Max time to keep checking whether record is deleted or not
"""
time.sleep(2)
start = time.time()
records = client.list_dns_record(name=dns_record.name, ).data
while len(records) != 0:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for record {} to be deleted"
"".format(dns_record.name))
time.sleep(.5)
records = client.list_dns_record(name=dns_record.name, ).data
def wait_for_nodes_to_become_active(client, cluster, exception_list=[],
retry_count=0):
nodes = client.list_node(clusterId=cluster.id).data
node_auto_deleted = False
for node in nodes:
if node.requestedHostname not in exception_list:
node = wait_for_node_status(client, node, "active")
if node is None:
print("Need to re-evalauate new node list")
node_auto_deleted = True
retry_count += 1
print("Retry Count:" + str(retry_count))
if node_auto_deleted and retry_count < 5:
wait_for_nodes_to_become_active(client, cluster, exception_list,
retry_count)
def wait_for_node_status(client, node, state):
uuid = node.uuid
start = time.time()
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
# Handle the case of nodes getting auto deleted when they are part of
# nodepools
if node_count == 1:
node_status = nodes[0].state
else:
print("Node does not exist anymore -" + uuid)
return None
    while node_status != state:
        if time.time() - start > MACHINE_TIMEOUT:
            raise AssertionError(
                "Timed out waiting for node to reach the "
                "{} state".format(state))
time.sleep(5)
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
if node_count == 1:
node_status = nodes[0].state
else:
print("Node does not exist anymore -" + uuid)
return None
return node
def wait_for_node_to_be_deleted(client, node, timeout=300):
uuid = node.uuid
start = time.time()
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
while node_count != 0:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for node delete")
time.sleep(.5)
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
def wait_for_cluster_node_count(client, cluster, expected_node_count,
timeout=300):
start = time.time()
nodes = client.list_node(clusterId=cluster.id).data
node_count = len(nodes)
    while node_count != expected_node_count:
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for the cluster to have "
                "{} node(s)".format(expected_node_count))
time.sleep(.5)
nodes = client.list_node(clusterId=cluster.id).data
node_count = len(nodes)
def get_custom_host_registration_cmd(client, cluster, roles, node):
allowed_roles = ["etcd", "worker", "controlplane"]
cluster_tokens = client.list_cluster_registration_token(
clusterId=cluster.id).data
if len(cluster_tokens) > 0:
cluster_token = cluster_tokens[0]
else:
cluster_token = create_custom_host_registration_token(client, cluster)
additional_options = " --address " + node.public_ip_address + \
" --internal-address " + node.private_ip_address
if 'Administrator' == node.ssh_user:
cmd = cluster_token.windowsNodeCommand
cmd = cmd.replace('| iex', '--worker' + additional_options + ' | iex ')
else:
cmd = cluster_token.nodeCommand
for role in roles:
assert role in allowed_roles
cmd += " --" + role
cmd += additional_options
return cmd
def create_custom_host_registration_token(client, cluster):
    # Allow some time for the "cluster_owner" CRTB to take effect
time.sleep(5)
cluster_token = client.create_cluster_registration_token(
clusterId=cluster.id)
cluster_token = client.wait_success(cluster_token)
assert cluster_token.state == 'active'
return cluster_token
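# Hedged usage sketch: the registration command is typically run on a freshly
# provisioned node object; `node` is assumed to expose execute_command() and
# the public/private IP attributes used by get_custom_host_registration_cmd.
def _example_register_custom_worker(client, cluster, node):
    cmd = get_custom_host_registration_cmd(client, cluster, ["worker"], node)
    return node.execute_command(cmd)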
def get_cluster_by_name(client, name):
clusters = client.list_cluster(name=name).data
assert len(clusters) == 1, "Cluster " + name + " does not exist"
return clusters[0]
def get_cluster_type(client, cluster):
cluster_configs = [
"amazonElasticContainerServiceConfig",
"azureKubernetesServiceConfig",
"googleKubernetesEngineConfig",
"rancherKubernetesEngineConfig"
]
if "rancherKubernetesEngineConfig" in cluster:
nodes = client.list_node(clusterId=cluster.id).data
if len(nodes) > 0:
if nodes[0].nodeTemplateId is None:
return "Custom"
for cluster_config in cluster_configs:
if cluster_config in cluster:
return cluster_config
return "Imported"
def delete_cluster(client, cluster):
nodes = client.list_node(clusterId=cluster.id).data
# Delete nodes(in cluster) from AWS for Imported and Custom Cluster
if len(nodes) > 0:
cluster_type = get_cluster_type(client, cluster)
print(cluster_type)
if get_cluster_type(client, cluster) in ["Imported", "Custom"]:
filters = [
{'Name': 'tag:Name',
'Values': ['testcustom*', 'teststress*', 'testsa*']}]
ip_filter = {}
ip_list = []
ip_filter['Name'] = \
'network-interface.addresses.association.public-ip'
ip_filter['Values'] = ip_list
filters.append(ip_filter)
for node in nodes:
host_ip = resolve_node_ip(node)
ip_list.append(host_ip)
assert len(ip_filter) > 0
print(ip_filter)
aws_nodes = AmazonWebServices().get_nodes(filters)
if aws_nodes is None:
# search instances by IPs in case names do not follow patterns
aws_nodes = AmazonWebServices().get_nodes(filters=[ip_filter])
if aws_nodes is None:
print("no instance is found in AWS")
else:
for node in aws_nodes:
print(node.public_ip_address)
AmazonWebServices().delete_nodes(aws_nodes)
# Delete Cluster
client.delete(cluster)
def check_connectivity_between_workloads(p_client1, workload1, p_client2,
workload2, allow_connectivity=True):
wl1_pods = p_client1.list_pod(workloadId=workload1.id).data
wl2_pods = p_client2.list_pod(workloadId=workload2.id).data
for pod in wl1_pods:
for o_pod in wl2_pods:
check_connectivity_between_pods(pod, o_pod, allow_connectivity)
def check_connectivity_between_workload_pods(p_client, workload):
pods = p_client.list_pod(workloadId=workload.id).data
for pod in pods:
for o_pod in pods:
check_connectivity_between_pods(pod, o_pod)
def check_connectivity_between_pods(pod1, pod2, allow_connectivity=True):
pod_ip = pod2.status.podIp
if is_windows():
cmd = 'ping -w 1 -n 1 {0}'.format(pod_ip)
elif HARDENED_CLUSTER:
cmd = 'curl -I {}:{}'.format(pod_ip, TEST_IMAGE_PORT)
else:
cmd = "ping -c 1 -W 1 " + pod_ip
response = kubectl_pod_exec(pod1, cmd)
if not HARDENED_CLUSTER:
assert pod_ip in str(response)
if allow_connectivity:
if is_windows():
assert " (0% loss)" in str(response)
elif HARDENED_CLUSTER:
assert " 200 OK" in str(response)
else:
assert " 0% packet loss" in str(response)
else:
if is_windows():
assert " (100% loss)" in str(response)
elif HARDENED_CLUSTER:
assert " 200 OK" not in str(response)
else:
assert " 100% packet loss" in str(response)
def kubectl_pod_exec(pod, cmd):
command = "exec " + pod.name + " -n " + pod.namespaceId + " -- " + cmd
return execute_kubectl_cmd(command, json_out=False, stderr=True)
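# Hedged usage sketch: kubectl_pod_exec returns raw bytes, so callers decode
# before asserting on the command output.
def _example_pod_hostname(pod):
    output = kubectl_pod_exec(pod, "hostname")
    return output.decode().strip()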
def exec_shell_command(ip, port, cmd, password, user="root", sshKey=None):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if sshKey:
ssh.connect(ip, username=user, key_filename=sshKey, port=port)
else:
ssh.connect(ip, username=user, password=password, port=port)
stdin, stdout, stderr = ssh.exec_command(cmd)
response = stdout.readlines()
return response
def wait_for_ns_to_become_active(client, ns, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(10)
nss = client.list_namespace(uuid=ns.uuid).data
assert len(nss) == 1
ns = nss[0]
while ns.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
nss = client.list_namespace(uuid=ns.uuid).data
assert len(nss) == 1
ns = nss[0]
return ns
def wait_for_pod_images(p_client, workload, ns_name, expectedimage, numofpods,
timeout=DEFAULT_TIMEOUT):
start = time.time()
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
        for x in range(0, numofpods):
pod = pods["items"][x]
podimage = pod["spec"]["containers"][0]["image"]
while podimage != expectedimage:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for correct pod images")
time.sleep(.5)
pods = execute_kubectl_cmd(get_pods)
pod = pods["items"][x]
podimage = pod["spec"]["containers"][0]["image"]
def wait_for_pods_in_workload(p_client, workload, pod_count,
timeout=DEFAULT_TIMEOUT):
start = time.time()
pods = p_client.list_pod(workloadId=workload.id).data
while len(pods) != pod_count:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for pods in workload {}. Expected {}. "
"Got {}".format(workload.name, pod_count, len(pods)))
time.sleep(.5)
pods = p_client.list_pod(workloadId=workload.id).data
return pods
def get_user_client_and_cluster(client=None):
if not client:
client = get_user_client()
if CLUSTER_NAME == "":
clusters = client.list_cluster().data
else:
clusters = client.list_cluster(name=CLUSTER_NAME).data
assert len(clusters) > 0
cluster = clusters[0]
return client, cluster
def get_global_admin_client_and_cluster():
client = get_admin_client()
if CLUSTER_NAME == "":
clusters = client.list_cluster().data
else:
clusters = client.list_cluster(name=CLUSTER_NAME).data
assert len(clusters) > 0
cluster = clusters[0]
return client, cluster
def validate_cluster_state(client, cluster,
check_intermediate_state=True,
intermediate_state="provisioning",
nodes_not_in_active_state=[],
timeout=MACHINE_TIMEOUT):
start_time = time.time()
if check_intermediate_state:
cluster = wait_for_condition(
client, cluster,
lambda x: x.state == intermediate_state,
lambda x: 'State is: ' + x.state,
timeout=timeout)
assert cluster.state == intermediate_state
cluster = wait_for_condition(
client, cluster,
lambda x: x.state == "active",
lambda x: 'State is: ' + x.state,
timeout=timeout)
assert cluster.state == "active"
wait_for_nodes_to_become_active(client, cluster,
exception_list=nodes_not_in_active_state)
timeout = 60
start = time.time()
while "version" not in cluster.keys():
time.sleep(1)
cluster = client.reload(cluster)
delta = time.time() - start
if delta > timeout:
msg = "Timeout waiting for K8s version to be synced"
raise Exception(msg)
end_time = time.time()
diff = time.strftime("%H:%M:%S", time.gmtime(end_time - start_time))
print("The total time for provisioning/updating the cluster {} : {}".
format(cluster.name, diff))
return cluster
def wait_until_available(client, obj, timeout=DEFAULT_TIMEOUT):
start = time.time()
sleep = 0.01
while True:
time.sleep(sleep)
sleep *= 2
if sleep > 2:
sleep = 2
try:
obj = client.reload(obj)
except ApiError as e:
if e.error.status != 403:
raise e
else:
return obj
delta = time.time() - start
if delta > timeout:
            msg = 'Timed out waiting for [{}:{}] to become available ' \
                  'after {} seconds'.format(obj.type, obj.id, delta)
raise Exception(msg)
def delete_node(aws_nodes):
for node in aws_nodes:
AmazonWebServices().delete_node(node)
def cluster_cleanup(client, cluster, aws_nodes=None):
if RANCHER_CLEANUP_CLUSTER:
client.delete(cluster)
if aws_nodes is not None:
delete_node(aws_nodes)
else:
env_details = "env.CATTLE_TEST_URL='" + CATTLE_TEST_URL + "'\n"
env_details += "env.ADMIN_TOKEN='" + ADMIN_TOKEN + "'\n"
env_details += "env.USER_TOKEN='" + USER_TOKEN + "'\n"
env_details += "env.CLUSTER_NAME='" + cluster.name + "'\n"
create_config_file(env_details)
def create_config_file(env_details):
file = open(env_file, "w")
file.write(env_details)
file.close()
def validate_hostPort(p_client, workload, source_port, cluster):
get_endpoint_url_for_workload(p_client, workload)
wl = p_client.list_workload(uuid=workload.uuid).data[0]
source_port_wk = wl.publicEndpoints[0]["port"]
assert source_port == source_port_wk, "Source ports do not match"
pods = p_client.list_pod(workloadId=workload.id).data
nodes = get_schedulable_nodes(cluster)
for node in nodes:
target_name_list = []
for pod in pods:
print(pod.nodeId + " check " + node.id)
if pod.nodeId == node.id:
target_name_list.append(pod.name)
break
if len(target_name_list) > 0:
host_ip = resolve_node_ip(node)
curl_cmd = " http://" + host_ip + ":" + \
str(source_port) + "/name.html"
validate_http_response(curl_cmd, target_name_list)
def validate_lb(p_client, workload, source_port):
url = get_endpoint_url_for_workload(p_client, workload)
wl = p_client.list_workload(uuid=workload.uuid).data[0]
source_port_wk = wl.publicEndpoints[0]["port"]
assert source_port == source_port_wk, "Source ports do not match"
target_name_list = get_target_names(p_client, [workload])
wait_until_lb_is_active(url)
validate_http_response(url + "/name.html", target_name_list)
def validate_nodePort(p_client, workload, cluster, source_port):
get_endpoint_url_for_workload(p_client, workload, 600)
wl = p_client.list_workload(uuid=workload.uuid).data[0]
source_port_wk = wl.publicEndpoints[0]["port"]
assert source_port == source_port_wk, "Source ports do not match"
nodes = get_schedulable_nodes(cluster)
pods = p_client.list_pod(workloadId=wl.id).data
target_name_list = []
for pod in pods:
target_name_list.append(pod.name)
print("target name list:" + str(target_name_list))
for node in nodes:
host_ip = resolve_node_ip(node)
curl_cmd = " http://" + host_ip + ":" + \
str(source_port_wk) + "/name.html"
validate_http_response(curl_cmd, target_name_list)
def validate_clusterIp(p_client, workload, cluster_ip, test_pods, source_port):
pods = p_client.list_pod(workloadId=workload.id).data
target_name_list = []
for pod in pods:
target_name_list.append(pod["name"])
curl_cmd = "http://" + cluster_ip + ":" + \
str(source_port) + "/name.html"
for pod in test_pods:
validate_http_response(curl_cmd, target_name_list, pod)
def wait_for_pv_to_be_available(c_client, pv_object, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
assert len(list) == 1
pv = list[0]
while pv.state != "available":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to available")
time.sleep(.5)
list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
assert len(list) == 1
pv = list[0]
return pv
def wait_for_pvc_to_be_bound(p_client, pvc_object, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
assert len(list) == 1
pvc = list[0]
while pvc.state != "bound":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to bound")
time.sleep(.5)
list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
assert len(list) == 1
pvc = list[0]
return pvc
def create_wl_with_nfs(p_client, ns_id, pvc_name, wl_name,
mount_path, sub_path, is_daemonSet=False):
volumes = [{"type": "volume",
"name": "vol1",
"persistentVolumeClaim": {
"readOnly": "false",
"type": "persistentVolumeClaimVolumeSource",
"persistentVolumeClaimId": pvc_name
}}]
volumeMounts = [{"readOnly": "False",
"type": "volumeMount",
"mountPath": mount_path,
"subPath": sub_path,
"name": "vol1"
}]
con = [{"name": "test1",
"image": TEST_IMAGE,
"volumeMounts": volumeMounts
}]
if is_daemonSet:
workload = p_client.create_workload(name=wl_name,
containers=con,
namespaceId=ns_id,
volumes=volumes,
daemonSetConfig={})
else:
workload = p_client.create_workload(name=wl_name,
containers=con,
namespaceId=ns_id,
volumes=volumes)
return workload
def write_content_to_file(pod, content, filename):
cmd_write = "/bin/bash -c 'echo {1} > {0}'".format(filename, content)
if is_windows():
        cmd_write = \
            'powershell -NoLogo -NonInteractive -Command ' \
            '"& {{ echo {1} > {0} }}"'.format(filename, content)
output = kubectl_pod_exec(pod, cmd_write)
assert output.strip().decode('utf-8') == ""
def validate_file_content(pod, content, filename):
cmd_get_content = "/bin/bash -c 'cat {0}' ".format(filename)
if is_windows():
        cmd_get_content = 'powershell -NoLogo -NonInteractive -Command ' \
                          '"& {{ cat {0} }}"'.format(filename)
output = kubectl_pod_exec(pod, cmd_get_content)
assert output.strip().decode('utf-8') == content
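# Hedged usage sketch: a write/read round trip through a pod using the two
# helpers above; the pod and mount path come from the calling test.
def _example_volume_round_trip(pod, mount_path):
    test_file = mount_path + "/test.txt"
    write_content_to_file(pod, "volume-test", test_file)
    validate_file_content(pod, "volume-test", test_file)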
def wait_for_mcapp_to_active(client, multiClusterApp,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
time.sleep(5)
# When the app is deployed it goes into Active state for a short
# period of time and then into installing/deploying.
mcapps = client.list_multiClusterApp(uuid=multiClusterApp.uuid,
name=multiClusterApp.name).data
start = time.time()
assert len(mcapps) == 1, "Cannot find multi cluster app"
mapp = mcapps[0]
while mapp.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
multiclusterapps = client.list_multiClusterApp(
uuid=multiClusterApp.uuid, name=multiClusterApp.name).data
assert len(multiclusterapps) == 1
mapp = multiclusterapps[0]
return mapp
def wait_for_app_to_active(client, app_id,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
"""
    First wait for the app to reach the deploying state, then wait for it to
    reach the active state. This avoids wrongly concluding that the app is
    active, since an app goes through installing > active > deploying > active.
@param client: Project client
@param app_id: App id of deployed app.
@param timeout: Max time allowed to wait for app to become active.
@return: app object
"""
start = time.time()
app_data = client.list_app(id=app_id).data
while len(app_data) == 0:
if time.time() - start > timeout / 10:
raise AssertionError(
"Timed out waiting for listing the app from API")
time.sleep(.2)
app_data = client.list_app(id=app_id).data
application = app_data[0]
while application.state != "deploying":
if time.time() - start > timeout / 3:
break
time.sleep(.2)
app_data = client.list_app(id=app_id).data
application = app_data[0]
while application.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for {0} to get to active,"
" the actual state: {1}".format(application.name,
application.state))
time.sleep(.5)
app = client.list_app(id=app_id).data
assert len(app) >= 1
application = app[0]
return application
def wait_for_app_to_remove(client, app_id,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
start = time.time()
app_data = client.list_app(id=app_id).data
if len(app_data) == 0:
return
application = app_data[0]
while application.state == "removing" or application.state == "active":
if time.time() - start > timeout / 10:
raise AssertionError(
"Timed out waiting for app to not be installed")
time.sleep(.2)
app_data = client.list_app(id=app_id).data
if len(app_data) == 0:
break
application = app_data[0]
def validate_response_app_endpoint(p_client, appId,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
ingress_list = p_client.list_ingress(namespaceId=appId).data
assert len(ingress_list) == 1
ingress = ingress_list[0]
if hasattr(ingress, 'publicEndpoints'):
for public_endpoint in ingress.publicEndpoints:
url = \
public_endpoint["protocol"].lower() + "://" + \
public_endpoint["hostname"]
print(url)
start = time.time()
try:
while True:
r = requests.head(url)
print(r.status_code)
if r.status_code == 200:
return
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting response to be 200.")
time.sleep(.5)
except requests.ConnectionError:
print("failed to connect")
assert False, "failed to connect to the app"
def resolve_node_ip(node):
if hasattr(node, 'externalIpAddress'):
node_ip = node.externalIpAddress
else:
node_ip = node.ipAddress
return node_ip
def provision_nfs_server():
node = AmazonWebServices().create_node(random_test_name("nfs-server"))
node.wait_for_ssh_ready()
c_path = os.getcwd()
cmd_path = c_path + "/tests/v3_api/scripts/nfs-setup.sh"
command = open(cmd_path, 'r').read()
node.execute_command(command)
return node
def get_defaut_question_answers(client, externalId):
def get_answer(quest):
if "default" in quest.keys():
answer = quest["default"]
else:
answer = ""
        # If required and no default value is available, set a usable value
        # for enum, password and string types; error out for other types
if "required" in quest.keys():
if quest["required"]:
if quest["type"] == "enum" and "options" in quest.keys():
answer = quest["options"][0]
elif quest["type"] == "password":
answer = "R@ncher135"
elif quest["type"] == "string":
answer = "fake"
else:
assert False, \
"Cannot set default for types {}" \
"".format(quest["type"])
return answer
def check_if_question_needed(questions_and_answers, ques):
add_question = False
match_string = ques["showIf"]
match_q_as = match_string.split("&&")
for q_a in match_q_as:
items = q_a.split("=")
if len(items) == 1:
items.append("")
if items[0] in questions_and_answers.keys():
if questions_and_answers[items[0]] == items[1]:
add_question = True
else:
add_question = False
break
return add_question
questions_and_answers = {}
print("external id = {}".format(externalId))
template_revs = client.list_template_version(externalId=externalId).data
assert len(template_revs) == 1
template_rev = template_revs[0]
questions = template_rev.questions
for ques in questions:
add_question = True
if "showIf" in ques.keys():
add_question = \
check_if_question_needed(questions_and_answers, ques)
if add_question:
question = ques["variable"]
answer = get_answer(ques)
            questions_and_answers[question] = answer
if "showSubquestionIf" in ques.keys():
if ques["showSubquestionIf"] == answer:
sub_questions = ques["subquestions"]
for sub_question in sub_questions:
question = sub_question["variable"]
questions_and_answers[question] = \
get_answer(sub_question)
print("questions_and_answers = {}".format(questions_and_answers))
return questions_and_answers
def validate_app_deletion(client, app_id,
timeout=DEFAULT_APP_DELETION_TIMEOUT):
app_data = client.list_app(id=app_id).data
start = time.time()
if len(app_data) == 0:
return
application = app_data[0]
while application.state == "removing":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for app to delete")
time.sleep(.5)
app_data = client.list_app(id=app_id).data
if len(app_data) == 0:
break
application = app_data[0]
def validate_catalog_app(proj_client, app, external_id, answer=None):
"""
    This method validates that all the deployed workloads are in the active
    state, have the correct chart version, and that the answers match.
    @param proj_client: Project client object of an existing project.
    @param app: Deployed app object.
    @param external_id: URL of the app in the catalog API.
    @param answer: the answers the app expects while deploying (request body).
@return: Deployed app object.
"""
if answer is None:
answers = get_defaut_question_answers(get_user_client(), external_id)
else:
answers = answer
# validate app is active
app = wait_for_app_to_active(proj_client, app.id)
assert app.externalId == external_id, \
"the version of the app is not correct"
# check if associated workloads are active
ns = app.targetNamespace
parameters = external_id.split('&')
assert len(parameters) > 1, \
"Incorrect list of parameters from catalog external ID"
chart_prefix = parameters[len(parameters) - 2].split("=")[1]
chart_suffix = parameters[len(parameters) - 1].split("=")[1]
chart = chart_prefix + "-" + chart_suffix
app_name = parameters[len(parameters) - 2].split("=")[1]
workloads = proj_client.list_workload(namespaceId=ns).data
# For longhorn app, only active state of workloads is verified as longhorn
# workloads do not have the field workloadLabels
# For all other apps active state of workloads & chart version are verified
if "longhorn" in app.externalId:
print("validating the Longhorn app, it may take longer than others")
for wl in workloads:
wait_for_wl_to_active(proj_client, wl)
else:
for wl in workloads:
print("Workload {} , state - {}".format(wl.id, wl.state))
assert wl.state == "active"
chart_deployed = get_chart_info(wl.workloadLabels)
print("Chart detail of app - {}".format(chart_deployed))
# '-' check is to make sure chart has both app name and version.
if app_name in chart_deployed and '-' in chart_deployed:
assert chart_deployed == chart, "the chart version is wrong"
# Validate_app_answers
assert len(answers.items() - app["answers"].items()) == 0, \
"Answers are not same as the original catalog answers"
return app
def get_chart_info(workloadlabels):
"""
This method finds either 'chart' tag or
'helm.sh/chart' tag from workload API
@param workloadlabels: workloadslabel object
@return: chart value of workload e.g. 'app_name-version'
"""
if "chart" in workloadlabels.keys():
return workloadlabels.chart
elif "helm.sh/chart" in workloadlabels.keys():
return workloadlabels["helm.sh/chart"]
else:
return ''
def create_user(client, cattle_auth_url=CATTLE_AUTH_URL):
user_name = random_name()
user = client.create_user(username=user_name,
password=USER_PASSWORD)
client.create_global_role_binding(globalRoleId="user",
subjectKind="User",
userId=user.id)
user_token = get_user_token(user.username, USER_PASSWORD, cattle_auth_url)
return user, user_token
def get_user_token(username, password, cattle_auth_url=CATTLE_AUTH_URL):
r = requests.post(cattle_auth_url, json={
'username': username,
'password': password,
'responseType': 'json',
}, verify=False)
print(r.json())
return r.json()["token"]
def rbac_get_user_by_role(role):
if role in rbac_data["users"].keys():
return rbac_data["users"][role]["user"]
return None
def rbac_get_user_token_by_role(role):
if role in rbac_data["users"].keys():
return rbac_data["users"][role]["token"]
return None
def rbac_get_kubeconfig_by_role(role):
if role in rbac_data["users"].keys():
return rbac_data["users"][role]["kubeconfig"]
return None
def rbac_get_project():
return rbac_data["project"]
def rbac_get_namespace():
return rbac_data["namespace"]
def rbac_get_workload():
return rbac_data["workload"]
def rbac_get_unshared_project():
return rbac_data["p_unshared"]
def rbac_get_unshared_ns():
return rbac_data["ns_unshared"]
def rbac_get_unshared_workload():
return rbac_data["wl_unshared"]
def rbac_prepare():
"""this function creates one project, one namespace,
and four users with different roles"""
admin_client, cluster = get_global_admin_client_and_cluster()
create_kubeconfig(cluster)
# create a new project in the cluster
project, ns = create_project_and_ns(ADMIN_TOKEN,
cluster,
random_test_name("p-test-rbac"))
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
p_client = get_project_client_for_token(project, ADMIN_TOKEN)
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id)
validate_workload(p_client, workload, "deployment", ns.name)
rbac_data["workload"] = workload
rbac_data["project"] = project
rbac_data["namespace"] = ns
# create new users
for key in rbac_data["users"]:
user1, token1 = create_user(admin_client)
rbac_data["users"][key]["user"] = user1
rbac_data["users"][key]["token"] = token1
# assign different role to each user
assign_members_to_cluster(admin_client,
rbac_data["users"][CLUSTER_OWNER]["user"],
cluster,
CLUSTER_OWNER)
assign_members_to_cluster(admin_client,
rbac_data["users"][CLUSTER_MEMBER]["user"],
cluster,
CLUSTER_MEMBER)
assign_members_to_project(admin_client,
rbac_data["users"][PROJECT_MEMBER]["user"],
project,
PROJECT_MEMBER)
assign_members_to_project(admin_client,
rbac_data["users"][PROJECT_OWNER]["user"],
project,
PROJECT_OWNER)
assign_members_to_project(admin_client,
rbac_data["users"][PROJECT_READ_ONLY]["user"],
project,
PROJECT_READ_ONLY)
# create kubeconfig files for each user
for key in rbac_data["users"]:
user_client = get_client_for_token(rbac_data["users"][key]["token"])
_, user_cluster = get_user_client_and_cluster(user_client)
rbac_data["users"][key]["kubeconfig"] = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
key + "_kubeconfig")
create_kubeconfig(user_cluster, rbac_data["users"][key]["kubeconfig"])
# create another project that none of the above users are assigned to
p2, ns2 = create_project_and_ns(ADMIN_TOKEN,
cluster,
random_test_name("p-unshared"))
name = random_test_name("default")
p_client = get_project_client_for_token(p2, ADMIN_TOKEN)
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns2.id)
validate_workload(p_client, workload, "deployment", ns2.name)
rbac_data["p_unshared"] = p2
rbac_data["ns_unshared"] = ns2
rbac_data["wl_unshared"] = workload
def rbac_cleanup():
""" remove the project, namespace and users created for the RBAC tests"""
try:
client = get_admin_client()
except Exception:
print("Not able to get admin client. Not performing RBAC cleanup")
return
for _, value in rbac_data["users"].items():
try:
client.delete(value["user"])
except Exception:
pass
client.delete(rbac_data["project"])
client.delete(rbac_data["wl_unshared"])
client.delete(rbac_data["p_unshared"])
def check_condition(condition_type, status):
def _find_condition(resource):
if not hasattr(resource, "conditions"):
return False
if resource.conditions is None:
return False
for condition in resource.conditions:
if condition.type == condition_type and condition.status == status:
return True
return False
return _find_condition
def create_catalog_external_id(catalog_name, template, version,
project_cluster_id=None, catalog_type=None):
if catalog_type is None:
return "catalog://?catalog=" + catalog_name + \
"&template=" + template + "&version=" + version
elif catalog_type == "project" or catalog_type == "cluster":
return "catalog://?catalog=" + project_cluster_id + "/" \
+ catalog_name + "&type=" + catalog_type \
+ "Catalog&template=" + template + "&version=" + version
def wait_for_catalog_active(client, catalog, timeout=DEFAULT_CATALOG_TIMEOUT):
time.sleep(2)
catalog_data = client.list_catalog(name=catalog.name)
print(catalog_data)
start = time.time()
assert len(catalog_data["data"]) >= 1, "Cannot find catalog"
catalog = catalog_data["data"][0]
while catalog.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
catalog_data = client.list_catalog(name=catalog.name)
assert len(catalog_data["data"]) >= 1
catalog = catalog_data["data"][0]
return catalog
def readDataFile(data_dir, name):
fname = os.path.join(data_dir, name)
print("File: " + fname)
is_file = os.path.isfile(fname)
assert is_file
with open(fname) as f:
return f.read()
def set_url_password_token(rancher_url, server_url=None):
"""Returns a ManagementContext for the default global admin user."""
auth_url = \
rancher_url + "/v3-public/localproviders/local?action=login"
r = requests.post(auth_url, json={
'username': 'admin',
'password': 'admin',
'responseType': 'json',
}, verify=False)
print(r.json())
token = r.json()['token']
print(token)
# Change admin password
client = rancher.Client(url=rancher_url + "/v3",
token=token, verify=False)
admin_user = client.list_user(username="admin").data
admin_user[0].setpassword(newPassword=ADMIN_PASSWORD)
# Set server-url settings
serverurl = client.list_setting(name="server-url").data
if server_url:
client.update(serverurl[0], value=server_url)
else:
client.update(serverurl[0], value=rancher_url)
return token
def validate_create_catalog(token, catalog_name, branch, url, permission=True):
"""
This function validates if the user has the permission to create a
global catalog.
:param token: user's token
:param catalog_name: the name of the catalog
:param branch: the branch of the git repo
:param url: the url of the git repo
:param permission: boolean value, True if the user can create catalog
:return: the catalog object or None
"""
client = get_client_for_token(token)
if not permission:
with pytest.raises(ApiError) as e:
client.create_catalog(name=catalog_name,
branch=branch,
url=url)
error_msg = "user with no permission should receive 403: Forbidden"
error_code = e.value.error.code
error_status = e.value.error.status
assert error_status == 403 and error_code == 'Forbidden', error_msg
return None
else:
try:
client.create_catalog(name=catalog_name,
branch=branch,
url=url)
except ApiError as e:
assert False, "user with permission should receive no exception:" \
+ str(e.error.status) + " " + e.error.code
catalog_list = client.list_catalog(name=catalog_name).data
assert len(catalog_list) == 1
return catalog_list[0]
def generate_template_global_role(name, new_user_default=False, template=None):
""" generate a template that is used for creating a global role"""
if template is None:
template = TEMPLATE_MANAGE_CATALOG
template = deepcopy(template)
if new_user_default:
template["newUserDefault"] = "true"
else:
template["newUserDefault"] = "false"
if name is None:
name = random_name()
template["name"] = name
return template
def wait_for_backup_to_active(cluster, backupname,
timeout=DEFAULT_TIMEOUT):
start = time.time()
etcdbackups = cluster.etcdBackups(name=backupname)
assert len(etcdbackups) == 1
etcdbackupdata = etcdbackups['data']
etcdbackupstate = etcdbackupdata[0]['state']
while etcdbackupstate != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
etcdbackups = cluster.etcdBackups(name=backupname)
assert len(etcdbackups) == 1
etcdbackupdata = etcdbackups['data']
etcdbackupstate = etcdbackupdata[0]['state']
print("BACKUP STATE")
print(etcdbackupstate)
return etcdbackupstate
def wait_for_backup_to_delete(cluster, backupname,
timeout=DEFAULT_TIMEOUT):
start = time.time()
etcdbackups = cluster.etcdBackups(name=backupname)
while len(etcdbackups) == 1:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for backup to be deleted")
time.sleep(.5)
etcdbackups = cluster.etcdBackups(name=backupname)
def validate_backup_create(namespace, backup_info, backup_mode=None):
p_client = namespace["p_client"]
ns = namespace["ns"]
cluster = namespace["cluster"]
name = random_test_name("default")
if not hasattr(cluster, 'rancherKubernetesEngineConfig'):
assert False, "Cluster is not of type RKE"
con = [{"name": "test1",
"image": TEST_IMAGE}]
backup_info["workload"] = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
validate_workload(p_client, backup_info["workload"], "daemonSet", ns.name,
len(get_schedulable_nodes(cluster)))
host = "test" + str(random_int(10000, 99999)) + ".com"
namespace["host"] = host
path = "/name.html"
rule = {"host": host,
"paths": [{"workloadIds": [backup_info["workload"].id],
"targetPort": TEST_IMAGE_PORT}]}
p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
validate_ingress(p_client, cluster, [backup_info["workload"]], host, path)
# Perform Backup
backup = cluster.backupEtcd()
backup_info["backupname"] = backup['metadata']['name']
wait_for_backup_to_active(cluster, backup_info["backupname"])
# Get all the backup info
etcdbackups = cluster.etcdBackups(name=backup_info["backupname"])
backup_info["etcdbackupdata"] = etcdbackups['data']
backup_info["backup_id"] = backup_info["etcdbackupdata"][0]['id']
if backup_mode == "s3":
backupfileurl = backup_info["etcdbackupdata"][0]['filename']
# Check the backup filename exists in S3
parseurl = urlparse(backupfileurl)
backup_info["backupfilename"] = os.path.basename(parseurl.path)
backup_found = AmazonWebServices().s3_backup_check(
backup_info["backupfilename"])
assert backup_found, "the backup was not found in the S3 bucket"
elif backup_mode == 'filesystem':
for node in namespace['nodes']:
if 'etcd' not in node.roles:
continue
get_filesystem_snapshots = 'ls /opt/rke/etcd-snapshots'
response = node.execute_command(get_filesystem_snapshots)[0]
assert backup_info["etcdbackupdata"][0]['filename'] in response, \
"The filename doesn't match any of the files locally"
return namespace, backup_info
def validate_backup_restore(namespace, backup_info):
p_client = namespace["p_client"]
ns = namespace["ns"]
client = get_user_client()
cluster = namespace["cluster"]
name = random_test_name("default")
host = namespace["host"]
path = "/name.html"
con = [{"name": "test1",
"image": TEST_IMAGE}]
# Create workload after backup
testworkload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id)
validate_workload(p_client, testworkload, "deployment", ns.name)
# Perform Restore
cluster.restoreFromEtcdBackup(etcdBackupId=backup_info["backup_id"])
# After restore, validate cluster
validate_cluster(client, cluster, intermediate_state="updating",
check_intermediate_state=True,
skipIngresscheck=False)
# Verify the ingress created before taking the snapshot
validate_ingress(p_client, cluster, [backup_info["workload"]], host, path)
# Verify the workload created after getting a snapshot does not exist
# after restore
workload_list = p_client.list_workload(uuid=testworkload.uuid).data
print(len(workload_list))
assert len(workload_list) == 0, "workload shouldn't exist after restore"
return namespace, backup_info
def validate_backup_delete(namespace, backup_info, backup_mode=None):
client = get_user_client()
cluster = namespace["cluster"]
client.delete(
cluster.etcdBackups(name=backup_info["backupname"])['data'][0]
)
wait_for_backup_to_delete(cluster, backup_info["backupname"])
assert len(cluster.etcdBackups(name=backup_info["backupname"])) == 0, \
"backup shouldn't be listed in the Cluster backups"
if backup_mode == "s3":
# Check the backup reference is deleted in Rancher and S3
backup_found = AmazonWebServices().s3_backup_check(
backup_info["backupfilename"])
assert_message = "The backup should't exist in the S3 bucket"
assert backup_found is False, assert_message
elif backup_mode == 'filesystem':
for node in namespace['nodes']:
if 'etcd' not in node.roles:
continue
get_filesystem_snapshots = 'ls /opt/rke/etcd-snapshots'
response = node.execute_command(get_filesystem_snapshots)[0]
filename = backup_info["etcdbackupdata"][0]['filename']
assert filename not in response, \
"The file still exist in the filesystem"
def apply_crd(ns, file, kubectl_context):
return execute_kubectl_cmd('apply -f ' + file + ' -n ' + ns.name,
json_out=False, stderr=True,
kubeconfig=kubectl_context).decode("ascii")
def get_crd(ns, crd_name, kubectl_context):
return execute_kubectl_cmd('get ' + crd_name + ' -n ' + ns.name,
json_out=False, stderr=True,
kubeconfig=kubectl_context).decode("ascii")
def delete_crd(ns, file, kubectl_context):
return execute_kubectl_cmd('delete -f ' + file + ' -n ' + ns.name,
json_out=False, stderr=True,
kubeconfig=kubectl_context).decode("ascii")
def prepare_auth_data():
name = \
os.path.join(os.path.dirname(os.path.realpath(__file__)) + "/resource",
AUTH_PROVIDER.lower() + ".json")
with open(name) as reader:
auth_data = reader.read()
raw = json.loads(auth_data).get("nested_group_info")
nested_group["auth_info"] = raw.copy()
nested_group["users"] = raw.get("users")
raw.pop("users")
nested_group["group_dic"] = raw
nested_group["groups"] = raw.keys()
def is_nested():
""" check if the provided groups are nested groups,
return True if at least one of the groups contains other groups
"""
count = 0
for user, group in nested_group["group_dic"].items():
if len(group) == 0:
count += 1
if count < len(nested_group["group_dic"]):
return True
return False
def get_group(nested=False):
""" return a group or a nested group"""
if nested:
# return the name of a group that contains at least one other group
for item in nested_group["groups"]:
if len(nested_group["group_dic"].get(item).get("users")) == 0:
pass
sub_groups = nested_group["group_dic"].get(item).get("groups")
if len(sub_groups) == 0:
pass
for g in sub_groups:
if len(nested_group["group_dic"].get(g).get("users")) > 0:
return item
assert False, "cannot find any valid nested group"
else:
# return the name of a group that has at least one direct user
for group in nested_group["groups"]:
if len(nested_group["group_dic"].get(group).get("users")) > 0:
return group
assert False, "cannot find any valid non-nested group"
def get_user_by_group(group, nested=False):
""" return the list of uses in the group or nested group
if nested is False, return the direct users in the group;
otherwise, return all users including those from nested groups
"""
def get_user_in_nested_group(group, source):
if group == "":
return []
users = source["group_dic"].get(group).get("users")
for sub_group in source["group_dic"].get(group).get("groups"):
temp = get_user_in_nested_group(sub_group, source)
for user in temp:
if user not in users:
users.append(user)
return users
if nested:
users = get_user_in_nested_group(group, nested_group)
assert len(users) > 0, "no user in the group"
else:
users = nested_group["group_dic"].get(group).get("users")
assert users is not None, "no user in the group"
print("group: {}, users: {}".format(group, users))
return users
def get_a_group_and_a_user_not_in_it(nested=False):
""" return a group or a nested group and a user that is not in the group"""
all_users = nested_group["users"]
for group in nested_group["groups"]:
group_users = get_user_by_group(group, nested)
for user in all_users:
if user not in group_users:
print("group: {}, user not in it: {}".format(group, user))
return group, user
assert False, "cannot find a group and a user not in it"
def get_group_principal_id(group_name, token=ADMIN_TOKEN, expected_status=200):
""" get the group's principal id from the auth provider"""
headers = {'Authorization': 'Bearer ' + token}
r = requests.post(CATTLE_AUTH_PRINCIPAL_URL,
json={'name': group_name,
'principalType': 'group',
'responseType': 'json'},
verify=False, headers=headers)
assert r.status_code == expected_status
return r.json()['data'][0]["id"]
def login_as_auth_user(username, password, login_url=LOGIN_AS_AUTH_USER_URL):
""" login with the user account from the auth provider,
and return the user token"""
r = requests.post(login_url, json={
'username': username,
'password': password,
'responseType': 'json',
}, verify=False)
assert r.status_code in [200, 201]
return r.json()
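# Illustrative usage sketch (not part of the original suite). It assumes the
# login response carries a "token" field, as Rancher-style logins usually do;
# the credentials below are hypothetical placeholders.
def _example_login_flow():
    """Sketch only: log in as an auth-provider user and reuse the token."""
    body = login_as_auth_user("example-user", "example-password")
    return body.get("token")  # assumption: token field in the login response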
def validate_service_discovery(workload, scale,
p_client=None, ns=None, testclient_pods=None):
expected_ips = []
pods = p_client.list_pod(workloadId=workload["id"]).data
assert len(pods) == scale
for pod in pods:
expected_ips.append(pod["status"]["podIp"])
host = '{0}.{1}.svc.cluster.local'.format(workload.name, ns.id)
for pod in testclient_pods:
validate_dns_entry(pod, host, expected_ips)
def auth_get_project():
return auth_rbac_data["project"]
def auth_get_namespace():
return auth_rbac_data["namespace"]
def auth_get_user_token(username):
if username in auth_rbac_data["users"].keys():
return auth_rbac_data["users"][username].token
return None
def add_role_to_user(user, role):
"""this function adds a user from the auth provider to given cluster"""
admin_client, cluster = get_global_admin_client_and_cluster()
project = auth_get_project()
ns = auth_get_namespace()
if not (project and ns):
project, ns = create_project_and_ns(ADMIN_TOKEN, cluster,
random_test_name("p-test-auth"))
auth_rbac_data["project"] = project
auth_rbac_data["namespace"] = ns
if role in [PROJECT_OWNER, PROJECT_MEMBER, PROJECT_READ_ONLY]:
assign_members_to_project(admin_client, user, project, role)
else:
assign_members_to_cluster(admin_client, user, cluster, role)
auth_rbac_data["users"][user.username] = user
def auth_resource_cleanup():
""" remove the project and namespace created for the AUTH tests"""
client, cluster = get_global_admin_client_and_cluster()
client.delete(auth_rbac_data["project"])
auth_rbac_data["project"] = None
auth_rbac_data["ns"] = None
for username, user in auth_rbac_data["users"].items():
user_crtbs = client.list_cluster_role_template_binding(userId=user.id)
for crtb in user_crtbs:
client.delete(crtb)
class WebsocketLogParse:
"""
the class is used for receiving and parsing the message
received from the websocket
"""
def __init__(self):
self.lock = Lock()
self._last_message = ''
def receiver(self, socket, skip, b64=True):
"""
run a thread to receive and save the message from the web socket
:param socket: the socket connection
        :param skip: if True skip the first char of the received message
        :param b64: if True, base64-decode the received data before saving
        """
        while socket.connected:
try:
data = socket.recv()
# the message from the kubectl contains an extra char
if skip:
data = data[1:]
if len(data) < 5:
pass
if b64:
data = base64.b64decode(data).decode()
self.lock.acquire()
self._last_message += data
self.lock.release()
except websocket.WebSocketConnectionClosedException:
print("Connection closed")
break
except websocket.WebSocketProtocolException as wpe:
print("Error: {}".format(wpe))
break
@staticmethod
def start_thread(target, args):
thread = Thread(target=target, args=args)
thread.daemon = True
thread.start()
time.sleep(1)
@property
def last_message(self):
return self._last_message
@last_message.setter
def last_message(self, value):
self.lock.acquire()
self._last_message = value
self.lock.release()
def wait_for_cluster_delete(client, cluster_name, timeout=DEFAULT_TIMEOUT):
start = time.time()
cluster = client.list_cluster(name=cluster_name).data
cluster_count = len(cluster)
while cluster_count != 0:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for cluster to get deleted")
time.sleep(.5)
cluster = client.list_cluster(name=cluster_name).data
cluster_count = len(cluster)
def create_connection(url, subprotocols):
"""
    create a websocket connection and check if it is connected
    :param url: the url to connect to
    :param subprotocols: the list of subprotocols
    :return: the websocket connection
"""
ws = websocket.create_connection(
url=url,
sslopt={"cert_reqs": ssl.CERT_NONE},
subprotocols=subprotocols,
timeout=10,
cookie="R_SESS=" + USER_TOKEN
)
assert ws.connected, "failed to build the websocket"
return ws
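# Illustrative sketch (not part of the original suite) wiring create_connection()
# and WebsocketLogParse together. The subprotocol value and the 2-second wait
# are assumptions; real callers should pick the subprotocol their endpoint
# expects and poll until the output they need has arrived.
def _example_websocket_log_capture(ws_url):
    """Sketch only: capture websocket output using the helpers above."""
    ws = create_connection(ws_url, ["base64.channel.k8s.io"])
    parser = WebsocketLogParse()
    # receiver(socket, skip) runs in a daemon thread and appends to last_message
    WebsocketLogParse.start_thread(target=parser.receiver, args=(ws, True))
    time.sleep(2)
    output = parser.last_message
    parser.last_message = ''   # reset the buffer before the next capture
    ws.close()
    return output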
def wait_for_hpa_to_active(client, hpa, timeout=DEFAULT_TIMEOUT):
start = time.time()
hpalist = client.list_horizontalPodAutoscaler(uuid=hpa.uuid).data
assert len(hpalist) == 1
hpa = hpalist[0]
while hpa.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
hpas = client.list_horizontalPodAutoscaler(uuid=hpa.uuid).data
assert len(hpas) == 1
hpa = hpas[0]
return hpa
def create_pv_pvc(client, ns, nfs_ip, cluster_client):
pv_object = create_pv(cluster_client, nfs_ip)
pvc_name = random_test_name("pvc")
pvc_config = {"accessModes": ["ReadWriteOnce"],
"name": pvc_name,
"volumeId": pv_object.id,
"namespaceId": ns.id,
"storageClassId": "",
"resources": {"requests": {"storage": "10Gi"}}
}
pvc_object = client.create_persistent_volume_claim(pvc_config)
pvc_object = wait_for_pvc_to_be_bound(client, pvc_object, timeout=300)
return pv_object, pvc_object
def create_pv(client, nfs_ip):
pv_name = random_test_name("pv")
pv_config = {"type": "persistentVolume",
"accessModes": ["ReadWriteOnce"],
"name": pv_name,
"nfs": {"readOnly": "false",
"type": "nfsvolumesource",
"path": NFS_SERVER_MOUNT_PATH,
"server": nfs_ip
},
"capacity": {"storage": "50Gi"}
}
pv_object = client.create_persistent_volume(pv_config)
capacitydict = pv_object['capacity']
assert capacitydict['storage'] == '50Gi'
assert pv_object['type'] == 'persistentVolume'
return pv_object
def delete_resource_in_AWS_by_prefix(resource_prefix):
"""
:param resource_prefix: the prefix of resource name
:return: None
"""
# delete nodes of both local and custom clusters
node_filter = [{
'Name': 'tag:Name',
'Values': [resource_prefix + "-*"]
}]
nodes = AmazonWebServices().get_nodes(filters=node_filter)
if nodes is None:
print("deleting the following instances: None")
else:
print("deleting the following instances: {}"
.format([node.public_ip_address for node in nodes]))
AmazonWebServices().delete_nodes(nodes)
# delete load balancer and target groups
tg_list = []
lb_list = []
lb_names = [resource_prefix + '-nlb',
resource_prefix + '-k3s-nlb',
resource_prefix + '-internal-nlb']
for name in lb_names:
lb_arn = AmazonWebServices().get_lb(name)
if lb_arn is not None:
lb_list.append(lb_arn)
res = AmazonWebServices().get_target_groups(lb_arn)
tg_list.extend(res)
print("deleting the following load balancers: {}".format(lb_list))
print("deleting the following target groups: {}".format(tg_list))
for lb in lb_list:
AmazonWebServices().delete_lb(lb)
for tg in tg_list:
AmazonWebServices().delete_target_group(tg)
# delete rds
db_name = resource_prefix + "-db"
print("deleting the database (if it exists): {}".format(db_name))
AmazonWebServices().delete_db(db_name)
# delete the route 53 record
route53_names = [resource_prefix + ".qa.rancher.space.",
resource_prefix + "-internal.qa.rancher.space."]
for name in route53_names:
print("deleting the route53 record (if it exists): {}".format(name))
AmazonWebServices().delete_route_53_record(name)
print("deletion is done")
return None
def configure_cis_requirements(aws_nodes, profile, node_roles, client,
cluster):
i = 0
if profile == 'rke-cis-1.4':
for aws_node in aws_nodes:
aws_node.execute_command("sudo sysctl -w vm.overcommit_memory=1")
aws_node.execute_command("sudo sysctl -w kernel.panic=10")
aws_node.execute_command("sudo sysctl -w kernel.panic_on_oops=1")
if node_roles[i] == ["etcd"]:
aws_node.execute_command("sudo useradd etcd")
docker_run_cmd = \
get_custom_host_registration_cmd(client,
cluster,
node_roles[i],
aws_node)
aws_node.execute_command(docker_run_cmd)
i += 1
elif profile == 'rke-cis-1.5':
for aws_node in aws_nodes:
aws_node.execute_command("sudo sysctl -w vm.overcommit_memory=1")
aws_node.execute_command("sudo sysctl -w kernel.panic=10")
aws_node.execute_command("sudo sysctl -w vm.panic_on_oom=0")
aws_node.execute_command("sudo sysctl -w kernel.panic_on_oops=1")
aws_node.execute_command("sudo sysctl -w "
"kernel.keys.root_maxbytes=25000000")
if node_roles[i] == ["etcd"]:
aws_node.execute_command("sudo groupadd -g 52034 etcd")
aws_node.execute_command("sudo useradd -u 52034 -g 52034 etcd")
docker_run_cmd = \
get_custom_host_registration_cmd(client,
cluster,
node_roles[i],
aws_node)
aws_node.execute_command(docker_run_cmd)
i += 1
time.sleep(5)
cluster = validate_cluster_state(client, cluster)
    # wait for the workloads under the System project to become active
time.sleep(20)
if profile == 'rke-cis-1.5':
create_kubeconfig(cluster)
network_policy_file = DATA_SUBDIR + "/default-allow-all.yaml"
account_update_file = DATA_SUBDIR + "/account_update.yaml"
items = execute_kubectl_cmd("get namespaces -A")["items"]
all_ns = [item["metadata"]["name"] for item in items]
for ns in all_ns:
execute_kubectl_cmd("apply -f {0} -n {1}".
format(network_policy_file, ns))
namespace = ["default", "kube-system"]
for ns in namespace:
execute_kubectl_cmd('patch serviceaccount default'
' -n {0} -p "$(cat {1})"'.
format(ns, account_update_file))
return cluster
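# Illustrative sketch (not part of the original suite): configure_cis_requirements()
# expects node_roles to be a per-node list of RKE roles aligned index-for-index
# with aws_nodes. The three-node layout below is hypothetical.
_EXAMPLE_CIS_NODE_ROLES = [
    ["etcd"],           # etcd nodes additionally get the etcd user/group created
    ["controlplane"],
    ["worker"],
]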
def get_node_details(cluster, client):
"""
    lists the nodes from the cluster; this cluster has only one worker node
    :return: client and the worker node object
"""
create_kubeconfig(cluster)
nodes = client.list_node(clusterId=cluster.id).data
assert len(nodes) > 0
for node in nodes:
if node.worker:
break
return client, node
def create_service_account_configfile():
client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
name = random_name()
# create a service account
execute_kubectl_cmd(cmd="create sa {}".format(name), json_out=False)
# get the ca and token
res = execute_kubectl_cmd(cmd="get secret -o name", json_out=False)
secret_name = ""
for item in res.split("\n"):
if name in item:
secret_name = item.split("/")[1]
break
res = execute_kubectl_cmd(cmd="get secret {}".format(secret_name))
ca = res["data"]["ca.crt"]
token = res["data"]["token"]
token = base64.b64decode(token).decode()
server = None
nodes = client.list_node(clusterId=cluster.id).data
for node in nodes:
if node.controlPlane:
server = "https://" + node.externalIpAddress + ":6443"
break
assert server is not None, 'failed to get the public ip of control plane'
config = """
apiVersion: v1
kind: Config
clusters:
- name: test-cluster
cluster:
server: {server}
certificate-authority-data: {ca}
contexts:
- name: default-context
context:
cluster: test-cluster
namespace: default
user: test-user
current-context: default-context
users:
- name: test-user
user:
token: {token}
"""
config = config.format(server=server, ca=ca, token=token)
config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
name + ".yaml")
with open(config_file, "w") as file:
file.write(config)
return name
def rbac_test_file_reader(file_path=None):
"""
    This method generates test cases from an input file and returns a result
    that can be used to parametrize pytest cases
:param file_path: the path to the JSON file for test cases
:return: a list of tuples of
(cluster_role, command, authorization, service account name)
"""
if test_rbac_v2 == "False":
return []
if file_path is None:
pytest.fail("no file is provided")
with open(file_path) as reader:
test_cases = json.loads(reader.read().replace("{resource_root}",
DATA_SUBDIR))
output = []
for cluster_role, checks in test_cases.items():
# create a service account for each role
name = create_service_account_configfile()
# create the cluster role binding
cmd = "create clusterrolebinding {} " \
"--clusterrole {} " \
"--serviceaccount {}".format(name, cluster_role,
"default:" + name)
execute_kubectl_cmd(cmd, json_out=False)
for command in checks["should_pass"]:
output.append((cluster_role, command, True, name))
for command in checks["should_fail"]:
output.append((cluster_role, command, False, name))
return output
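# Illustrative sketch (not part of the original suite): the JSON file consumed by
# rbac_test_file_reader() maps a cluster role to the kubectl commands that should
# and should not be authorized; "{resource_root}" is expanded to DATA_SUBDIR
# before parsing. The role name and commands below are hypothetical.
_EXAMPLE_RBAC_V2_TEST_CASES = """
{
    "view": {
        "should_pass": ["get pods -n default"],
        "should_fail": ["apply -f {resource_root}/example.yaml -n default"]
    }
}
"""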
def validate_cluster_role_rbac(cluster_role, command, authorization, name):
"""
    This method runs a kubectl command with the service account's kubeconfig
    to verify whether the cluster role binding between the service account and
    the cluster role grants the expected permission
:param cluster_role: the cluster role
:param command: the kubectl command to run
:param authorization: if the service account has the permission: True/False
:param name: the name of the service account, cluster role binding, and the
kubeconfig file
"""
config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
name + ".yaml")
result = execute_kubectl_cmd(command,
json_out=False,
kubeconfig=config_file,
stderr=True).decode('utf_8')
if authorization:
assert "Error from server (Forbidden)" not in result, \
"{} should have the authorization to run {}".format(cluster_role,
command)
else:
assert "Error from server (Forbidden)" in result, \
"{} should NOT have the authorization to run {}".format(
cluster_role, command)
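# Illustrative wiring sketch (not part of the original suite): the tuples produced
# by rbac_test_file_reader() are meant to parametrize a test that calls
# validate_cluster_role_rbac(). The JSON file name below is hypothetical, so the
# sketch is kept commented out.
#
# @pytest.mark.parametrize(
#     "cluster_role,command,authorization,name",
#     rbac_test_file_reader(DATA_SUBDIR + "/rbac_v2_test_cases.json"))
# def test_cluster_role_rbac_example(cluster_role, command, authorization, name):
#     validate_cluster_role_rbac(cluster_role, command, authorization, name)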
def wait_until_app_v2_deployed(client, app_name, timeout=DEFAULT_APP_V2_TIMEOUT):
"""
    List all installed v2 apps and wait until "app_name" reaches the
    "deployed" state
    :param client: cluster client for the user
    :param app_name: app which is being installed
    :param timeout: time for the app to come to Deployed state
    :return: the list of installed app names once the app is deployed
"""
    start = time.time()
    apps = client.list_catalog_cattle_io_app()
    while True:
        app_list = []
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to Deployed")
        time.sleep(.5)
        for app in apps["data"]:
            app_list.append(app["metadata"]["name"])
            if app["metadata"]["name"] == app_name:
                if app["status"]["summary"]["state"] == "deployed":
                    return app_list
        apps = client.list_catalog_cattle_io_app()
def wait_until_app_v2_uninstall(client, app_name, timeout=DEFAULT_APP_V2_TIMEOUT):
"""
    list all installed apps and search for "app_name" in the list;
    if app_name is NOT in the list, the app has been uninstalled successfully
    :param client: cluster client for the user
    :param app_name: app which is being uninstalled
    :param timeout: time for the app to be uninstalled
"""
    start = time.time()
    apps = client.list_catalog_cattle_io_app()
    while True:
        app_list = []
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for state to get to Uninstalled")
        time.sleep(.5)
        for app in apps["data"]:
            app_list.append(app["metadata"]["name"])
        if app_name not in app_list:
            return app_list
        apps = client.list_catalog_cattle_io_app()
def check_v2_app_and_uninstall(client, chart_name):
app = client.list_catalog_cattle_io_app()
for app in app["data"]:
if app["metadata"]["name"] == chart_name:
response = client.action(obj=app, action_name="uninstall")
app_list = wait_until_app_v2_uninstall(client, chart_name)
            assert chart_name not in app_list, \
                "App has not been uninstalled"
def update_and_validate_kdm(kdm_url, admin_token=ADMIN_TOKEN,
rancher_api_url=CATTLE_API_URL):
print("Updating KDM to use {}".format(kdm_url))
header = {'Authorization': 'Bearer ' + admin_token}
api_url = rancher_api_url + "/settings/rke-metadata-config"
kdm_json = {
"name": "rke-metadata-config",
"value": json.dumps({
"refresh-interval-minutes": "1440",
"url": kdm_url
})
}
r = requests.put(api_url, verify=False, headers=header, json=kdm_json)
r_content = json.loads(r.content)
assert r.ok
assert r_content['name'] == kdm_json['name']
assert r_content['value'] == kdm_json['value']
time.sleep(2)
# Refresh Kubernetes Metadata
kdm_refresh_url = rancher_api_url + "/kontainerdrivers?action=refresh"
response = requests.post(kdm_refresh_url, verify=False, headers=header)
assert response.ok
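# Illustrative usage sketch (not part of the original suite): point the
# rke-metadata-config setting at a KDM data file and trigger a driver refresh.
# The URL below is a placeholder, not a value used by these tests.
# update_and_validate_kdm("https://example.com/kontainer-driver-metadata/data.json")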
| 109,519 | 35.912706 | 117 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_app.py
|
from .common import * # NOQA
import pytest
from .test_rbac import create_user
from .test_rke_cluster_provisioning import create_and_validate_custom_host
project_detail = {"cluster1": {"project1": None, "namespace1": None,
"project2": None, "namespace2": None,
"cluster": None},
"cluster2": {"project1": None, "namespace1": None,
"cluster": None}}
user_token = {"user_c1_p1_owner": {"user": None, "token": None},
"user_c1_p1_member": {"user": None, "token": None},
"user_c1_p2_owner": {"user": None, "token": None},
"user_c2_p1_owner": {"user": None, "token": None},
"user_c1_owner": {"user": None, "token": None},
"user_c1_member": {"user": None, "token": None},
"user_c2_owner": {"user": None, "token": None},
"user_standard": {"user": None, "token": None}}
CATALOG_NAME = random_test_name("test-catalog")
PROJECT_CATALOG = random_test_name("test-pj")
CLUSTER_CATALOG = random_test_name("test-cl")
CATALOG_URL = "https://github.com/rancher/integration-test-charts.git"
BRANCH = "validation-tests"
MYSQL_EXTERNALID_131 = create_catalog_external_id(CATALOG_NAME,
"mysql", "1.3.1")
MYSQL_EXTERNALID_132 = create_catalog_external_id(CATALOG_NAME,
"mysql",
"1.3.2")
WORDPRESS_EXTID = create_catalog_external_id(CATALOG_NAME,
"wordpress",
"7.3.8")
def cluster_and_client(cluster_id, mgmt_client):
cluster = mgmt_client.by_id_cluster(cluster_id)
url = cluster.links.self + '/schemas'
client = rancher.Client(url=url,
verify=False,
token=mgmt_client.token)
return cluster, client
def wait_for_template_to_be_created(client, name, timeout=45):
found = False
start = time.time()
interval = 0.5
while not found:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for templates")
templates = client.list_template(catalogId=name)
if len(templates) > 0:
found = True
time.sleep(interval)
interval *= 2
def check_condition(condition_type, status):
def _find_condition(resource):
if not hasattr(resource, "conditions"):
return False
if resource.conditions is None:
return False
for condition in resource.conditions:
if condition.type == condition_type and condition.status == status:
return True
return False
return _find_condition
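# Illustrative note (not part of the original suite): check_condition() builds a
# predicate for wait_for_condition(), e.g. waiting for a successful install:
#
# wait_for_condition(proj_client, app, check_condition('Installed', 'True'))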
@if_test_rbac
def test_tiller():
name = random_test_name()
admin_client = get_user_client()
clusters = admin_client.list_cluster(name=CLUSTER_NAME).data
assert len(clusters) > 0
cluster_id = clusters[0].id
p = admin_client. \
create_project(name="test-" + random_str(),
clusterId=cluster_id,
resourceQuota={
"limit": {
"secrets": "1"}},
namespaceDefaultResourceQuota={
"limit": {
"secrets": "1"}}
)
p = admin_client.reload(p)
proj_client = rancher.Client(url=p.links.self +
'/schemas', verify=False,
token=USER_TOKEN)
# need a cluster scoped client to create a namespace
_cluster, cluster_client = cluster_and_client(cluster_id, admin_client)
ns = cluster_client.create_namespace(name=random_str(),
projectId=p.id,
resourceQuota={
"limit": {
"secrets": "1"
}}
)
wait_for_template_to_be_created(admin_client, "library")
app = proj_client.create_app(
name=name,
externalId=WORDPRESS_EXTID,
targetNamespace=ns.name,
projectId=p.id,
answers=get_defaut_question_answers(admin_client, WORDPRESS_EXTID)
)
app = proj_client.reload(app)
    # verify that a bad install does not leave tiller stuck (app reports Installed=False)
wait_for_condition(proj_client, app, check_condition('Installed', 'False'))
# cleanup by deleting project
admin_client.delete(p)
@if_test_rbac
def test_app_deploy():
admin_client = get_user_client()
proj_client = get_project_client_for_token(
project_detail["cluster1"]["project1"],
USER_TOKEN)
answer = get_defaut_question_answers(
admin_client,
MYSQL_EXTERNALID_131)
wait_for_template_to_be_created(admin_client, "library")
app = proj_client.create_app(
name=random_test_name(),
externalId=MYSQL_EXTERNALID_131,
targetNamespace=project_detail["cluster1"]["namespace1"].name,
projectId=project_detail["cluster1"]["project1"].id,
answers=answer)
print("App is active")
validate_catalog_app(proj_client, app, MYSQL_EXTERNALID_131)
proj_client.delete(app)
@if_test_rbac
def test_app_delete():
admin_client = get_user_client()
proj_client = get_project_client_for_token(
project_detail["cluster1"]["project1"],
USER_TOKEN)
wait_for_template_to_be_created(admin_client, "library")
answer = get_defaut_question_answers(
admin_client,
MYSQL_EXTERNALID_131)
app = proj_client.create_app(
name=random_test_name(),
externalId=MYSQL_EXTERNALID_131,
targetNamespace=project_detail["cluster1"]["namespace1"].name,
projectId=project_detail["cluster1"]["project1"].id,
answers=answer)
print("App is active")
validate_catalog_app(proj_client, app, MYSQL_EXTERNALID_131)
app = proj_client.delete(app)
validate_app_deletion(proj_client, app.id)
@if_test_rbac
def test_app_upgrade_version():
admin_client = get_user_client()
proj_client = get_project_client_for_token(
project_detail["cluster1"]["project1"],
USER_TOKEN)
wait_for_template_to_be_created(admin_client, "library")
answer = get_defaut_question_answers(
admin_client,
MYSQL_EXTERNALID_131)
app = proj_client.create_app(
name=random_test_name(),
externalId=MYSQL_EXTERNALID_131,
targetNamespace=project_detail["cluster1"]["namespace1"].name,
projectId=project_detail["cluster1"]["project1"].id,
answers=answer)
print("App is active")
validate_catalog_app(proj_client, app, MYSQL_EXTERNALID_131)
new_answer = get_defaut_question_answers(
admin_client,
MYSQL_EXTERNALID_132)
app = proj_client.update(
obj=app,
externalId=MYSQL_EXTERNALID_132,
targetNamespace=project_detail["cluster1"]["namespace1"].name,
projectId=project_detail["cluster1"]["project1"].id,
answers=new_answer)
app = proj_client.reload(app)
validate_catalog_app(proj_client, app, MYSQL_EXTERNALID_132)
assert app.externalId == MYSQL_EXTERNALID_132, "incorrect template version"
proj_client.delete(app)
@if_test_rbac
def test_app_rollback():
admin_client = get_user_client()
proj_client = get_project_client_for_token(
project_detail["cluster1"]["project1"],
USER_TOKEN)
wait_for_template_to_be_created(admin_client, "library")
answer = get_defaut_question_answers(
admin_client,
MYSQL_EXTERNALID_131)
app = proj_client.create_app(
name=random_test_name(),
externalId=MYSQL_EXTERNALID_131,
targetNamespace=project_detail["cluster1"]["namespace1"].name,
projectId=project_detail["cluster1"]["project1"].id,
answers=answer)
print("App is active")
app = validate_catalog_app(proj_client, app, MYSQL_EXTERNALID_131)
rev_id = app.appRevisionId
new_answer = get_defaut_question_answers(
admin_client,
MYSQL_EXTERNALID_132)
app = proj_client.update(
obj=app,
externalId=MYSQL_EXTERNALID_132,
targetNamespace=project_detail["cluster1"]["namespace1"].name,
projectId=project_detail["cluster1"]["project1"].id,
answers=new_answer)
app = proj_client.reload(app)
app = validate_catalog_app(proj_client, app, MYSQL_EXTERNALID_132)
assert app.externalId == MYSQL_EXTERNALID_132, "incorrect template version"
proj_client.action(obj=app,
action_name='rollback',
revisionId=rev_id)
app = proj_client.reload(app)
validate_catalog_app(proj_client, app, MYSQL_EXTERNALID_131)
assert app.externalId == MYSQL_EXTERNALID_131, "incorrect template version"
proj_client.delete(app)
@if_test_rbac
def test_app_answer_override():
admin_client = get_user_client()
proj_client = get_project_client_for_token(
project_detail["cluster1"]["project1"],
USER_TOKEN)
wait_for_template_to_be_created(admin_client, "library")
answers = get_defaut_question_answers(
admin_client,
MYSQL_EXTERNALID_131)
app = proj_client.create_app(
name=random_test_name(),
externalId=MYSQL_EXTERNALID_131,
targetNamespace=project_detail["cluster1"]["namespace1"].name,
projectId=project_detail["cluster1"]["project1"].id,
answers=answers)
print("App is active")
app = validate_catalog_app(proj_client, app, MYSQL_EXTERNALID_131)
answers["mysqlUser"] = "admin1234"
app = proj_client.update(
obj=app,
externalId=MYSQL_EXTERNALID_131,
targetNamespace=project_detail["cluster1"]["namespace1"].name,
projectId=project_detail["cluster1"]["project1"].id,
answers=answers)
app = proj_client.reload(app)
app = validate_catalog_app(proj_client, app, MYSQL_EXTERNALID_131, answers)
assert app["answers"].mysqlUser == "admin1234", \
"incorrect answer upgrade"
proj_client.delete(app)
@if_test_rbac
def test_rbac_app_project_catalog_list_1():
catalog, project_catalog_external_id = create_project_catalog()
# Verify user_c1_p1_owner CAN list the added catalog
validate_user_list_catalog("user_c1_p1_owner", clustercatalog=False)
# deploy an app
proj_client_user = get_project_client_for_token(
project_detail["cluster1"]["project1"],
user_token["user_c1_p1_owner"]["token"])
validate_catalog_app_deploy(proj_client_user,
project_detail["cluster1"]["namespace1"].name,
project_detail["cluster1"]["project1"].id,
project_catalog_external_id)
@if_test_rbac
def test_rbac_app_project_catalog_list_2():
catalog, project_catalog_external_id = create_project_catalog()
# Verify user_c1_p1_member CAN list the added catalog
validate_user_list_catalog("user_c1_p1_member", clustercatalog=False)
proj_client_user2 = get_project_client_for_token(
project_detail["cluster1"]["project1"],
user_token["user_c1_p1_member"]["token"])
validate_catalog_app_deploy(proj_client_user2,
project_detail["cluster1"]["namespace1"].name,
project_detail["cluster1"]["project1"].id,
project_catalog_external_id)
@if_test_rbac
def test_rbac_app_project_catalog_list_3():
catalog, project_catalog_external_id = create_project_catalog()
# Verify user_c1_p2_owner CANNOT list the added catalog
validate_user_list_catalog("user_c1_p2_owner", False, False)
@if_test_rbac
def test_rbac_app_project_catalog_list_4():
catalog, project_catalog_external_id = create_project_catalog()
# Verify user_standard CANNOT list the added catalog
validate_user_list_catalog("user_standard", False, False)
@if_test_rbac
def test_rbac_app_project_catalog_list_5():
catalog, project_catalog_external_id = create_project_catalog()
# Verify user_c2_p1_owner CANNOT list the added catalog
validate_user_list_catalog("user_c2_p1_owner", False, False)
@if_test_rbac
def test_rbac_app_project_catalog_list_6():
catalog, project_catalog_external_id = create_project_catalog()
# Verify user_c1_owner CAN list the added catalog
validate_user_list_catalog("user_c1_owner", clustercatalog=False)
proj_client_user3 = get_project_client_for_token(
project_detail["cluster1"]["project1"],
user_token["user_c1_owner"]["token"])
validate_catalog_app_deploy(proj_client_user3,
project_detail["cluster1"]["namespace1"].name,
project_detail["cluster1"]["project1"].id,
project_catalog_external_id)
@if_test_rbac
def test_rbac_app_project_catalog_list_7():
catalog, project_catalog_external_id = create_project_catalog()
# Verify user_c1_member CANNOT list the added catalog
validate_user_list_catalog("user_c1_member", False, False)
@if_test_rbac
def test_rbac_app_project_catalog_list_8():
catalog, project_catalog_external_id = create_project_catalog()
# Verify user_c2_owner CANNOT list the added catalog
validate_user_list_catalog("user_c2_owner", False, False)
@if_test_rbac
def test_rbac_app_cluster_catalog_list_1():
catalog, cluster_catalog_external_id = create_cluster_catalog()
# verify user_c1_p1_owner CAN list the catalog
validate_user_list_catalog("user_c1_p1_owner")
proj_client_user = get_project_client_for_token(
project_detail["cluster1"]["project1"],
user_token["user_c1_p1_owner"]["token"])
validate_catalog_app_deploy(proj_client_user,
project_detail["cluster1"]["namespace1"].name,
project_detail["cluster1"]["project1"].id,
cluster_catalog_external_id)
@if_test_rbac
def test_rbac_app_cluster_catalog_list_2():
catalog, cluster_catalog_external_id = create_cluster_catalog()
# verify user_c1_p1_member CAN list the catalog
validate_user_list_catalog("user_c1_p1_member")
proj_client_user = get_project_client_for_token(
project_detail["cluster1"]["project1"],
user_token["user_c1_p1_member"]["token"])
validate_catalog_app_deploy(proj_client_user,
project_detail["cluster1"]["namespace1"].name,
project_detail["cluster1"]["project1"].id,
cluster_catalog_external_id)
@if_test_rbac
def test_rbac_app_cluster_catalog_list_3():
catalog, cluster_catalog_external_id = create_cluster_catalog()
# verify user_c1_p2_owner CAN list the catalog
validate_user_list_catalog("user_c1_p2_owner")
proj_client_user = get_project_client_for_token(
project_detail["cluster1"]["project2"],
user_token["user_c1_p2_owner"]["token"])
validate_catalog_app_deploy(proj_client_user,
project_detail["cluster1"]["namespace2"].name,
project_detail["cluster1"]["project2"].id,
cluster_catalog_external_id)
@if_test_rbac
def test_rbac_app_cluster_catalog_list_4():
catalog, cluster_catalog_external_id = create_cluster_catalog()
# verify user_c2_p1_owner CANNOT list the catalog
validate_user_list_catalog("user_c2_p1_owner", False)
@if_test_rbac
def test_rbac_app_cluster_catalog_list_5():
catalog, cluster_catalog_external_id = create_cluster_catalog()
# verify user_standard CANNOT list the catalog
validate_user_list_catalog("user_standard", False)
@if_test_rbac
def test_rbac_app_cluster_catalog_list_6():
catalog, cluster_catalog_external_id = create_cluster_catalog()
# user_c1_owner CAN list the catalog
validate_user_list_catalog("user_c1_owner")
proj_client_user = get_project_client_for_token(
project_detail["cluster1"]["project1"],
user_token["user_c1_owner"]["token"])
validate_catalog_app_deploy(proj_client_user,
project_detail["cluster1"]["namespace1"].name,
project_detail["cluster1"]["project1"].id,
cluster_catalog_external_id)
@if_test_rbac
def test_rbac_app_cluster_catalog_list_7():
catalog, cluster_catalog_external_id = create_cluster_catalog()
# user_c1_member CAN list the catalog
validate_user_list_catalog("user_c1_member")
p3, n3 = create_project_and_ns(
user_token["user_c1_member"]["token"],
project_detail["cluster1"]["cluster"],
random_test_name("testapp"))
proj_client_user = get_project_client_for_token(
p3, user_token["user_c1_member"]["token"])
validate_catalog_app_deploy(proj_client_user,
n3.name,
p3.id,
cluster_catalog_external_id)
user_client = get_user_client()
user_client.delete(p3)
@if_test_rbac
def test_rbac_app_cluster_catalog_list_8():
catalog, cluster_catalog_external_id = create_cluster_catalog()
# user_c2_owner CANNOT list the catalog
validate_user_list_catalog("user_c2_owner", False)
@if_test_rbac
def test_rbac_app_project_scope_delete_1():
catalog, cluster_catalog_external_id = create_project_catalog()
# Verify user_c1_p1_owner CAN delete the added catalog
validate_catalog_deletion(catalog, "user_c1_p1_owner", True, False)
@if_test_rbac
def test_rbac_app_project_scope_delete_2():
catalog, cluster_catalog_external_id = create_project_catalog()
# Verify user_c1_p1_member CANNOT delete the added catalog
validate_catalog_deletion(catalog, "user_c1_p1_member", False, False)
@if_test_rbac
def test_rbac_app_project_scope_delete_3():
catalog, cluster_catalog_external_id = create_project_catalog()
# Verify user_c1_p1_member CANNOT delete the added catalog
validate_catalog_deletion(catalog, "user_c1_p1_member", False, False)
@if_test_rbac
def test_rbac_app_project_scope_delete_4():
catalog, cluster_catalog_external_id = create_project_catalog()
# Verify user_c1_p2_owner CANNOT delete the added catalog
validate_catalog_deletion(catalog, "user_c1_p2_owner", False, False)
@if_test_rbac
def test_rbac_app_project_scope_delete_5():
catalog, cluster_catalog_external_id = create_project_catalog()
# Verify user_c2_p1_owner CANNOT delete the added catalog
validate_catalog_deletion(catalog, "user_c2_p1_owner", False, False)
@if_test_rbac
def test_rbac_app_project_scope_delete_6():
catalog, cluster_catalog_external_id = create_project_catalog()
# Verify user_c1_owner CAN delete the added catalog
validate_catalog_deletion(catalog, "user_c1_owner", True, False)
@if_test_rbac
def test_rbac_app_project_scope_delete_7():
catalog, cluster_catalog_external_id = create_project_catalog()
# Verify user_c1_member CANNOT delete the added catalog
validate_catalog_deletion(catalog, "user_c1_member", False, False)
@if_test_rbac
def test_rbac_app_project_scope_delete_8():
catalog, cluster_catalog_external_id = create_project_catalog()
# Verify user_c2_owner CANNOT delete the added catalog
validate_catalog_deletion(catalog, "user_c2_owner", False, False)
@if_test_rbac
def test_rbac_app_project_scope_delete_9():
catalog, cluster_catalog_external_id = create_project_catalog()
# Verify user_standard CANNOT delete the added catalog
validate_catalog_deletion(catalog, "user_standard", False, False)
@if_test_rbac
def test_rbac_app_cluster_scope_delete_1():
catalog, cluster_catalog_external_id = create_cluster_catalog()
# verify user_c1_p1_owner CANNOT delete the catalog
validate_catalog_deletion(catalog, "user_c1_p1_owner", False)
@if_test_rbac
def test_rbac_app_cluster_scope_delete_2():
catalog, cluster_catalog_external_id = create_cluster_catalog()
# verify user_c1_p1_member CANNOT delete the catalog
validate_catalog_deletion(catalog, "user_c1_p1_member", False)
@if_test_rbac
def test_rbac_app_cluster_scope_delete_3():
catalog, cluster_catalog_external_id = create_cluster_catalog()
# verify user_c1_p2_owner CANNOT delete the catalog
validate_catalog_deletion(catalog, "user_c1_p2_owner", False)
@if_test_rbac
def test_rbac_app_cluster_scope_delete_4():
catalog, cluster_catalog_external_id = create_cluster_catalog()
# verify user_c2_p1_owner CANNOT delete the catalog
validate_catalog_deletion(catalog, "user_c2_p1_owner", False)
@if_test_rbac
def test_rbac_app_cluster_scope_delete_5():
catalog, cluster_catalog_external_id = create_cluster_catalog()
# verify user_c1_owner CAN delete the catalog
validate_catalog_deletion(catalog, "user_c1_owner", True)
@if_test_rbac
def test_rbac_app_cluster_scope_delete_6():
catalog, cluster_catalog_external_id = create_cluster_catalog()
# verify user_c1_member CANNOT delete the catalog
validate_catalog_deletion(catalog, "user_c1_member", False)
@if_test_rbac
def test_rbac_app_cluster_scope_delete_7():
catalog, cluster_catalog_external_id = create_cluster_catalog()
# verify user_c2_owner CANNOT delete the catalog
validate_catalog_deletion(catalog, "user_c2_owner", False)
@if_test_rbac
def test_rbac_app_cluster_scope_delete_8():
catalog, cluster_catalog_external_id = create_cluster_catalog()
# verify user_standard CANNOT delete the catalog
validate_catalog_deletion(catalog, "user_standard", False)
@pytest.fixture(scope='module', autouse="True")
def create_project_client(request):
user_token["user_c1_p1_owner"]["user"] = \
rbac_data["users"][PROJECT_OWNER]["user"]
user_token["user_c1_p1_owner"]["token"] = \
rbac_data["users"][PROJECT_OWNER]["token"]
user_token["user_c1_p1_member"]["user"] = \
rbac_data["users"][PROJECT_MEMBER]["user"]
user_token["user_c1_p1_member"]["token"] = \
rbac_data["users"][PROJECT_MEMBER]["token"]
user_token["user_c1_owner"]["user"] = \
rbac_data["users"][CLUSTER_OWNER]["user"]
user_token["user_c1_owner"]["token"] = \
rbac_data["users"][CLUSTER_OWNER]["token"]
user_token["user_c1_member"]["user"] = \
rbac_data["users"][CLUSTER_MEMBER]["user"]
user_token["user_c1_member"]["token"] = \
rbac_data["users"][CLUSTER_MEMBER]["token"]
# create a cluster
node_roles = [["controlplane", "etcd", "worker"],
["worker"], ["worker"]]
cluster_list = []
cluster, aws_nodes = create_and_validate_custom_host(node_roles, True)
client, cluster_existing = get_user_client_and_cluster()
admin_client = get_admin_client()
cluster_list.append(cluster_existing)
cluster_list.append(cluster)
assert len(cluster_list) > 1
project_detail["cluster1"]["project2"], \
project_detail["cluster1"]["namespace2"] = \
create_project_and_ns(USER_TOKEN,
cluster_list[0],
random_test_name("testapp"))
project_detail["cluster2"]["project1"], \
project_detail["cluster2"]["namespace1"] = \
create_project_and_ns(USER_TOKEN,
cluster_list[1],
random_test_name("testapp"))
project_detail["cluster1"]["cluster"] = cluster_list[0]
project_detail["cluster2"]["cluster"] = cluster_list[1]
catalog = admin_client.create_catalog(
name=CATALOG_NAME,
baseType="catalog",
branch=BRANCH,
kind="helm",
url=CATALOG_URL)
time.sleep(5)
user_c_client = get_cluster_client_for_token(cluster_list[0], USER_TOKEN)
project_detail["cluster1"]["project1"] = rbac_data["project"]
project_detail["cluster1"]["namespace1"] = \
create_ns(user_c_client, cluster_list[0], rbac_data["project"])
# create users
user_token["user_c1_p2_owner"]["user"], \
user_token["user_c1_p2_owner"]["token"] = create_user(admin_client)
user_token["user_c2_p1_owner"]["user"], \
user_token["user_c2_p1_owner"]["token"] = create_user(admin_client)
user_token["user_c2_owner"]["user"], \
user_token["user_c2_owner"]["token"] = create_user(admin_client)
user_token["user_standard"]["user"], \
user_token["user_standard"]["token"] = create_user(admin_client)
# Assign roles to the users
assign_members_to_project(admin_client,
user_token["user_c1_p2_owner"]["user"],
project_detail["cluster1"]["project2"],
"project-owner")
assign_members_to_project(admin_client,
user_token["user_c2_p1_owner"]["user"],
project_detail["cluster2"]["project1"],
"project-owner")
assign_members_to_cluster(admin_client,
user_token["user_c2_owner"]["user"],
project_detail["cluster2"]["cluster"],
"cluster-owner")
def fin():
admin_client.delete(project_detail["cluster1"]["project2"])
admin_client.delete(project_detail["cluster2"]["project1"])
admin_client.delete(catalog)
admin_client.delete(user_token["user_c1_p2_owner"]["user"])
admin_client.delete(user_token["user_c2_p1_owner"]["user"])
admin_client.delete(user_token["user_c2_owner"]["user"])
admin_client.delete(user_token["user_standard"]["user"])
admin_client.delete(cluster)
if aws_nodes is not None:
delete_node(aws_nodes)
request.addfinalizer(fin)
def validate_user_list_catalog(user, listcatalog=True, clustercatalog=True):
user_client = get_client_for_token(
user_token[user]["token"])
if clustercatalog:
catalogs_list = user_client.list_clusterCatalog(name=CLUSTER_CATALOG)
catalogName = CLUSTER_CATALOG
else:
catalogs_list = user_client.list_projectCatalog(name=PROJECT_CATALOG)
catalogName = PROJECT_CATALOG
if listcatalog:
print("Length of catalog list:", len(catalogs_list))
assert len(catalogs_list) == 1, \
"Catalog not found for the user"
assert catalogs_list["data"][0]["name"] == catalogName, \
"Incorrect catalog found"
else:
assert len(catalogs_list) == 0, \
"Catalog found for the user"
def validate_catalog_app_deploy(proj_client_user, namespace,
projectid, catalog_ext_id):
try:
app = proj_client_user.create_app(name=random_test_name(),
externalId=catalog_ext_id,
answers=get_defaut_question_answers(
get_user_client(),
catalog_ext_id),
targetNamespace=namespace,
projectId=projectid)
except:
assert False, "User is not able to deploy app from catalog"
validate_catalog_app(proj_client_user, app, catalog_ext_id)
proj_client_user.delete(app)
def validate_catalog_deletion(catalog,
user, candelete=True, clustercatalog=True):
user_client = get_client_for_token(user_token[user]["token"])
catalog_name = catalog.name
if not candelete:
with pytest.raises(ApiError) as e:
user_client.delete(catalog)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
else:
user_client.delete(catalog)
if clustercatalog:
catalogs_list = user_client.list_clusterCatalog(name=catalog_name)
else:
catalogs_list = user_client.list_projectCatalog(name=catalog_name)
assert len(catalogs_list) == 0, \
"Catalog has not been deleted for the user"
def create_project_catalog():
"""create a catalog by user1 at the project level
and allow other users to access it"""
added_catalog = None
catalog_external_id = None
user_client = get_user_client()
catalogs_list = user_client.list_projectCatalog(name=PROJECT_CATALOG)
pId = project_detail["cluster1"]["project1"].id.split(":")[1]
if len(catalogs_list["data"]) != 0:
catalog_proj_scoped_ext_id = \
create_catalog_external_id(catalogs_list["data"][0]["name"],
"mysql",
"1.3.2",
pId,
"project")
added_catalog = catalogs_list["data"][0]
catalog_external_id = catalog_proj_scoped_ext_id
else:
catalog = user_client.create_projectCatalog(
name=PROJECT_CATALOG,
baseType="projectCatalog",
branch=BRANCH,
url=CATALOG_URL,
projectId=project_detail["cluster1"]["project1"].id)
time.sleep(10)
assert catalog.state == "active", "Catalog is not in Active state."
catalog_proj_scoped_ext_id = \
create_catalog_external_id(catalog.name,
"mysql",
"1.3.2",
pId,
"project")
print(catalog_proj_scoped_ext_id)
answers = get_defaut_question_answers(
user_client,
catalog_proj_scoped_ext_id)
proj_client = get_project_client_for_token(
project_detail["cluster1"]["project1"],
USER_TOKEN)
app = proj_client.create_app(
name=random_test_name(),
externalId=catalog_proj_scoped_ext_id,
answers=answers,
targetNamespace=project_detail["cluster1"]["namespace1"].name,
projectId=project_detail["cluster1"]["project1"].id)
validate_catalog_app(proj_client, app, catalog_proj_scoped_ext_id)
proj_client.delete(app)
added_catalog = catalog
catalog_external_id = catalog_proj_scoped_ext_id
return added_catalog, catalog_external_id
def create_cluster_catalog():
"""create a catalog by user1 at the cluster level
and allow other users to access it"""
added_catalog = None
catalog_external_id = None
user_client = get_user_client()
catalogs_list = user_client.list_clusterCatalog(name=CLUSTER_CATALOG)
pId = project_detail["cluster1"]["cluster"].id
# catalog = catalogs_list[0]
if len(catalogs_list["data"]) != 0:
catalog_cluster_scoped_ext_id = \
create_catalog_external_id(catalogs_list["data"][0]["name"],
"mysql",
"1.3.2",
pId,
"cluster")
added_catalog = catalogs_list["data"][0]
catalog_external_id = catalog_cluster_scoped_ext_id
else:
proj_client = get_project_client_for_token(
project_detail["cluster1"]["project1"],
USER_TOKEN)
print(project_detail["cluster1"]["cluster"].id)
print(project_detail["cluster1"]["cluster"])
catalog = user_client.create_clusterCatalog(
name=CLUSTER_CATALOG,
baseType="clustercatalog",
branch=BRANCH,
url=CATALOG_URL,
clusterId=project_detail["cluster1"]["cluster"].id)
time.sleep(10)
assert catalog.state == "active", "Catalog is not in Active state."
catalog_cluster_scoped_ext_id = \
create_catalog_external_id(catalog.name,
"mysql",
"1.3.2",
pId,
"cluster")
answers = get_defaut_question_answers(
user_client,
catalog_cluster_scoped_ext_id)
app = proj_client.create_app(
name=random_test_name(),
externalId=catalog_cluster_scoped_ext_id,
answers=answers,
targetNamespace=project_detail["cluster1"]["namespace1"].name,
projectId=project_detail["cluster1"]["project1"].id)
validate_catalog_app(proj_client, app, catalog_cluster_scoped_ext_id)
proj_client.delete(app)
added_catalog = catalog
catalog_external_id = catalog_cluster_scoped_ext_id
return added_catalog, catalog_external_id
| 33,155 | 39.189091 | 79 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_bkp_restore_s3_with_creds.py
|
import pytest
from .common import * # NOQA
from .test_rke_cluster_provisioning import rke_config, validate_rke_dm_host_2,\
node_template_linode
namespace = {"p_client": None, "ns": None, "cluster": None, "project": None,
"nodes": []}
backup_info = {"backupname": None, "backup_id": None, "workload": None,
"backupfilename": None, "etcdbackupdata": None}
@if_test_all_snapshot
def test_bkp_restore_s3_with_creds_create():
validate_backup_create(namespace, backup_info, "s3")
@if_test_all_snapshot
def test_bkp_restore_s3_with_creds_restore():
ns, binfo = validate_backup_create(namespace, backup_info, "s3")
validate_backup_restore(ns, binfo)
@if_test_all_snapshot
def test_bkp_restore_s3_with_creds_delete():
ns, binfo = validate_backup_create(namespace, backup_info, "s3")
ns, binfo = validate_backup_restore(ns, binfo)
validate_backup_delete(ns, binfo, "s3")
@pytest.fixture(scope='module', autouse="True")
def create_project_client_and_cluster_s3_with_creds(node_template_linode,
request):
rke_config["services"]["etcd"]["backupConfig"] = {
"enabled": "true",
"intervalHours": 12,
"retention": 6,
"type": "backupConfig",
"s3BackupConfig": {
"type": "s3BackupConfig",
"accessKey": AWS_ACCESS_KEY_ID,
"secretKey": AWS_SECRET_ACCESS_KEY,
"bucketName": AWS_S3_BUCKET_NAME,
"folder": AWS_S3_BUCKET_FOLDER_NAME,
"region": AWS_REGION,
"endpoint": "s3.amazonaws.com"
}
}
cluster_name = random_name()
validate_rke_dm_host_2(node_template_linode,
rke_config, False, cluster_name)
client = get_user_client()
cluster = get_cluster_by_name(client, cluster_name)
p, ns = create_project_and_ns(USER_TOKEN, cluster, "testnoiam")
p_client = get_project_client_for_token(p, USER_TOKEN)
c_client = get_cluster_client_for_token(cluster, USER_TOKEN)
namespace["p_client"] = p_client
namespace["ns"] = ns
namespace["cluster"] = cluster
namespace["project"] = p
namespace["c_client"] = c_client
def fin():
client = get_user_client()
cluster_cleanup(client, cluster)
request.addfinalizer(fin)
| 2,332 | 33.308824 | 79 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_cert_rotation.py
|
import pytest
import datetime
import time
import os
import ast
from .common import rbac_get_user_token_by_role
from .common import get_client_for_token
from .common import get_user_client_and_cluster
from .common import validate_cluster_state
from .common import get_etcd_nodes
from rancher import ApiError
# Globals
# Master list of all certs
ALL_CERTS = ["kube-apiserver", "kube-controller-manager",
"kube-node", "kube-proxy", "kube-scheduler",
"kube-etcd", "kube-ca"]
TEST_RBAC = ast.literal_eval(os.environ.get('RANCHER_TEST_RBAC', "False"))
if_test_rbac = pytest.mark.skipif(TEST_RBAC is False,
reason='rbac tests are skipped')
CLUSTER_NAME = os.environ.get("RANCHER_CLUSTER_NAME", "")
# here are all supported roles for RBAC testing
CLUSTER_MEMBER = "cluster-member"
CLUSTER_OWNER = "cluster-owner"
PROJECT_MEMBER = "project-member"
PROJECT_OWNER = "project-owner"
PROJECT_READ_ONLY = "read-only"
rbac_data = {
"project": None,
"namespace": None,
"workload": None,
"p_unshared": None,
"ns_unshared": None,
"wl_unshared": None,
"users": {
CLUSTER_OWNER: {},
CLUSTER_MEMBER: {},
PROJECT_OWNER: {},
PROJECT_MEMBER: {},
PROJECT_READ_ONLY: {},
}
}
# --------------------- rbac test -----------------------
@if_test_rbac
@pytest.mark.parametrize("role", [CLUSTER_MEMBER,
PROJECT_MEMBER, PROJECT_OWNER,
PROJECT_READ_ONLY, CLUSTER_OWNER])
def test_rbac_cert_rotation(role):
user_token = rbac_get_user_token_by_role(role)
user_client = get_client_for_token(user_token)
user_cluster = user_client.list_cluster(name=CLUSTER_NAME).data[0]
if role == CLUSTER_OWNER:
now = datetime.datetime.now()
user_cluster.rotateCertificates()
changed = ALL_CERTS.copy()
changed.remove("kube-ca")
client, cluster = get_user_client_and_cluster()
validate_cluster_state(client, cluster,
intermediate_state="updating")
certs2 = get_certs()
compare_changed(certs2, now, changed)
return None
with pytest.raises(ApiError) as e:
user_cluster.rotateCertificates()
assert e.value.error.status == 403
assert e.value.error.code == 'PermissionDenied'
def test_rotate_all_certs():
changed = ALL_CERTS.copy()
changed.remove("kube-ca")
unchanged = ["kube-ca"]
rotate_and_compare(unchanged, changed)
def test_rotate_kube_apiserver():
changed = ["kube-apiserver"]
unchanged = ALL_CERTS.copy()
unchanged.remove("kube-apiserver")
rotate_and_compare(unchanged, changed, "kube-apiserver")
def test_rotate_kube_controller_manager():
changed = ["kube-controller-manager"]
unchanged = ALL_CERTS.copy()
unchanged.remove("kube-controller-manager")
rotate_and_compare(unchanged, changed, "kube-controller-manager")
def test_rotate_kube_etcd():
changed = ["kube-etcd"]
unchanged = ALL_CERTS.copy()
unchanged.remove("kube-etcd")
rotate_and_compare(unchanged, changed, "etcd")
def test_rotate_kube_node():
changed = ["kube-node"]
unchanged = ALL_CERTS.copy()
unchanged.remove("kube-node")
rotate_and_compare(unchanged, changed, "kubelet")
def test_rotate_kube_proxy():
changed = ["kube-proxy"]
unchanged = ALL_CERTS.copy()
unchanged.remove("kube-proxy")
rotate_and_compare(unchanged, changed, "kube-proxy")
def test_rotate_kube_scheduler():
changed = ["kube-scheduler"]
unchanged = ALL_CERTS.copy()
unchanged.remove("kube-scheduler")
rotate_and_compare(unchanged, changed, "kube-scheduler")
def test_rotate_kube_ca():
changed = ALL_CERTS
unchanged = []
rotate_and_compare(unchanged, changed, "kube-ca")
# Gets the certificate expiration date and cert name. Stores them in a dict.
def get_certs():
certs = {}
client, cluster = get_user_client_and_cluster()
for key in cluster.certificatesExpiration:
if "kube-etcd" not in key:
certs[key] = parse_datetime(cluster.certificatesExpiration[key]
["expirationDate"])
# Get etcd node certs from node IP
nodes = get_etcd_nodes(cluster)
for node in nodes:
if node["labels"]["node-role.kubernetes.io/etcd"] == "true":
ipKey = "kube-etcd-"+node["ipAddress"].replace(".", "-")
certs[ipKey] = parse_datetime(cluster.certificatesExpiration[ipKey]
["expirationDate"])
return certs
# Turn expiration string into datetime
def parse_datetime(expiration_string):
return datetime.datetime.strptime(expiration_string, '%Y-%m-%dT%H:%M:%SZ')
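# Illustrative note (not part of the original suite): certificate expirations are
# reported as UTC strings in this layout, e.g.
# parse_datetime("2033-01-01T00:00:00Z") == datetime.datetime(2033, 1, 1, 0, 0)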
def compare_changed(certs2, time_now, changed):
if "kube-etcd" in changed:
for key in certs2:
if "kube-etcd" in key:
changed.append(key)
changed.remove("kube-etcd")
for i in changed:
assert(certs2[i] > (time_now + datetime.timedelta(days=3650)))
def compare_unchanged(certs1, certs2, unchanged):
if "kube-etcd" in unchanged:
for key in certs2:
if "kube-etcd" in key:
unchanged.append(key)
unchanged.remove("kube-etcd")
for i in unchanged:
assert(certs1[i] == certs2[i])
def rotate_certs(service=""):
client, cluster = get_user_client_and_cluster()
if service:
if service == "kube-ca":
cluster.rotateCertificates(caCertificates=True)
else:
cluster.rotateCertificates(services=service)
else:
cluster.rotateCertificates()
def rotate_and_compare(unchanged, changed, service=""):
client, cluster = get_user_client_and_cluster()
# Grab certs before rotation
certs1 = get_certs()
now = datetime.datetime.now()
# Rotate certs
rotate_certs(service)
# wait for cluster to update
cluster = validate_cluster_state(client, cluster,
intermediate_state="updating")
if service == "kube-ca":
time.sleep(60)
# Grab certs after rotate
certs2 = get_certs()
# Checks the new certs against old certs.
compare_changed(certs2, now, changed)
compare_unchanged(certs1, certs2, unchanged)
time.sleep(120)
# get all nodes and assert status
nodes = client.list_node(clusterId=cluster.id).data
for node in nodes:
if node["state"] != "active":
raise AssertionError(
"Timed out waiting for state to get to active")
| 6,593 | 31.323529 | 79 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_hpa.py
|
import pytest
from rancher import ApiError
from .common import * # NOQA
namespace = {"p_client": None, "ns": None, "cluster": None, "project": None}
def test_create_hpa():
p_client = namespace["p_client"]
ns = namespace["ns"]
hpa, workload = create_hpa(p_client, ns)
p_client.delete(hpa, workload)
def test_edit_hpa():
p_client = namespace["p_client"]
ns = namespace["ns"]
hpa, workload = edit_hpa(p_client, ns)
p_client.delete(hpa, workload)
def test_delete_hpa():
p_client = namespace["p_client"]
ns = namespace["ns"]
hpa, workload = create_hpa(p_client, ns)
delete_hpa(p_client, hpa, ns)
p_client.delete(workload)
rbac_role_list = [
(CLUSTER_OWNER),
(PROJECT_OWNER),
(PROJECT_MEMBER),
(PROJECT_READ_ONLY),
(CLUSTER_MEMBER),
]
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_hpa_create(role, remove_resource):
user_project = None
if(role == CLUSTER_MEMBER):
user_token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
user_project, ns = create_project_and_ns(user_token,
namespace["cluster"],
random_test_name(
"cluster-mem"))
p_client = get_project_client_for_token(user_project, user_token)
else:
user_token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
p_client = get_project_client_for_token(project, user_token)
if (role != PROJECT_READ_ONLY):
newhpa, newworkload = create_hpa(p_client, ns)
remove_resource(newhpa)
remove_resource(newworkload)
else:
project = rbac_get_project()
ns = rbac_get_namespace()
user_token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
readonly_user_client = get_project_client_for_token(project,
user_token)
# Verify Read Only member cannot create hpa objects
with pytest.raises(ApiError) as e:
create_hpa(readonly_user_client, ns)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
if(user_project is not None):
remove_resource(user_project)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_hpa_create_negative(role, remove_resource):
if (role == CLUSTER_OWNER):
print(role)
unshared_project = rbac_get_unshared_project()
ns = rbac_get_unshared_ns()
user_token = rbac_get_user_token_by_role(role)
p_client = get_project_client_for_token(unshared_project, user_token)
hpa, workload = create_hpa(p_client, ns)
remove_resource(hpa)
remove_resource(workload)
else:
unshared_project = rbac_get_unshared_project()
ns = rbac_get_unshared_ns()
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
owner_client = get_project_client_for_token(unshared_project,
cluster_owner_token)
# Workload created by cluster owner in unshared project is passed as
# parameter to create HPA
workload = create_workload(owner_client, ns)
user_token = rbac_get_user_token_by_role(role)
p_client = get_project_client_for_token(unshared_project, user_token)
with pytest.raises(ApiError) as e:
create_hpa(p_client, ns, workload=workload)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
remove_resource(workload)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_hpa_edit(role, remove_resource):
if (role == PROJECT_READ_ONLY):
verify_hpa_project_readonly_edit(remove_resource)
elif(role == CLUSTER_MEMBER):
verify_hpa_cluster_member_edit(remove_resource)
else:
user_token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
p_client = get_project_client_for_token(project, user_token)
hpa, workload = edit_hpa(p_client, ns)
remove_resource(hpa)
remove_resource(workload)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_hpa_edit_negative(role, remove_resource):
if (role == CLUSTER_OWNER):
unshared_project = rbac_get_unshared_project()
ns = rbac_get_unshared_ns()
user_token = rbac_get_user_token_by_role(role)
p_client = get_project_client_for_token(unshared_project, user_token)
hpa, workload = edit_hpa(p_client, ns)
remove_resource(hpa)
remove_resource(workload)
else:
unshared_project = rbac_get_unshared_project()
user_token = rbac_get_user_token_by_role(role)
unshared_ns = rbac_get_unshared_ns()
user_client = get_project_client_for_token(unshared_project,
user_token)
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
# Cluster owner client created in the unshared project
cluster_owner_p_client = \
get_project_client_for_token(unshared_project, cluster_owner_token)
        # Verify that this role cannot edit the hpa created by the cluster owner
verify_edit_forbidden(user_client, remove_resource,
cluster_owner_client=cluster_owner_p_client,
ns=unshared_ns)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_hpa_delete(role, remove_resource):
user_project = None
if(role == CLUSTER_MEMBER):
user_token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
user_project, ns = create_project_and_ns(user_token,
namespace["cluster"],
random_test_name(
"cluster-mem"))
p_client = get_project_client_for_token(user_project, user_token)
else:
user_token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
p_client = get_project_client_for_token(project, user_token)
if (role != PROJECT_READ_ONLY):
hpa, workload = create_hpa(p_client, ns)
delete_hpa(p_client, hpa, ns)
remove_resource(workload)
remove_resource(hpa)
if user_project is not None:
remove_resource(user_project)
if (role == PROJECT_READ_ONLY):
project = rbac_get_project()
ns = rbac_get_namespace()
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
cluster_owner_p_client = \
get_project_client_for_token(project, cluster_owner_token)
user_token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
user_client = get_project_client_for_token(project, user_token)
# As a Cluster owner create a HPA object
hpa, workload = create_hpa(cluster_owner_p_client, ns)
# Verify that the Read Only member cannot delete the HPA objects
# created by Cluster Owner
with pytest.raises(ApiError) as e:
delete_hpa(user_client, hpa, ns)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
remove_resource(hpa)
remove_resource(workload)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_hpa_delete_negative(role, remove_resource):
if (role == CLUSTER_OWNER):
print(role)
unshared_project = rbac_get_unshared_project()
ns = rbac_get_unshared_ns()
user_token = rbac_get_user_token_by_role(role)
p_client = get_project_client_for_token(unshared_project, user_token)
hpa, workload = create_hpa(p_client, ns)
delete_hpa(p_client, hpa, ns)
remove_resource(hpa)
remove_resource(workload)
else:
unshared_project = rbac_get_unshared_project()
ns = rbac_get_unshared_ns()
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
owner_client = get_project_client_for_token(unshared_project,
cluster_owner_token)
workload = create_workload(owner_client, ns)
user_token = rbac_get_user_token_by_role(role)
# Workload created by cluster owner in unshared project is passed as
# parameter to create HPA
hpa, workload = create_hpa(owner_client, ns, workload=workload)
p_client = get_project_client_for_token(unshared_project, user_token)
with pytest.raises(ApiError) as e:
delete_hpa(p_client, hpa, ns)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
remove_resource(hpa)
remove_resource(workload)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_hpa_list(remove_resource, role):
user_project = None
if(role == CLUSTER_MEMBER):
cluster_member_token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
user_project, ns = \
create_project_and_ns(cluster_member_token,
namespace["cluster"],
random_test_name("cluster-mem"))
user_client = get_project_client_for_token(user_project,
cluster_member_token)
        # As a cluster member, create an HPA and verify it can be listed
hpa, workload = create_hpa(user_client, ns)
else:
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
project = rbac_get_project()
cluster_owner_p_client = \
get_project_client_for_token(project, cluster_owner_token)
user_token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
user_client = get_project_client_for_token(project, user_token)
hpa, workload = create_hpa(cluster_owner_p_client, ns)
hpaname = hpa.name
hpadict = user_client.list_horizontalPodAutoscaler(name=hpaname)
print(hpadict)
hpadata = hpadict.get('data')
assert len(hpadata) == 1
assert hpadata[0].type == "horizontalPodAutoscaler"
assert hpadata[0].name == hpaname
remove_resource(hpa)
remove_resource(workload)
    if user_project is not None:
remove_resource(user_project)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_hpa_list_negative(remove_resource, role):
if (role == CLUSTER_OWNER):
unshared_project = rbac_get_unshared_project()
ns = rbac_get_unshared_ns()
user_token = rbac_get_user_token_by_role(role)
p_client = get_project_client_for_token(unshared_project, user_token)
hpa, workload = create_hpa(p_client, ns)
hpaname = hpa.name
hpadict = p_client.list_horizontalPodAutoscaler(name=hpaname)
hpadata = hpadict.get('data')
assert len(hpadata) == 1
assert hpadata[0].type == "horizontalPodAutoscaler"
assert hpadata[0].name == hpaname
remove_resource(hpa)
remove_resource(workload)
else:
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
unshared_project = rbac_get_unshared_project()
ns = rbac_get_unshared_ns()
cluster_owner_client = \
get_project_client_for_token(unshared_project, cluster_owner_token)
user_token = rbac_get_user_token_by_role(role)
user_client = get_project_client_for_token(unshared_project,
user_token)
hpa, workload = create_hpa(cluster_owner_client, ns)
hpaname = hpa.name
# Verify length of HPA list is zero
hpadict = user_client.list_horizontalPodAutoscaler(name=hpaname)
hpadata = hpadict.get('data')
assert len(hpadata) == 0
remove_resource(hpa)
remove_resource(workload)
def verify_hpa_cluster_member_edit(remove_resource):
cluster_member_token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
user_project, ns = create_project_and_ns(cluster_member_token,
namespace["cluster"],
random_test_name("cluster-mem"))
cluster_member_client = get_project_client_for_token(user_project,
cluster_member_token)
# Verify the cluster member can edit the hpa he created
hpa, workload = edit_hpa(cluster_member_client, ns)
# Verify that cluster member cannot edit the hpa created by cluster owner
verify_edit_forbidden(cluster_member_client, remove_resource)
remove_resource(hpa)
remove_resource(workload)
remove_resource(user_project)
def verify_hpa_project_readonly_edit(remove_resource):
project = rbac_get_project()
user_token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
readonly_user_client = get_project_client_for_token(project, user_token)
    # Verify the read-only user cannot edit the hpa created by cluster owner
verify_edit_forbidden(readonly_user_client, remove_resource)
def verify_edit_forbidden(user_client, remove_resource,
cluster_owner_client=None, ns=None):
metrics = [{
'name': 'cpu',
'type': 'Resource',
'target': {
'type': 'Utilization',
'utilization': '50',
},
}]
if(cluster_owner_client is None and ns is None):
project = rbac_get_project()
ns = rbac_get_namespace()
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
cluster_owner_client = \
get_project_client_for_token(project, cluster_owner_token)
# Create HPA as a cluster owner
hpa, workload = create_hpa(cluster_owner_client, ns)
# Verify editing HPA fails
with pytest.raises(ApiError) as e:
user_client.update(hpa,
name=hpa['name'],
namespaceId=ns.id,
maxReplicas=10,
minReplicas=3,
workload=workload.id,
metrics=metrics)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
remove_resource(hpa)
remove_resource(workload)
def create_hpa(p_client, ns, workload=None):
# Create workload of scale 1 with CPU reservation
# Create hpa pointing to the workload.
if workload is None:
workload = create_workload(p_client, ns)
name = random_test_name("hpa")
metrics = [{'name': 'cpu',
'type': 'Resource',
'target': {
'type': 'Utilization',
'utilization': '50',
},
}]
hpa = p_client.create_horizontalPodAutoscaler(
name=name,
namespaceId=ns.id,
maxReplicas=5,
minReplicas=2,
workloadId=workload.id,
metrics=metrics
)
hpa = wait_for_hpa_to_active(p_client, hpa)
assert hpa.type == "horizontalPodAutoscaler"
assert hpa.name == name
assert hpa.minReplicas == 2
assert hpa.maxReplicas == 5
# After hpa becomes active, the workload scale should be equal to the
# minReplicas set in HPA object
workloadlist = p_client.list_workload(uuid=workload.uuid).data
validate_workload(p_client, workloadlist[0], "deployment", ns.name,
pod_count=hpa.minReplicas)
return (hpa, workload)
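# For reference, the Rancher-API metric dict used by create_hpa() above (and
# the memory-based variant in edit_hpa() below) roughly corresponds to this
# native Kubernetes autoscaling/v2 HorizontalPodAutoscaler metric entry. This
# is an illustrative mapping only; the suite itself never builds this object.
_k8s_cpu_metric_example = {
    "type": "Resource",
    "resource": {
        "name": "cpu",
        "target": {"type": "Utilization", "averageUtilization": 50},
    },
}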
def edit_hpa(p_client, ns):
# Create workload of scale 1 with memory reservation
    # Create hpa pointing to the workload, edit it and verify it is functional
workload = create_workload(p_client, ns)
name = random_test_name("default")
metrics = [{
"type": "Resource",
"name": "memory",
"target": {
"type": "AverageValue",
"value": None,
"averageValue": "32Mi",
"utilization": None,
"stringValue": "32"
}
}]
hpa = p_client.create_horizontalPodAutoscaler(
name=name,
namespaceId=ns.id,
maxReplicas=4,
minReplicas=2,
workloadId=workload.id,
metrics=metrics
)
wait_for_hpa_to_active(p_client, hpa)
# After hpa becomes active, the workload scale should be equal to the
# minReplicas set in HPA
workloadlist = p_client.list_workload(uuid=workload.uuid).data
validate_workload(p_client, workloadlist[0], "deployment", ns.name,
pod_count=hpa.minReplicas)
# Edit the HPA
updated_hpa = p_client.update(hpa,
name=hpa['name'],
namespaceId=ns.id,
maxReplicas=6,
minReplicas=3,
workloadId=workload.id,
metrics=metrics)
wait_for_hpa_to_active(p_client, updated_hpa)
assert updated_hpa.type == "horizontalPodAutoscaler"
assert updated_hpa.minReplicas == 3
assert updated_hpa.maxReplicas == 6
# After hpa becomes active, the workload scale should be equal to the
# minReplicas set in the updated HPA
wait_for_pods_in_workload(p_client, workload, 3)
workloadlist = p_client.list_workload(uuid=workload.uuid).data
validate_workload(p_client, workloadlist[0], "deployment", ns.name,
pod_count=updated_hpa.minReplicas)
return (updated_hpa, workload)
def delete_hpa(p_client, hpa, ns):
    hpaname = hpa['name']
    p_client.delete(hpa)
    # Wait until the HPA is no longer returned by the project API
    timeout = 30
    start = time.time()
    hpadata = p_client.list_horizontalPodAutoscaler(name=hpaname).get('data')
    while len(hpadata) > 0:
        if time.time() - start > timeout:
            raise AssertionError("Timed out waiting for hpa deletion")
        time.sleep(.5)
        hpadata = p_client.list_horizontalPodAutoscaler(
            name=hpaname).get('data')
    # Verify the hpa is also gone via "kubectl get hpa"; kubectl reports a
    # NotFound error for a deleted resource
    command = "get hpa {} --namespace {}".format(hpaname, ns.name)
    print("Command to check the hpa: " + command)
    result = execute_kubectl_cmd(command, json_out=False, stderr=True)
    print(result)
    assert "NotFound" in str(result), \
        "Expected kubectl to report the hpa as NotFound after deletion"
def create_workload(p_client, ns):
con = [{"name": "test1",
"image": TEST_IMAGE,
"resources": {
"requests": {
"memory": "64Mi",
"cpu": "100m"
},
"limits": {
"memory": "512Mi",
"cpu": "1000m"
}
}
}]
name = random_test_name("workload")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id)
print(workload.scale)
validate_workload(p_client, workload, "deployment", ns.name)
return workload
@pytest.fixture(scope='module', autouse="True")
def create_project_client(request):
client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
p, ns = create_project_and_ns(
ADMIN_TOKEN, cluster, random_test_name("testhpa"))
p_client = get_project_client_for_token(p, ADMIN_TOKEN)
namespace["p_client"] = p_client
namespace["ns"] = ns
namespace["cluster"] = cluster
namespace["project"] = p
def fin():
client = get_admin_client()
client.delete(namespace["project"])
request.addfinalizer(fin)
| 20,277 | 36.621521 | 79 | py |
| rancher | rancher-master/tests/validation/tests/v3_api/test_ingress.py |
"""
This test suite contains tests to validate ingress create/edit/delete in
different possible ways and with different user roles.
Test requirement:
The below environment variables need to be set:
CATTLE_TEST_URL - url to rancher server
ADMIN_TOKEN - Admin token from rancher
USER_TOKEN - User token from rancher
RANCHER_CLUSTER_NAME - Cluster name to run test on
RANCHER_TEST_RBAC - Boolean (Optional), To run role based tests.
"""
from .common import CLUSTER_MEMBER
from .common import CLUSTER_OWNER
from .common import PROJECT_READ_ONLY
from .common import PROJECT_OWNER
from .common import PROJECT_MEMBER
from .common import TEST_IMAGE
from .common import TEST_IMAGE_PORT
from .common import random_test_name
from .common import validate_workload
from .common import get_schedulable_nodes
from .common import validate_ingress
from .common import wait_for_pods_in_workload
from .common import validate_ingress_using_endpoint
from .common import rbac_get_user_token_by_role
from .common import pytest
from .common import rbac_get_project
from .common import rbac_get_namespace
from .common import if_test_rbac
from .common import get_project_client_for_token
from .common import ApiError
from .common import time
from .common import get_user_client_and_cluster
from .common import create_kubeconfig
from .common import create_project_and_ns
from .common import USER_TOKEN
from .common import get_user_client
from .common import DEFAULT_TIMEOUT
from .common import rbac_get_workload
from .common import wait_for_ingress_to_active
namespace = {"p_client": None, "ns": None, "cluster": None, "project": None}
rbac_role_list = [
CLUSTER_OWNER,
CLUSTER_MEMBER,
PROJECT_OWNER,
PROJECT_MEMBER,
PROJECT_READ_ONLY
]
def test_ingress():
p_client = namespace["p_client"]
ns = namespace["ns"]
cluster = namespace["cluster"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
validate_workload(p_client, workload, "daemonSet", ns.name,
len(get_schedulable_nodes(cluster)))
host = "test1.com"
path = "/name.html"
rule = {"host": host,
"paths": [{"workloadIds": [workload.id],
"targetPort": TEST_IMAGE_PORT}]}
p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
validate_ingress(namespace["p_client"], namespace["cluster"],
[workload], host, path)
def test_ingress_with_same_rules_having_multiple_targets():
p_client = namespace["p_client"]
ns = namespace["ns"]
cluster = namespace["cluster"]
con = [{"name": "testm1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload1 = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
validate_workload(p_client, workload1, "daemonSet", ns.name,
len(get_schedulable_nodes(cluster)))
name = random_test_name("default")
workload2 = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
validate_workload(p_client, workload2, "daemonSet", ns.name,
len(get_schedulable_nodes(cluster)))
host = "testm1.com"
path = "/name.html"
rule1 = {"host": host,
"paths": [{"workloadIds": [workload1.id],
"targetPort": TEST_IMAGE_PORT}]}
rule2 = {"host": host,
"paths": [{"workloadIds": [workload2.id],
"targetPort": TEST_IMAGE_PORT}]}
p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule1, rule2])
validate_ingress(namespace["p_client"], namespace["cluster"],
[workload1, workload2], host, path)
def test_ingress_edit_target():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload1 = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
scale=2)
validate_workload(p_client, workload1, "deployment", ns.name, pod_count=2)
name = random_test_name("default")
workload2 = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
scale=2)
validate_workload(p_client, workload2, "deployment", ns.name, pod_count=2)
host = "test2.com"
path = "/name.html"
rule = {"host": host,
"paths": [{"workloadIds": [workload1.id],
"targetPort": TEST_IMAGE_PORT}]}
ingress = p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
validate_ingress(namespace["p_client"], namespace["cluster"],
[workload1], host, path)
rule = {"host": host,
"paths": [{"workloadIds": [workload2.id],
"targetPort": TEST_IMAGE_PORT}]}
ingress = p_client.update(ingress, rules=[rule])
validate_ingress(namespace["p_client"], namespace["cluster"],
[workload2], host, path)
def test_ingress_edit_host():
p_client = namespace["p_client"]
ns = namespace["ns"]
cluster = namespace["cluster"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
validate_workload(p_client, workload, "daemonSet", ns.name,
len(get_schedulable_nodes(cluster)))
host = "test3.com"
path = "/name.html"
rule = {"host": host,
"paths": [{"workloadIds": [workload.id],
"targetPort": TEST_IMAGE_PORT}]}
ingress = p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
validate_ingress(namespace["p_client"], namespace["cluster"],
[workload], host, path)
host = "test4.com"
rule = {"host": host,
"paths": [{"workloadIds": [workload.id],
"targetPort": TEST_IMAGE_PORT}]}
ingress = p_client.update(ingress, rules=[rule])
validate_ingress(namespace["p_client"], namespace["cluster"],
[workload], host, path)
def test_ingress_edit_path():
p_client = namespace["p_client"]
ns = namespace["ns"]
cluster = namespace["cluster"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
validate_workload(p_client, workload, "daemonSet", ns.name,
len(get_schedulable_nodes(cluster)))
host = "test5.com"
path = "/name.html"
rule = {"host": host,
"paths": [{"workloadIds": [workload.id],
"targetPort": TEST_IMAGE_PORT}]}
ingress = p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
validate_ingress(namespace["p_client"], namespace["cluster"],
[workload], host, path)
path = "/service1.html"
rule = {"host": host,
"paths": [{"workloadIds": [workload.id],
"targetPort": TEST_IMAGE_PORT}]}
ingress = p_client.update(ingress, rules=[rule])
validate_ingress(namespace["p_client"], namespace["cluster"],
[workload], host, path)
def test_ingress_edit_add_more_rules():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload1 = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
scale=2)
validate_workload(p_client, workload1, "deployment", ns.name, pod_count=2)
name = random_test_name("default")
workload2 = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
scale=2)
validate_workload(p_client, workload2, "deployment", ns.name, pod_count=2)
host1 = "test6.com"
path = "/name.html"
rule1 = {"host": host1,
"paths": [{"workloadIds": [workload1.id],
"targetPort": TEST_IMAGE_PORT}]}
ingress = p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule1])
validate_ingress(namespace["p_client"], namespace["cluster"],
[workload1], host1, path)
host2 = "test7.com"
rule2 = {"host": host2,
"paths": [{"workloadIds": [workload2.id],
"targetPort": TEST_IMAGE_PORT}]}
ingress = p_client.update(ingress, rules=[rule1, rule2])
validate_ingress(namespace["p_client"], namespace["cluster"],
[workload2], host2, path)
validate_ingress(namespace["p_client"], namespace["cluster"],
[workload1], host1, path)
def test_ingress_scale_up_target():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
scale=2)
validate_workload(p_client, workload, "deployment", ns.name, pod_count=2)
host = "test8.com"
path = "/name.html"
rule = {"host": host,
"paths": [{"workloadIds": [workload.id],
"targetPort": TEST_IMAGE_PORT}]}
p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
validate_ingress(namespace["p_client"], namespace["cluster"],
[workload], host, path)
workload = p_client.update(workload, scale=4, containers=con)
validate_workload(p_client, workload, "deployment", ns.name, pod_count=4)
validate_ingress(namespace["p_client"], namespace["cluster"],
[workload], host, path)
def test_ingress_upgrade_target():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = {"name": "test1",
"image": TEST_IMAGE}
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=[con],
namespaceId=ns.id,
scale=2)
validate_workload(p_client, workload, "deployment", ns.name, pod_count=2)
host = "test9.com"
path = "/name.html"
rule = {"host": host,
"paths": [{"workloadIds": [workload.id],
"targetPort": TEST_IMAGE_PORT}]}
p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
validate_ingress(namespace["p_client"], namespace["cluster"],
[workload], host, path)
con["environment"] = {"test1": "value1"}
workload = p_client.update(workload, containers=[con])
wait_for_pods_in_workload(p_client, workload, pod_count=2)
validate_workload(p_client, workload, "deployment", ns.name, pod_count=2)
validate_ingress(namespace["p_client"], namespace["cluster"],
[workload], host, path)
def test_ingress_rule_with_only_path():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = {"name": "test1",
"image": TEST_IMAGE}
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=[con],
namespaceId=ns.id,
scale=2)
validate_workload(p_client, workload, "deployment", ns.name, pod_count=2)
host = ""
path = "/service2.html"
rule = {"host": host,
"paths": [{"workloadIds": [workload.id],
"targetPort": TEST_IMAGE_PORT}]}
p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
validate_ingress(namespace["p_client"], namespace["cluster"],
[workload], "", path, True)
def test_ingress_rule_with_only_host():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = {"name": "test1",
"image": TEST_IMAGE}
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=[con],
namespaceId=ns.id,
scale=2)
validate_workload(p_client, workload, "deployment", ns.name, pod_count=2)
host = "test10.com"
rule = {"host": host,
"paths": [{"workloadIds": [workload.id],
"targetPort": TEST_IMAGE_PORT}]}
p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
validate_ingress(namespace["p_client"], namespace["cluster"],
[workload], host, "/name.html")
validate_ingress(namespace["p_client"], namespace["cluster"],
[workload], host, "/service1.html")
def test_ingress_xip_io():
p_client = namespace["p_client"]
ns = namespace["ns"]
cluster = namespace["cluster"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
validate_workload(p_client, workload, "daemonSet", ns.name,
len(get_schedulable_nodes(cluster)))
path = "/name.html"
rule = {"host": "xip.io",
"paths": [{"path": path,
"workloadIds": [workload.id],
"targetPort": TEST_IMAGE_PORT}]}
ingress = p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
validate_ingress_using_endpoint(namespace["p_client"], ingress, [workload])
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_ingress_create(role):
"""
    This test uses the pre-created workload in the shared project and then
    creates an ingress as the user with the given role, validating the
    expected permission behaviour.
@param role: User role in rancher eg. project owner, project member etc
"""
token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
workload = rbac_get_workload()
p_client = get_project_client_for_token(project, token)
name = random_test_name("default")
host = "xip.io"
rule = {"host": host,
"paths": [{"workloadIds": [workload.id],
"targetPort": TEST_IMAGE_PORT}]}
if role in (CLUSTER_MEMBER, PROJECT_READ_ONLY):
with pytest.raises(ApiError) as e:
p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
else:
ingress = p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
wait_for_ingress_to_active(p_client, ingress)
p_client.delete(ingress)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_ingress_edit(role):
"""
    This test creates an ingress as the cluster owner and then updates its
    rule as the user with the given role, validating the expected
    permission behaviour.
@param role: User role in rancher eg. project owner, project member etc
"""
c_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
workload = rbac_get_workload()
p_client_for_c_owner = get_project_client_for_token(project, c_owner_token)
p_client = get_project_client_for_token(project, token)
host = "xip.io"
path = "/name.html"
rule_1 = {"host": host,
"paths": [{"workloadIds": [workload.id],
"targetPort": TEST_IMAGE_PORT}]}
rule_2 = {"host": host,
"paths": [{"path": path, "workloadIds": [workload.id],
"targetPort": TEST_IMAGE_PORT}]}
name = random_test_name("default")
ingress = p_client_for_c_owner.create_ingress(name=name, namespaceId=ns.id,
rules=[rule_1])
wait_for_ingress_to_active(p_client_for_c_owner, ingress)
if role in (CLUSTER_MEMBER, PROJECT_READ_ONLY):
with pytest.raises(ApiError) as e:
ingress = p_client.update(ingress, rules=[rule_2])
wait_for_ingress_to_active(p_client, ingress)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
else:
ingress = p_client.update(ingress, rules=[rule_2])
wait_for_ingress_to_active(p_client, ingress)
p_client_for_c_owner.delete(ingress)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_ingress_delete(role):
"""
    This test creates an ingress as the cluster owner and then deletes it as
    the user with the given role, validating the expected permission
    behaviour.
@param role: User role in rancher eg. project owner, project member etc
"""
c_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
token = rbac_get_user_token_by_role(role)
project = rbac_get_project()
ns = rbac_get_namespace()
workload = rbac_get_workload()
p_client_for_c_owner = get_project_client_for_token(project, c_owner_token)
p_client = get_project_client_for_token(project, token)
name = random_test_name("default")
host = "xip.io"
rule = {"host": host,
"paths": [{"workloadIds": [workload.id],
"targetPort": TEST_IMAGE_PORT}]}
ingress = p_client_for_c_owner.create_ingress(name=name, namespaceId=ns.id,
rules=[rule])
wait_for_ingress_to_active(p_client_for_c_owner, ingress)
if role in (CLUSTER_MEMBER, PROJECT_READ_ONLY):
with pytest.raises(ApiError) as e:
p_client.delete(ingress)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
p_client_for_c_owner.delete(ingress)
else:
p_client.delete(ingress)
validate_ingress_deleted(p_client, ingress)
def validate_ingress_deleted(client, ingress, timeout=DEFAULT_TIMEOUT):
"""
Checks whether ingress got deleted successfully.
    Validates that the ingress is no longer listed by the given client.
    @param client: Project client used to create the ingress
    @param ingress: ingress object expected to be deleted
@param timeout: Max time to keep checking whether ingress is deleted or not
"""
time.sleep(2)
start = time.time()
ingresses = client.list_ingress(uuid=ingress.uuid).data
while len(ingresses) != 0:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for ingress to be deleted")
time.sleep(.5)
ingresses = client.list_ingress(uuid=ingress.uuid).data
@pytest.fixture(scope='module', autouse="True")
def create_project_client(request):
client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
p, ns = create_project_and_ns(USER_TOKEN, cluster, "testingress")
p_client = get_project_client_for_token(p, USER_TOKEN)
namespace["p_client"] = p_client
namespace["ns"] = ns
namespace["cluster"] = cluster
namespace["project"] = p
def fin():
client = get_user_client()
client.delete(namespace["project"])
request.addfinalizer(fin)
| 21,612 | 39.779245 | 79 | py |
| rancher | rancher-master/tests/validation/tests/v3_api/test_cis_scan.py |
import pytest
import requests
import time
from rancher import ApiError
from lib.aws import AmazonWebServices
from .common import CLUSTER_MEMBER, configure_cis_requirements
from .common import CLUSTER_OWNER
from .common import CIS_SCAN_PROFILE
from .common import cluster_cleanup
from .common import get_user_client
from .common import get_user_client_and_cluster
from .common import get_custom_host_registration_cmd
from .common import get_project_client_for_token
from .common import get_client_for_token
from .common import get_cluster_by_name
from .common import if_test_rbac
from .common import PROJECT_OWNER
from .common import PROJECT_MEMBER
from .common import PROJECT_READ_ONLY
from .common import random_test_name
from .common import rbac_get_user_token_by_role
from .common import USER_TOKEN
from .common import validate_cluster_state
from .common import wait_for_cluster_node_count
from .test_rke_cluster_provisioning import HOST_NAME, \
POD_SECURITY_POLICY_TEMPLATE, get_cis_rke_config # NOQA
scan_results = {
"rke-cis-1.4": {
"permissive": {"pass": 63, "skip": 15},
"hardened": {"pass": 78, "skip": 0},
"not_applicable": 19, "total": 97, "fail": 0
},
"rke-cis-1.5": {
"permissive": {"pass": 58, "fail": 0, "skip": 14},
"hardened": {"pass": 72, "fail": 0, "skip": 0},
"not_applicable": 20, "total": 92, "fail": 0
}
}
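# A quick consistency sketch for the expected results above (illustrative,
# not used by the tests): for every benchmark and profile, pass + skip +
# fail + not_applicable adds up to total, e.g. rke-cis-1.4 hardened:
# 78 + 0 + 0 + 19 == 97.
for _expected in scan_results.values():
    for _profile in ("permissive", "hardened"):
        assert (_expected[_profile]["pass"] + _expected[_profile]["skip"] +
                _expected["fail"] + _expected["not_applicable"]) \
            == _expected["total"]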
DEFAULT_TIMEOUT = 120
cluster_detail = {
"cluster_14": None, "nodes_14": None, "name": None,
"cluster_15": None, "nodes_15": None
}
def test_cis_scan_run_scan_hardened_14():
cluster = cluster_detail["cluster_14"]
scan_detail = run_scan(cluster, USER_TOKEN, "hardened")
report_link = scan_detail["links"]["report"]
test_total, tests_passed, tests_skipped, tests_failed, tests_na = \
get_scan_results("rke-cis-1.4", "hardened")
verify_cis_scan_report(
report_link, token=USER_TOKEN,
test_total=test_total,
tests_passed=tests_passed,
tests_skipped=tests_skipped,
tests_failed=tests_failed,
tests_na=tests_na
)
def test_cis_scan_run_scan_hardened_15():
"""
    This test is expected to fail because two CIS checks fail: 5.1.5, 5.3.2
:return:
"""
cluster = cluster_detail["cluster_15"]
scan_detail = run_scan(cluster, USER_TOKEN, "hardened",
scan_tool_version="rke-cis-1.5")
report_link = scan_detail["links"]["report"]
test_total, tests_passed, tests_skipped, tests_failed, tests_na = \
get_scan_results("rke-cis-1.5", "hardened")
verify_cis_scan_report(
report_link, token=USER_TOKEN,
test_total=test_total,
tests_passed=tests_passed,
tests_skipped=tests_skipped,
tests_failed=tests_failed,
tests_na=tests_na
)
def test_cis_scan_run_scan_permissive_14():
client, cluster = get_user_client_and_cluster()
scan_detail = run_scan(cluster, USER_TOKEN, "permissive")
report_link = scan_detail["links"]["report"]
test_total, tests_passed, tests_skipped, tests_failed, tests_na = \
get_scan_results("rke-cis-1.4", "permissive")
verify_cis_scan_report(
report_link, token=USER_TOKEN,
test_total=test_total,
tests_passed=tests_passed,
tests_skipped=tests_skipped,
tests_failed=tests_failed,
tests_na=tests_na
)
def test_cis_scan_run_scan_permissive_15():
"""
    This test is expected to fail because one CIS check fails: 5.1.5
:return:
"""
client, cluster = get_user_client_and_cluster()
scan_detail = run_scan(cluster, USER_TOKEN, "permissive",
scan_tool_version="rke-cis-1.5")
report_link = scan_detail["links"]["report"]
test_total, tests_passed, tests_skipped, tests_failed, tests_na = \
get_scan_results("rke-cis-1.5", "permissive")
verify_cis_scan_report(
report_link, token=USER_TOKEN,
test_total=test_total,
tests_passed=tests_passed,
tests_skipped=tests_skipped,
tests_failed=tests_failed,
tests_na=tests_na
)
def test_cis_scan_skip_test_ui():
client = get_user_client()
cluster = cluster_detail["cluster_14"]
# run security scan
scan_detail = run_scan(cluster, USER_TOKEN, "hardened")
report_link = scan_detail["links"]["report"]
test_total, tests_passed, tests_skipped, tests_failed, tests_na = \
get_scan_results("rke-cis-1.4", "hardened")
verify_cis_scan_report(
report_link, token=USER_TOKEN,
test_total=test_total,
tests_passed=tests_passed,
tests_skipped=tests_skipped,
tests_failed=tests_failed,
tests_na=tests_na
)
# get system project
system_project = cluster.projects(name="System")["data"][0]
system_project_id = system_project["id"]
print(system_project)
p_client = get_project_client_for_token(system_project, USER_TOKEN)
# check config map is NOT generated for first scan
try:
p_client.list_configMap(projectId=system_project_id,
namespaceId="security-scan")
except ApiError as e:
assert e.error.status == 404, "Config Map is generated for first scan"
    # delete the security-scan-cfg config map if present
security_scan_config = \
p_client.list_configMap(projectId=system_project_id,
namespaceId="security-scan",
id="security-scan:security-scan-cfg",
name="security-scan-cfg")
print(security_scan_config)
if len(security_scan_config["data"]) != 0:
p_client.delete(security_scan_config["data"][0])
    # Skip check 1.1.2 the same way the UI does, by writing a
    # security-scan-cfg config map (see the json.dumps sketch after this test)
cm_data = {"config.json": "{\"skip\":{\"rke-cis-1.4\":[\"1.1.2\"]}}"}
p_client.create_configMap(projectId=system_project_id,
name="security-scan-cfg",
namespaceId="security-scan",
id="security-scan:security-scan-cfg",
data=cm_data)
# run security scan
scan_detail_2 = run_scan(cluster, USER_TOKEN, "hardened")
client.reload(scan_detail_2)
report_link = scan_detail_2["links"]["report"]
report = verify_cis_scan_report(
report_link, token=USER_TOKEN,
test_total=test_total,
tests_passed=tests_passed - 1,
tests_skipped=tests_skipped + 1,
tests_failed=tests_failed,
tests_na=tests_na
)
print(report["results"][0]["checks"][0]["state"])
assert report["results"][0]["checks"][0]["state"] == "skip", \
"State of the test is not as expected"
"""As part of clean up
delete security-scan-cf config
"""
security_scan_config = \
p_client.list_configMap(projectId=system_project_id,
namespaceId="security-scan",
id="security-scan:security-scan-cfg",
name="security-scan-cfg")
print(security_scan_config)
if len(security_scan_config["data"]) != 0:
p_client.delete(security_scan_config["data"][0])
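# A minimal sketch of the skip configuration written by the test above
# (test_cis_scan_skip_test_ui); building the config map payload with
# json.dumps avoids the hand-escaped string. Illustrative only; the test
# itself keeps the literal string.
import json
_skip_config_example = {"skip": {"rke-cis-1.4": ["1.1.2"]}}
_cm_data_example = {"config.json": json.dumps(_skip_config_example)}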
def test_cis_scan_skip_test_api():
client = get_user_client()
cluster = cluster_detail["cluster_14"]
# run security scan
scan_detail = run_scan(cluster, USER_TOKEN, "hardened")
report_link = scan_detail["links"]["report"]
test_total, tests_passed, tests_skipped, tests_failed, tests_na = \
get_scan_results("rke-cis-1.4", "hardened")
verify_cis_scan_report(
report_link, token=USER_TOKEN,
test_total=test_total,
tests_passed=tests_passed,
tests_skipped=tests_skipped,
tests_failed=tests_failed,
tests_na=tests_na
)
# skip test 1.1.3
cluster.runSecurityScan(overrideSkip=["1.1.3"],
profile="hardened",
overrideBenchmarkVersion="rke-cis-1.4")
cluster = client.reload(cluster)
cluster_scan_report_id = cluster["currentCisRunName"]
print(cluster_scan_report_id)
scan_detail = wait_for_scan_active(cluster_scan_report_id, client)
wait_for_cis_pod_remove(cluster, cluster_scan_report_id)
report_link = scan_detail["links"]["report"]
report = verify_cis_scan_report(
report_link, token=USER_TOKEN,
test_total=test_total,
tests_passed=tests_passed - 1,
tests_skipped=tests_skipped + 1,
tests_failed=tests_failed,
tests_na=tests_na
)
assert report["results"][0]["checks"][1]["state"] == "skip", \
"State of the test is not as expected"
def test_cis_scan_edit_cluster():
aws_nodes = cluster_detail["nodes_14"]
client = get_user_client()
cluster = cluster_detail["cluster_14"]
# Add 2 etcd nodes to the cluster
for i in range(0, 2):
aws_node = aws_nodes[3 + i]
aws_node.execute_command("sudo sysctl -w vm.overcommit_memory=1")
aws_node.execute_command("sudo sysctl -w kernel.panic=10")
aws_node.execute_command("sudo sysctl -w kernel.panic_on_oops=1")
docker_run_cmd = get_custom_host_registration_cmd(client,
cluster,
["etcd"],
aws_node)
aws_node.execute_command(docker_run_cmd)
wait_for_cluster_node_count(client, cluster, 5)
validate_cluster_state(client, cluster, intermediate_state="updating")
cluster = client.reload(cluster)
# run CIS Scan
scan_detail = run_scan(cluster, USER_TOKEN, "hardened")
report_link = scan_detail["links"]["report"]
test_total, tests_passed, tests_skipped, tests_failed, tests_na = \
get_scan_results("rke-cis-1.4", "hardened")
report = verify_cis_scan_report(
report_link, token=USER_TOKEN,
test_total=test_total,
tests_passed=tests_passed - 2,
tests_skipped=tests_skipped,
tests_failed=tests_failed + 2,
tests_na=tests_na
)
print(report["results"][3]["checks"][18])
assert report["results"][3]["checks"][18]["state"] == "mixed"
# edit nodes and run command
for i in range(0, 2):
aws_node = aws_nodes[3 + i]
aws_node.execute_command("sudo useradd etcd")
# run CIS Scan
scan_detail = run_scan(cluster, USER_TOKEN, "hardened")
report_link = scan_detail["links"]["report"]
report = verify_cis_scan_report(
report_link, token=USER_TOKEN,
test_total=test_total,
tests_passed=tests_passed,
tests_skipped=tests_skipped,
tests_failed=tests_failed,
tests_na=tests_na
)
print(report["results"][3]["checks"][18]["state"])
assert report["results"][3]["checks"][18]["state"] == "pass"
@if_test_rbac
def test_rbac_run_scan_cluster_owner():
client, cluster = get_user_client_and_cluster()
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
    # run a permissive scan
scan_detail = run_scan(cluster, user_token)
report_link = scan_detail["links"]["report"]
test_total, tests_passed, tests_skipped, tests_failed, tests_na = \
get_scan_results("rke-cis-1.4", "permissive")
verify_cis_scan_report(
report_link, token=USER_TOKEN,
test_total=test_total,
tests_passed=tests_passed,
tests_skipped=tests_skipped,
tests_failed=tests_failed,
tests_na=tests_na
)
@if_test_rbac
def test_rbac_run_scan_cluster_member():
client, cluster = get_user_client_and_cluster()
user_token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
run_scan(cluster, user_token, can_run_scan=False)
@if_test_rbac
def test_rbac_run_scan_project_owner():
client, cluster = get_user_client_and_cluster()
user_token = rbac_get_user_token_by_role(PROJECT_OWNER)
run_scan(cluster, user_token, can_run_scan=False)
@if_test_rbac
def test_rbac_run_scan_project_member():
client, cluster = get_user_client_and_cluster()
user_token = rbac_get_user_token_by_role(PROJECT_MEMBER)
run_scan(cluster, user_token, can_run_scan=False)
@if_test_rbac
def test_rbac_run_scan_project_read_only():
client, cluster = get_user_client_and_cluster()
user_token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
run_scan(cluster, user_token, can_run_scan=False)
@pytest.fixture(scope='module', autouse="True")
def create_project_client(request):
client = get_user_client()
# create cluster for running rke-cis-1.4
cluster_14, aws_nodes_14 = create_cluster_cis()
cluster_detail["cluster_14"] = cluster_14
cluster_detail["nodes_14"] = aws_nodes_14
# create cluster for running rke-cis-1.5
cluster_15, aws_nodes_15 = create_cluster_cis("rke-cis-1.5")
cluster_detail["cluster_15"] = cluster_15
cluster_detail["nodes_15"] = aws_nodes_15
def fin():
cluster_cleanup(client, cluster_14, aws_nodes_14)
cluster_cleanup(client, cluster_15, aws_nodes_15)
request.addfinalizer(fin)
def verify_cis_scan_report(
report_link, token, test_total,
tests_passed, tests_skipped,
tests_failed, tests_na):
head = {'Authorization': 'Bearer ' + token}
response = requests.get(report_link, verify=False, headers=head)
report = response.json()
assert report["total"] == test_total, \
"Incorrect number of tests run"
assert report["pass"] == tests_passed, \
"Incorrect number of tests passed"
assert report["fail"] == tests_failed, \
"Incorrect number of failed tests"
assert report["skip"] == tests_skipped, \
"Incorrect number of tests skipped"
assert report["notApplicable"] == tests_na, \
"Incorrect number of tests marked Not Applicable"
return report
def run_scan(cluster, user_token, profile="permissive",
can_run_scan=True, scan_tool_version=CIS_SCAN_PROFILE):
client = get_client_for_token(user_token)
cluster = get_cluster_by_name(client, cluster.name)
if can_run_scan:
cluster.runSecurityScan(profile=profile,
overrideBenchmarkVersion=scan_tool_version)
cluster = client.reload(cluster)
cluster_scan_report_id = cluster["currentCisRunName"]
print(cluster_scan_report_id)
scan_detail = wait_for_scan_active(cluster_scan_report_id, client)
wait_for_cis_pod_remove(cluster, cluster_scan_report_id)
return scan_detail
else:
assert "runSecurityScan" not in list(cluster.actions.keys()), \
"User has Run CIS Scan permission"
def wait_for_scan_active(cluster_scan_report_id,
client,
timeout=DEFAULT_TIMEOUT):
scan_detail_data = client.list_clusterScan(name=cluster_scan_report_id)
scan_detail = scan_detail_data.data[0]
    # wait until the scan completes (state becomes "pass" or "fail")
start = time.time()
state_scan = scan_detail["state"]
while state_scan != "pass" and state_scan != "fail":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state of scan report to get to active")
time.sleep(.5)
scan_detail_data = client.list_clusterScan(name=cluster_scan_report_id)
scan_detail = scan_detail_data.data[0]
state_scan = scan_detail["state"]
print(state_scan)
scan_detail_data = client.list_clusterScan(name=cluster_scan_report_id)
scan_detail = scan_detail_data.data[0]
return scan_detail
def wait_for_cis_pod_remove(cluster,
cluster_scan_report_id,
timeout=DEFAULT_TIMEOUT):
system_project = cluster.projects(name="System")["data"][0]
p_client = get_project_client_for_token(system_project, USER_TOKEN)
pod = p_client.list_pod(namespaceId="security-scan",
name="security-scan-runner-" +
cluster_scan_report_id)
start = time.time()
while len(pod["data"]) != 0:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for removal of security scan pod")
time.sleep(.5)
pod = p_client.list_pod(namespaceId="security-scan",
name="security-scan-runner-" +
cluster_scan_report_id)
time.sleep(.5)
def create_cluster_cis(scan_tool_version="rke-cis-1.4"):
aws_nodes = \
AmazonWebServices().create_multiple_nodes(
5, random_test_name(HOST_NAME))
node_roles = [
["controlplane"], ["etcd"], ["worker"]
]
rke_config_temp = get_cis_rke_config(profile=scan_tool_version)
client = get_user_client()
cluster = client.create_cluster(
name=random_test_name(),
driver="rancherKubernetesEngine",
rancherKubernetesEngineConfig=rke_config_temp,
defaultPodSecurityPolicyTemplateId=POD_SECURITY_POLICY_TEMPLATE
)
assert cluster.state == "provisioning"
    # The setup creates 5 nodes but only 3 are used for this cluster;
    # the other 2 nodes are reserved for test_cis_scan_edit_cluster
cluster = configure_cis_requirements(aws_nodes[:3],
scan_tool_version,
node_roles,
client,
cluster
)
return cluster, aws_nodes
def get_scan_results(scan_tool_version, profile):
return scan_results[scan_tool_version]["total"], \
scan_results[scan_tool_version][profile]["pass"], \
scan_results[scan_tool_version][profile]["skip"], \
scan_results[scan_tool_version]["fail"], \
scan_results[scan_tool_version]["not_applicable"]
| 17,915 | 37.119149 | 79 | py |
| rancher | rancher-master/tests/validation/tests/v3_api/test_rbac.py |
import pytest
from rancher import ApiError, Client
from .common import (
ADMIN_TOKEN,
CATTLE_API_URL,
assign_members_to_cluster,
assign_members_to_project,
change_member_role_in_cluster,
change_member_role_in_project,
create_ns,
create_project,
create_project_and_ns,
get_user_client,
get_admin_client,
get_client_for_token,
get_cluster_client_for_token,
create_user,
get_user_client_and_cluster
)
def test_rbac_cluster_owner():
client, cluster = get_user_client_and_cluster()
user1, user1_token = create_user(get_admin_client())
# Assert that user1 is not able to list cluster
user1_client = get_client_for_token(user1_token)
clusters = user1_client.list_cluster().data # pylint: disable=no-member
assert len(clusters) == 0
# As admin , add user1 as cluster member of this cluster
client = get_user_client()
assign_members_to_cluster(client, user1, cluster, "cluster-owner")
validate_cluster_owner(user1_token, cluster)
def test_rbac_cluster_member():
client, cluster = get_user_client_and_cluster()
user1, user1_token = create_user(get_admin_client())
# Assert that user1 is not able to list cluster
user1_client = get_client_for_token(user1_token)
clusters = user1_client.list_cluster().data # pylint: disable=no-member
assert len(clusters) == 0
# Add user1 as cluster member of this cluster
client = get_user_client()
assign_members_to_cluster(client, user1, cluster, "cluster-member")
validate_cluster_member(user1_token, cluster)
def test_rbac_project_owner():
client, cluster = get_user_client_and_cluster()
# As admin user create a project and namespace
a_p, a_ns = create_project_and_ns(ADMIN_TOKEN, cluster)
user1, user1_token = create_user(get_admin_client())
# Assert that user1 is not able to list cluster
user1_client = get_client_for_token(user1_token)
clusters = user1_client.list_cluster().data # pylint: disable=no-member
assert len(clusters) == 0
# As admin user, Add user1 as project member of this project
client = get_user_client()
assign_members_to_project(client, user1, a_p, "project-owner")
validate_project_owner(user1_token, cluster, a_p, a_ns)
def test_rbac_project_member():
client, cluster = get_user_client_and_cluster()
# As admin user create a project and namespace
a_p, a_ns = create_project_and_ns(ADMIN_TOKEN, cluster)
user1, user1_token = create_user(get_admin_client())
user2, user2_token = create_user(get_admin_client())
# Assert that user1 is not able to list cluster
user1_client = get_client_for_token(user1_token)
clusters = user1_client.list_cluster().data # pylint: disable=no-member
assert len(clusters) == 0
# As admin user, Add user1 as project member of this project
client = get_user_client()
assign_members_to_project(client, user1, a_p, "project-member")
validate_project_member(user1_token, cluster, a_p, a_ns)
def test_rbac_change_cluster_owner_to_cluster_member():
client, cluster = get_user_client_and_cluster()
user1, user1_token = create_user(get_admin_client())
# Assert that user1 is not able to list cluster
user1_client = get_client_for_token(user1_token)
clusters = user1_client.list_cluster().data # pylint: disable=no-member
assert len(clusters) == 0
# As admin , add user1 as cluster member of this cluster
client = get_user_client()
crtb = assign_members_to_cluster(
client, user1, cluster, "cluster-owner")
validate_cluster_owner(user1_token, cluster)
change_member_role_in_cluster(
client, user1, crtb, "cluster-member")
validate_cluster_member(user1_token, cluster)
def test_rbac_change_cluster_member_to_cluster_owner():
client, cluster = get_user_client_and_cluster()
user1, user1_token = create_user(get_admin_client())
# Assert that user1 is not able to list cluster
user1_client = get_client_for_token(user1_token)
clusters = user1_client.list_cluster().data # pylint: disable=no-member
assert len(clusters) == 0
# Add user1 as cluster member of this cluster
crtb = assign_members_to_cluster(
get_user_client(), user1, cluster, "cluster-member")
validate_cluster_member(user1_token, cluster)
change_member_role_in_cluster(
get_user_client(), user1, crtb, "cluster-owner")
validate_cluster_owner(user1_token, cluster)
def test_rbac_change_project_owner_to_project_member():
client, cluster = get_user_client_and_cluster()
# As admin user create a project and namespace
a_p, a_ns = create_project_and_ns(ADMIN_TOKEN, cluster)
user1, user1_token = create_user(get_admin_client())
# Assert that user1 is not able to list cluster
user1_client = get_client_for_token(user1_token)
clusters = user1_client.list_cluster().data # pylint: disable=no-member
assert len(clusters) == 0
# As admin user, Add user1 as project member of this project
prtb = assign_members_to_project(
get_user_client(), user1, a_p, "project-owner")
validate_project_owner(user1_token, cluster, a_p, a_ns)
change_member_role_in_project(
get_user_client(), user1, prtb, "project-member")
validate_project_member(user1_token, cluster, a_p, a_ns)
def test_rbac_change_project_member_to_project_owner():
client, cluster = get_user_client_and_cluster()
# As admin user create a project and namespace
a_p, a_ns = create_project_and_ns(ADMIN_TOKEN, cluster)
user1, user1_token = create_user(get_admin_client())
user2, user2_token = create_user(get_admin_client())
# Assert that user1 is not able to list cluster
user1_client = get_client_for_token(user1_token)
clusters = user1_client.list_cluster().data # pylint: disable=no-member
assert len(clusters) == 0
# As admin user, Add user1 as project member of this project
prtb = assign_members_to_project(
get_user_client(), user1, a_p, "project-member")
validate_project_member(user1_token, cluster, a_p, a_ns)
change_member_role_in_project(
get_user_client(), user1, prtb, "project-owner")
validate_project_owner(user1_token, cluster, a_p, a_ns)
def validate_cluster_owner(user_token, cluster):
# As admin user create a project and namespace and a user
user2, user2_token = create_user(get_admin_client())
a_p, a_ns = create_project_and_ns(ADMIN_TOKEN, cluster)
# Assert that user1 is able to see cluster
user_client = get_client_for_token(user_token)
clusters = user_client.list_cluster().data # pylint: disable=no-member
assert len(clusters) == 1
# Assert that user1 is allowed to assign member to the cluster
assign_members_to_cluster(user_client, user2, cluster, "cluster-member")
# Assert that user1 is able to see projects he does not own
project = user_client.list_project( # pylint: disable=no-member
name=a_p.name).data
assert len(project) == 1
# Assert that user1 is able to see namespaces that are in projects
# that he does not own
user_c_client = get_cluster_client_for_token(cluster, user_token)
ns = user_c_client.list_namespace( # pylint: disable=no-member
uuid=a_ns.uuid).data
assert len(ns) == 1
# Assert that user1 is able to create namespaces in the projects
# that he does not own
create_ns(user_c_client, cluster, a_p)
    # Assert that user1 is able to create a project and a namespace in it
create_project_and_ns(user_token, cluster)
def validate_cluster_member(user_token, cluster):
# As admin user create a project and namespace and a user
a_p, a_ns = create_project_and_ns(ADMIN_TOKEN, cluster)
user2, user2_token = create_user(get_admin_client())
# Assert that user1 is able to see cluster
user_client = get_client_for_token(user_token)
clusters = user_client.list_cluster( # pylint: disable=no-member
name=cluster.name).data
assert len(clusters) == 1
assert clusters[0].name == cluster.name
# Assert that user1 is NOT able to assign member to the cluster
with pytest.raises(ApiError) as e:
assign_members_to_cluster(
user_client, user2, cluster, "cluster-member")
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
# Assert that user1 is NOT able to see projects he does not own
project = user_client.list_project( # pylint: disable=no-member
name=a_p.name).data
assert len(project) == 0
"""
# Assert that user1 is NOT able to access projects that he does not own
with pytest.raises(ApiError) as e:
get_project_client_for_token(a_p, user_token)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
"""
    # Assert that user1 is able to create a project and a namespace in it
create_project_and_ns(user_token, cluster)
def validate_project_owner(user_token, cluster, project, namespace):
user2, user2_token = create_user(get_admin_client())
# Assert that user1 is now able to see cluster
user_client = get_client_for_token(user_token)
clusters = user_client.list_cluster( # pylint: disable=no-member
name=cluster.name).data
assert len(clusters) == 1
assert clusters[0].name == cluster.name
# Assert that user1 is NOT able to assign member to the cluster
with pytest.raises(ApiError) as e:
assign_members_to_cluster(user_client, user2,
cluster, "cluster-member")
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
# Assert that user1 is able to see projects he is made the owner of
projects = user_client.list_project( # pylint: disable=no-member
name=project.name).data
assert len(projects) == 1
# Assert that user1 is able to access this project
p_user_client = get_cluster_client_for_token(cluster, user_token)
# Assert that user1 is able to see the existing namespace in this project
nss = p_user_client.list_namespace( # pylint: disable=no-member
uuid=namespace.uuid).data
assert len(nss) == 1
# Assert that user1 is able to access this project
create_ns(p_user_client, cluster, project)
# Assert that user1 is able to assign member to the project
assign_members_to_project(user_client, user2, project, "project-member")
# Assert that user1 is NOT able to create project
with pytest.raises(ApiError) as e:
create_project(user_client, cluster)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
def validate_project_member(user_token, cluster, project, namespace):
user2, user2_token = create_user(get_admin_client())
# Assert that user1 is able to see cluster
user_client = Client(url=CATTLE_API_URL, token=user_token,
verify=False)
clusters = user_client.list_cluster().data # pylint: disable=no-member
assert len(clusters) == 1
assert clusters[0].name == cluster.name
# Assert that user1 is NOT able to assign member to the cluster
with pytest.raises(ApiError) as e:
assign_members_to_cluster(user_client, user2,
cluster, "cluster-member")
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
# Assert that user1 is able to see projects he is made member of
projects = user_client.list_project( # pylint: disable=no-member
name=project.name).data
assert len(projects) == 1
# Assert that user1 is able to access this project
p_user_client = get_cluster_client_for_token(cluster, user_token)
# Assert that user1 is able to see the existing namespace in this project
nss = p_user_client.list_namespace( # pylint: disable=no-member
uuid=namespace.uuid).data
assert len(nss) == 1
    # Assert that user1 is able to create a namespace in this project
create_ns(p_user_client, cluster, project)
# Assert that user1 is NOT able to assign member to the project
with pytest.raises(ApiError) as e:
assign_members_to_project(user_client, user2,
project, "project-member")
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
# Assert that user1 is NOT able to create project
with pytest.raises(ApiError) as e:
create_project(user_client, cluster)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
| 12,635 | 37.290909 | 77 | py |
| rancher | rancher-master/tests/validation/tests/v3_api/test_hosted_eks_cluster.py |
import os
from .common import get_user_client
from .common import random_test_name
from .common import validate_cluster
from .common import wait_for_cluster_delete
from .test_create_ha import resource_prefix
from lib.aws import AmazonWebServices
import pytest
EKS_ACCESS_KEY = os.environ.get('RANCHER_EKS_ACCESS_KEY', "")
EKS_SECRET_KEY = os.environ.get('RANCHER_EKS_SECRET_KEY', "")
EKS_REGION = os.environ.get('RANCHER_EKS_REGION', "us-east-2")
EKS_K8S_VERSION = os.environ.get('RANCHER_EKS_K8S_VERSION', "1.17")
EKS_NODESIZE = os.environ.get('RANCHER_EKS_NODESIZE', 2)
KMS_KEY = os.environ.get('RANCHER_KMS_KEY', None)
SECRET_ENCRYPTION = os.environ.get('RANCHER_SECRET_ENCRYPTION', False)
LOGGING_TYPES = os.environ.get('RANCHER_LOGGING_TYPES', None)
EKS_SERVICE_ROLE = os.environ.get('RANCHER_EKS_SERVICE_ROLE', None)
EKS_SUBNETS = os.environ.get('RANCHER_EKS_SUBNETS', None)
EKS_SECURITYGROUP = os.environ.get('RANCHER_EKS_SECURITYGROUP', None)
AWS_SSH_KEY_NAME = os.environ.get("AWS_SSH_KEY_NAME")
EKS_PUBLIC_ACCESS_SOURCES = \
os.environ.get('RANCHER_EKS_PUBLIC_ACCESS_SOURCES', None)
ekscredential = pytest.mark.skipif(not (EKS_ACCESS_KEY and EKS_SECRET_KEY),
reason='EKS Credentials not provided, '
'cannot create cluster')
DEFAULT_TIMEOUT_EKS = 1200
IMPORTED_EKS_CLUSTERS = []
cluster_details = {}
eks_config = {
"imported": False,
"kubernetesVersion": EKS_K8S_VERSION,
"privateAccess": False,
"publicAccess": True,
"publicAccessSources": [],
"securityGroups": [],
"serviceRole": "",
"subnets": [],
"tags": {},
"loggingTypes": [],
"secretsEncryption": False,
"kmsKey": "",
"region": EKS_REGION,
"type": "eksclusterconfigspec",
"nodeGroups": [{
"version": EKS_K8S_VERSION,
"desiredSize": EKS_NODESIZE,
"diskSize": 20,
"gpu": False,
"instanceType": "t3.medium",
"maxSize": EKS_NODESIZE,
"minSize": EKS_NODESIZE,
"nodegroupName": random_test_name("test-ng"),
"type": "nodeGroup",
"subnets": [],
"tags": {},
"labels": {},
"ec2SshKey": ""
}]
}
@ekscredential
def test_eks_v2_hosted_cluster_create_basic():
"""
Create a hosted EKS v2 cluster with all default values from the UI
"""
cluster_name = random_test_name("test-auto-eks")
eks_config_temp = get_eks_config_basic(cluster_name)
cluster_config = {
"eksConfig": eks_config_temp,
"name": cluster_name,
"type": "cluster",
"dockerRootDir": "/var/lib/docker",
"enableNetworkPolicy": False,
"enableClusterAlerting": False,
"enableClusterMonitoring": False
}
create_and_validate_eks_cluster(cluster_config)
# validate cluster created
validate_eks_cluster(cluster_name, eks_config_temp)
# validate nodegroups created
validate_nodegroup(eks_config_temp["nodeGroups"], cluster_name)
@ekscredential
def test_eks_v2_hosted_cluster_create_all():
"""
Create a hosted EKS v2 cluster by giving in value of
every param of eks config from UI
"""
cluster_name = random_test_name("test-auto-eks")
eks_config_temp = get_eks_config_all(cluster_name)
cluster_config = {
"eksConfig": eks_config_temp,
"name": cluster_name,
"type": "cluster",
"dockerRootDir": "/var/lib/docker",
"enableNetworkPolicy": False,
"enableClusterAlerting": False,
"enableClusterMonitoring": False
}
create_and_validate_eks_cluster(cluster_config)
# validate cluster created
validate_eks_cluster(cluster_name, eks_config_temp)
# validate nodegroups created
validate_nodegroup(eks_config_temp["nodeGroups"], cluster_name)
@ekscredential
def test_eks_v2_hosted_cluster_edit():
"""
Create a hosted EKS v2 cluster.
Edit the following input fields:
cluster level tags, add node groups,
add/delete logging types, add new cloud cred
"""
cluster_name = random_test_name("test-auto-eks")
eks_config_temp = get_eks_config_basic(cluster_name)
cluster_config = {
"eksConfig": eks_config_temp,
"name": cluster_name,
"type": "cluster",
"dockerRootDir": "/var/lib/docker",
"enableNetworkPolicy": False,
"enableClusterAlerting": False,
"enableClusterMonitoring": False
}
client, cluster = create_and_validate_eks_cluster(cluster_config)
# edit cluster
cluster = edit_eks_cluster(cluster, eks_config_temp)
# validate cluster created
validate_eks_cluster(cluster_name, eks_config_temp)
# validate nodegroups created
validate_nodegroup(eks_config_temp["nodeGroups"], cluster_name)
@ekscredential
def test_eks_v2_hosted_cluster_delete():
"""
Delete a created hosted EKS v2 cluster and verify it is deleted in the backend
"""
cluster_name = random_test_name("test-auto-eks")
eks_config_temp = get_eks_config_basic(cluster_name)
cluster_config = {
"eksConfig": eks_config_temp,
"name": cluster_name,
"type": "cluster",
"dockerRootDir": "/var/lib/docker",
"enableNetworkPolicy": False,
"enableClusterAlerting": False,
"enableClusterMonitoring": False
}
client, cluster = create_and_validate_eks_cluster(cluster_config)
# delete cluster
client.delete(cluster)
wait_for_cluster_delete(client, cluster)
AmazonWebServices().wait_for_delete_eks_cluster(cluster_name)
@ekscredential
def test_eks_v2_create_import_cluster():
"""
Create an imported EKS cluster with some default values in EKS config
"""
display_name = create_resources_eks()
cluster_name = random_test_name("test-auto-eks")
eks_config_temp = get_eks_config_basic(cluster_name)
eks_config_temp["imported"] = True
cluster_config = {
"eksConfig": eks_config_temp,
"name": cluster_name,
"type": "cluster",
"dockerRootDir": "/var/lib/docker",
"enableNetworkPolicy": False,
"enableClusterAlerting": False,
"enableClusterMonitoring": False
}
create_and_validate_eks_cluster(cluster_config,
imported=True)
def create_resources_eks():
"""
    Create an EKS cluster directly in AWS (outside Rancher) for import tests
"""
cluster_name = resource_prefix + "-ekscluster"
AmazonWebServices().create_eks_cluster(cluster_name)
IMPORTED_EKS_CLUSTERS.append(cluster_name)
AmazonWebServices().wait_for_eks_cluster_state(cluster_name, "ACTIVE")
return cluster_name
@pytest.fixture(scope='module', autouse="True")
def create_project_client(request):
def fin():
client = get_user_client()
for name, cluster in cluster_details.items():
if len(client.list_cluster(name=name).data) > 0:
client.delete(cluster)
for display_name in IMPORTED_EKS_CLUSTERS:
AmazonWebServices().delete_eks_cluster(cluster_name=display_name)
request.addfinalizer(fin)
def create_and_validate_eks_cluster(cluster_config, imported=False):
"""
Create and validate EKS cluster
:param cluster_config: config of the cluster
:param imported: imported is true when user creates an imported cluster
:return: client, cluster
"""
client = get_user_client()
print("Creating EKS cluster")
print("\nEKS Configuration: {}".format(cluster_config))
cluster = client.create_cluster(cluster_config)
print(cluster)
cluster_details[cluster["name"]] = cluster
    # imported clusters skip the intermediate-state check since they are
    # already active in EKS before being registered with Rancher
    intermediate_state = not imported
cluster = validate_cluster(client, cluster,
check_intermediate_state=intermediate_state,
skipIngresscheck=True,
timeout=DEFAULT_TIMEOUT_EKS)
return client, cluster
def get_aws_cloud_credential():
"""
    Create an AWS cloud credential using the EKS access/secret keys
:return: ec2_cloud_credential
"""
client = get_user_client()
ec2_cloud_credential_config = {
"accessKey": EKS_ACCESS_KEY,
"secretKey": EKS_SECRET_KEY
}
ec2_cloud_credential = client.create_cloud_credential(
amazonec2credentialConfig=ec2_cloud_credential_config
)
return ec2_cloud_credential
def get_logging_types():
"""
    Split the comma-separated LOGGING_TYPES value into a list
:return: logging_types
"""
logging_types = []
if LOGGING_TYPES is not None:
temp = LOGGING_TYPES.split(",")
for logging in temp:
logging_types.append(logging)
return logging_types
def get_eks_config_basic(cluster_name):
"""
    Filling in params for a basic EKS v2 cluster
created through UI with default values
:param cluster_name:
:return: eks_config
"""
ec2_cloud_credential = get_aws_cloud_credential()
global eks_config
eks_config_temp = eks_config.copy()
eks_config_temp["displayName"] = cluster_name
eks_config_temp["amazonCredentialSecret"] = ec2_cloud_credential.id
return eks_config_temp
def get_eks_config_all(cluster_name):
"""
    Filling in params for an EKS v2 cluster
    created through UI with all values given
:param cluster_name:
:return: eks_config
"""
ec2_cloud_credential = get_aws_cloud_credential()
global eks_config
    public_access = [] if EKS_PUBLIC_ACCESS_SOURCES is None \
        else EKS_PUBLIC_ACCESS_SOURCES.split(",")
eks_config_temp = eks_config.copy()
eks_config_temp["displayName"] = cluster_name
eks_config_temp["amazonCredentialSecret"] = ec2_cloud_credential.id
    if KMS_KEY is not None:
        eks_config_temp["kmsKey"] = KMS_KEY
    if SECRET_ENCRYPTION:
        eks_config_temp["secretsEncryption"] = SECRET_ENCRYPTION
eks_config_temp["subnets"] = [] \
if EKS_SUBNETS is None else EKS_SUBNETS.split(",")
eks_config_temp["securityGroups"] = [] \
if EKS_SECURITYGROUP is None else EKS_SECURITYGROUP.split(",")
eks_config_temp["publicAccessSources"] = public_access
eks_config_temp["tags"] = {"cluster-level": "tag1"}
eks_config_temp["nodeGroups"] = []
eks_config_temp["nodeGroups"].append(get_new_node())
eks_config_temp["nodeGroups"][0]["tags"] = \
{"nodegroup-level": "tag1", "nodegroup-level": "tag2"}
eks_config_temp["nodeGroups"][0]["labels"] = {"label1": "value1"}
eks_config_temp["loggingTypes"] = get_logging_types()
eks_config_temp["serviceRole"] = EKS_SERVICE_ROLE
eks_config_temp["ec2SshKey"] = AWS_SSH_KEY_NAME
return eks_config_temp
def get_new_node():
"""
Create a new node group
:return: new_nodegroup
"""
new_nodegroup = {
"desiredSize": EKS_NODESIZE,
"diskSize": 20,
"gpu": False,
"instanceType": "t3.medium",
"maxSize": EKS_NODESIZE,
"minSize": EKS_NODESIZE,
"nodegroupName": random_test_name("test-ng"),
"ec2SshKey": AWS_SSH_KEY_NAME.split(".pem")[0],
"type": "nodeGroup"
}
return new_nodegroup
def validate_eks_cluster(cluster_name, eks_config_temp):
"""
Validate EKS cluster details
:param cluster_name: cluster name to be validated
:param eks_config_temp: eks_config
:return:
"""
eks_cluster = AmazonWebServices().describe_eks_cluster(cluster_name)
print("\nEKS cluster deployed in EKS Console: {}".
format(eks_cluster["cluster"]))
# check k8s version
assert eks_cluster["cluster"]["version"] == \
eks_config_temp["kubernetesVersion"], "K8s version is incorrect"
# check cluster status
assert eks_cluster["cluster"]["status"] == "ACTIVE", \
"Cluster is NOT in active state"
    # verify security groups; list.sort() returns None, so compare sorted
    # copies, and only check when security groups were given in the config
    if eks_config_temp.get("securityGroups"):
        assert sorted(eks_cluster["cluster"]
                      ["resourcesVpcConfig"]["securityGroupIds"]) \
            == sorted(eks_config_temp["securityGroups"]), \
            "Mismatch in Security Groups"
    # verify subnets
    if eks_config_temp.get("subnets"):
        assert sorted(eks_cluster["cluster"]
                      ["resourcesVpcConfig"]["subnetIds"]) \
            == sorted(eks_config_temp["subnets"]), "Mismatch in Subnets"
    # verify logging types
    if eks_config_temp.get("loggingTypes"):
        for logging in eks_cluster["cluster"]["logging"]["clusterLogging"]:
            if logging["enabled"]:
                assert sorted(logging["types"]) \
                    == sorted(eks_config_temp["loggingTypes"]), \
                    "Mismatch in Logging types set"
    # verify serviceRole
    if eks_config_temp.get("serviceRole"):
        assert eks_config_temp["serviceRole"] in \
            eks_cluster["cluster"]["roleArn"], "Mismatch in Service Role"
    # verify publicAccessSources
    if eks_config_temp.get("publicAccessSources"):
        assert sorted(eks_config_temp["publicAccessSources"]) == \
            sorted(eks_cluster["cluster"]
                   ["resourcesVpcConfig"]["publicAccessCidrs"]), \
            "Mismatch in Public Access Sources"
def edit_eks_cluster(cluster, eks_config_temp):
"""
Edit EKS v2 cluster
:param cluster: cluster
:param eks_config_temp: eks_config
:return: cluster
"""
# edit eks_config_temp
# add new cloud cred
ec2_cloud_credential_new = get_aws_cloud_credential()
eks_config_temp["amazonCredentialSecret"] = ec2_cloud_credential_new.id
# add cluster level tags
eks_config_temp["tags"] = {"cluster-level-2": "tag2"}
# add node group
new_nodegroup = get_new_node()
eks_config_temp["nodeGroups"].append(new_nodegroup)
# modify logging
eks_config_temp["loggingTypes"] = ["audit","api","authenticator"]
client = get_user_client()
client.update(cluster, name=cluster.name, eksConfig=eks_config_temp)
cluster = validate_cluster(client, cluster, intermediate_state="updating",
check_intermediate_state=True,
skipIngresscheck=True,
timeout=DEFAULT_TIMEOUT_EKS)
return cluster
def validate_nodegroup(nodegroup_list, cluster_name):
"""
Validate nodegroup details
:param nodegroup_list: list of nodegroups
:param cluster_name: cluster name
:return:
"""
for nodegroup in nodegroup_list:
print("nodegroup:", nodegroup)
eks_nodegroup = AmazonWebServices().describe_eks_nodegroup(
cluster_name, nodegroup["nodegroupName"]
)
print("\nNode Group from EKS console: {}".format(eks_nodegroup))
# k8s version check
eks_cluster = AmazonWebServices().describe_eks_cluster(cluster_name)
assert eks_cluster["cluster"]["version"] == \
eks_nodegroup["nodegroup"]["version"], \
"Mismatch between K8s version of cluster and nodegroup"
# status of nodegroup
assert eks_nodegroup["nodegroup"]["status"] == "ACTIVE", \
"Nodegroups are not in active status"
# check scalingConfig
assert nodegroup["maxSize"] \
== eks_nodegroup["nodegroup"]["scalingConfig"]["maxSize"], \
"maxSize is incorrect on the nodes"
assert nodegroup["minSize"] \
== eks_nodegroup["nodegroup"]["scalingConfig"]["minSize"], \
"minSize is incorrect on the nodes"
assert nodegroup["minSize"] \
== eks_nodegroup["nodegroup"]["scalingConfig"]["minSize"], \
"minSize is incorrect on the nodes"
# check instance type
assert nodegroup["instanceType"] \
== eks_nodegroup["nodegroup"]["instanceTypes"][0], \
"instanceType is incorrect on the nodes"
# check disk size
assert nodegroup["diskSize"] \
== eks_nodegroup["nodegroup"]["diskSize"], \
"diskSize is incorrect on the nodes"
# check ec2SshKey
if "ec2SshKey" in nodegroup.keys() and \
nodegroup["ec2SshKey"] is not "":
assert nodegroup["ec2SshKey"] \
== eks_nodegroup["nodegroup"]["remoteAccess"]["ec2SshKey"], \
"Ssh key is incorrect on the nodes"
| 16,213 | 34.019438 | 84 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_logging_e2e.py
|
import time
import urllib
import pytest
from .common import CATTLE_TEST_URL
from .common import DEFAULT_TIMEOUT
from .common import USER_TOKEN
from .common import WebsocketLogParse
from .common import create_connection
from .common import create_kubeconfig
from .common import create_project_and_ns
from .common import get_project_client_for_token
from .common import get_user_client_and_cluster
from .common import random_test_name
from .common import wait_for_app_to_active
namespace = {"p_client": None, "ns": None, "cluster": None, "project": None,
"name_prefix": None, "admin_client": None, "sys_p_client": None,
"pod": None}
fluentd_aggregator_answers = {"defaultImage": "true",
"replicas": "1",
"service.type": "ClusterIP",
"persistence.enabled": "false",
"extraPersistence.enabled": "false",
"extraPersistence.size": "10Gi",
"extraPersistence.mountPath": "/extra",
"extraPersistence.storageClass": "",
"output.type": "custom",
"output.flushInterval": "5s",
"output.customConf": "<match **.**>\n @type stdout\n</match>"}
FLUENTD_AGGREGATOR_CATALOG_ID = "catalog://?catalog=library&template=fluentd-aggregator&version=0.3.1"
fluentd_app_name = "rancher-logging"
endpoint_port = "24224"
weight = 100
def test_fluentd_target_logs(setup_fluentd_aggregator, request):
cluster_logging = create_cluster_logging(fluentd_target_without_ssl())
request.addfinalizer(lambda: delete_logging(cluster_logging))
wait_for_logging_app()
# wait for config to sync
time.sleep(90)
validate_websocket_view_logs()
def test_project_fluentd_target_logs(setup_fluentd_aggregator, request):
project_logging = create_project_logging(fluentd_target_without_ssl())
request.addfinalizer(lambda: delete_logging(project_logging))
wait_for_logging_app()
# wait for config to sync
# wait for project logs to start being forwarded
time.sleep(90)
validate_websocket_view_logs()
def wait_for_logging_app():
sys_p_client = namespace["sys_p_client"]
wait_for_app_to_active(sys_p_client, fluentd_app_name)
def fluentd_target_without_ssl():
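    """
    Build a fluentd forwarder config pointing at the in-cluster
    fluentd-aggregator service (hostname is set by setup_fluentd_aggregator).
    """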
return {"compress": True,
"enableTls": False,
"sslVerify": False,
"fluentServers": [
{
"endpoint": namespace["hostname"]+ ":" + endpoint_port,
"weight": weight
}
],
}
def get_system_project_client():
cluster = namespace["cluster"]
admin_client = namespace["admin_client"]
projects = admin_client.list_project(name="System",
clusterId=cluster.id).data
assert len(projects) == 1
project = projects[0]
sys_p_client = get_project_client_for_token(project, USER_TOKEN)
return sys_p_client
def create_cluster_logging(config, json_parsing=False):
cluster = namespace["cluster"]
admin_client = namespace["admin_client"]
name = random_test_name("fluentd")
return admin_client.create_cluster_logging(name=name,
clusterId=cluster.id,
fluentForwarderConfig=config,
enableJSONParsing=json_parsing,
outputFlushInterval=5
)
def create_project_logging(config, json_parsing=False):
admin_client = namespace["admin_client"]
cluster = namespace["cluster"]
projects = admin_client.list_project(name="System",
clusterId=cluster.id).data
assert len(projects) == 1
project = projects[0]
name = random_test_name("project-fluentd")
return admin_client.create_project_logging(name=name,
projectId=project.id,
fluentForwarderConfig=config,
enableJSONParsing=json_parsing,
outputFlushInterval=5
)
def delete_logging(logging_project):
admin_client = namespace["admin_client"]
admin_client.delete(logging_project)
def validate_websocket_view_logs():
url_base = 'wss://' + CATTLE_TEST_URL[8:] + \
'/k8s/clusters/' + namespace["cluster"].id + \
'/api/v1/namespaces/' + namespace["ns"].name + \
'/pods/' + namespace["pod"].name + \
'/log?container=' + namespace["pod"].containers[0].name
params_dict = {
"tailLines": 500,
"follow": True,
"timestamps": True,
"previous": False,
}
params = urllib.parse.urlencode(params_dict, doseq=True,
quote_via=urllib.parse.quote, safe='()')
url = url_base + "&" + params
wait_for_match(WebsocketLogParse(), url)
def wait_for_match(wslog, url, timeout=DEFAULT_TIMEOUT):
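    """
    Stream the pod's logs over the websocket and wait until a line that looks
    like fluentd output ('log_type' or a JSON 'log' field) appears, failing
    after the timeout.
    """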
start = time.time()
found = False
ws = create_connection(url, ["base64.binary.k8s.io"])
assert ws.connected, "failed to build the websocket"
wslog.start_thread(target=wslog.receiver, args=(ws, False))
while not found:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for string to match in logs")
time.sleep(1)
        print('websocket log line received:\n' + wslog.last_message + '\n')
if 'log_type' in wslog.last_message or '{"log"' in wslog.last_message:
found = True
wslog.last_message = ''
break
ws.close()
    assert found
@pytest.fixture(autouse="True")
def create_project_client(request):
client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
project_name = random_test_name("testlogging")
p, ns = create_project_and_ns(USER_TOKEN, cluster,
project_name)
p_client = get_project_client_for_token(p, USER_TOKEN)
namespace["p_client"] = p_client
namespace["ns"] = ns
namespace["cluster"] = cluster
namespace["project"] = p
namespace["admin_client"] = client
namespace["sys_p_client"] = get_system_project_client()
def fin():
client.delete(namespace["project"])
request.addfinalizer(fin)
@pytest.fixture
def setup_fluentd_aggregator():
p_client = namespace["p_client"]
ns = namespace["ns"]
name = random_test_name("fluentd-aggregator")
namespace["name_prefix"] = name
app = p_client.create_app(name=name,
answers=fluentd_aggregator_answers,
targetNamespace=ns.name,
externalId=FLUENTD_AGGREGATOR_CATALOG_ID,
namespaceId=ns.id)
wait_for_app_to_active(p_client, app.name)
namespace["hostname"] = namespace["name_prefix"] + \
"." + namespace["ns"].name + \
".svc.cluster.local"
wl = p_client.list_workload(name=name).data[0]
pod = p_client.list_pod(workloadId=wl.id).data[0]
namespace["pod"] = pod
| 7,498 | 35.940887 | 102 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/__init__.py
| 0 | 0 | 0 |
py
|
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_boto_create_eks.py
|
import base64
from .common import run_command_with_stderr
from .test_eks_cluster import ekscredential
from .test_eks_cluster import \
DATA_SUBDIR, EKS_ACCESS_KEY, EKS_SECRET_KEY, EKS_REGION
from .test_rke_cluster_provisioning import evaluate_clustername
from lib.aws import AmazonWebServices
@ekscredential
def test_boto_create_eks():
cluster_name = evaluate_clustername()
AmazonWebServices().create_eks_cluster(cluster_name)
kc_path = get_eks_kubeconfig(cluster_name)
out = run_command_with_stderr(
'kubectl --kubeconfig {} get svc'.format(kc_path))
print(out)
out = run_command_with_stderr(
'kubectl --kubeconfig {} get nodes'.format(kc_path))
print(out)
def get_eks_kubeconfig(cluster_name):
kubeconfig_path = DATA_SUBDIR + "/kube_config_hosted_eks.yml"
    # parenthesize the whole command so .format() fills both placeholders
    exports = ('export AWS_ACCESS_KEY_ID={} && '
               'export AWS_SECRET_ACCESS_KEY={}').format(
        EKS_ACCESS_KEY, EKS_SECRET_KEY)
# log_out=False so we don't write the keys to the console
run_command_with_stderr(exports, log_out=False)
command = 'aws eks --region {} update-kubeconfig '.format(EKS_REGION) + \
'--name {} --kubeconfig {}'.format(cluster_name, kubeconfig_path)
run_command_with_stderr(command)
print("\n\nKubeconfig:")
kubeconfig_file = open(kubeconfig_path, "r")
kubeconfig_contents = kubeconfig_file.read()
kubeconfig_file.close()
kubeconfig_contents_encoded = base64.b64encode(
kubeconfig_contents.encode("utf-8")).decode("utf-8")
print("\n\n" + kubeconfig_contents + "\n\n")
print("\nBase64 encoded: \n\n" + kubeconfig_contents_encoded + "\n\n")
return kubeconfig_path
| 1,695 | 34.333333 | 77 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_workload.py
|
import pytest
from .common import * # NOQA
from rancher import ApiError
namespace = {"p_client": None, "ns": None, "cluster": None, "project": None}
RBAC_ROLES = [CLUSTER_OWNER, PROJECT_MEMBER, PROJECT_OWNER,
PROJECT_READ_ONLY, CLUSTER_MEMBER]
WORKLOAD_TYPES = ["daemonSet", "statefulSet", "cronJob", "job"]
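# RBAC_ROLES and WORKLOAD_TYPES drive the parametrized RBAC tests at the
# bottom of this file (create/list/update/delete per role and workload type).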
if_check_lb = os.environ.get('RANCHER_CHECK_FOR_LB', "False")
if_check_lb = pytest.mark.skipif(
if_check_lb != "True",
reason='Lb test case skipped')
ENABLE_HOST_NODE_PORT_TESTS = ast.literal_eval(
os.environ.get('RANCHER_ENABLE_HOST_NODE_PORT_TESTS', "True"))
skip_host_node_port = pytest.mark.skipif(
not ENABLE_HOST_NODE_PORT_TESTS,
reason='Tests Skipped for AKS,GKE,EKS Clusters')
def test_wl_sidekick():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("sidekick")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id)
validate_workload(p_client, workload, "deployment", ns.name)
side_con = {"name": "test2",
"image": TEST_IMAGE_NGINX,
"stdin": True,
"tty": True}
con.append(side_con)
workload = p_client.update(workload,
containers=con)
time.sleep(90)
validate_workload_with_sidekicks(
p_client, workload, "deployment", ns.name)
def test_wl_deployment():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id)
validate_workload(p_client, workload, "deployment", ns.name)
def test_wl_statefulset():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
statefulSetConfig={}
)
validate_workload(p_client, workload, "statefulSet", ns.name)
def test_wl_daemonset():
p_client = namespace["p_client"]
ns = namespace["ns"]
cluster = namespace["cluster"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
schedulable_node_count = len(get_schedulable_nodes(cluster))
validate_workload(p_client, workload, "daemonSet",
ns.name, schedulable_node_count)
def test_wl_cronjob():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
cronJobConfig={
"concurrencyPolicy": "Allow",
"failedJobsHistoryLimit": 10,
"schedule": "*/1 * * * *",
"successfulJobsHistoryLimit": 10})
validate_workload(p_client, workload, "cronJob", ns.name)
def test_wl_upgrade():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
scale=2)
wait_for_pods_in_workload(p_client, workload, 2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
revisions = workload.revisions()
assert len(revisions) == 1
for revision in revisions:
if revision["containers"][0]["image"] == TEST_IMAGE:
firstrevision = revision.id
con = [{"name": "test1",
"image": TEST_IMAGE_NGINX}]
p_client.update(workload, containers=con)
wait_for_pod_images(p_client, workload, ns.name, TEST_IMAGE_NGINX, 2)
wait_for_pods_in_workload(p_client, workload, 2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_workload_image(p_client, workload, TEST_IMAGE_NGINX, ns)
revisions = workload.revisions()
assert len(revisions) == 2
for revision in revisions:
if revision["containers"][0]["image"] == TEST_IMAGE_NGINX:
secondrevision = revision.id
con = [{"name": "test1",
"image": TEST_IMAGE_OS_BASE,
"tty": True,
"stdin": True}]
p_client.update(workload, containers=con)
wait_for_pod_images(p_client, workload, ns.name, TEST_IMAGE_OS_BASE, 2)
wait_for_pods_in_workload(p_client, workload, 2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_workload_image(p_client, workload, TEST_IMAGE_OS_BASE, ns)
revisions = workload.revisions()
assert len(revisions) == 3
for revision in revisions:
if revision["containers"][0]["image"] == TEST_IMAGE_OS_BASE:
thirdrevision = revision.id
p_client.action(workload, "rollback", replicaSetId=firstrevision)
wait_for_pod_images(p_client, workload, ns.name, TEST_IMAGE, 2)
wait_for_pods_in_workload(p_client, workload, 2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_workload_image(p_client, workload, TEST_IMAGE, ns)
p_client.action(workload, "rollback", replicaSetId=secondrevision)
wait_for_pod_images(p_client, workload, ns.name, TEST_IMAGE_NGINX, 2)
wait_for_pods_in_workload(p_client, workload, 2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_workload_image(p_client, workload, TEST_IMAGE_NGINX, ns)
p_client.action(workload, "rollback", replicaSetId=thirdrevision)
wait_for_pod_images(p_client, workload, ns.name, TEST_IMAGE_OS_BASE, 2)
wait_for_pods_in_workload(p_client, workload, 2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_workload_image(p_client, workload, TEST_IMAGE_OS_BASE, ns)
def test_wl_pod_scale_up():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id)
workload = wait_for_wl_to_active(p_client, workload)
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns.name
allpods = execute_kubectl_cmd(get_pods)
wait_for_pods_in_workload(p_client, workload, 1)
p_client.update(workload, scale=2, containers=con)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_pods_are_running_by_id(allpods, workload, ns.name)
for key, value in workload.workloadLabels.items():
label = key + "=" + value
allpods = execute_kubectl_cmd(get_pods)
wait_for_pods_in_workload(p_client, workload, 2)
p_client.update(workload, scale=3, containers=con)
validate_workload(p_client, workload, "deployment", ns.name, 3)
validate_pods_are_running_by_id(allpods, workload, ns.name)
def test_wl_pod_scale_down():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
scale=3)
wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 3)
p_client.update(workload, scale=2, containers=con)
wait_for_pods_in_workload(p_client, workload, 2)
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns.name
allpods = execute_kubectl_cmd(get_pods)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_pods_are_running_by_id(allpods, workload, ns.name)
p_client.update(workload, scale=1, containers=con)
wait_for_pods_in_workload(p_client, workload, 1)
for key, value in workload.workloadLabels.items():
label = key + "=" + value
allpods = execute_kubectl_cmd(get_pods)
validate_workload(p_client, workload, "deployment", ns.name)
validate_pods_are_running_by_id(allpods, workload, ns.name)
def test_wl_pause_orchestration():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
scale=2)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 2)
p_client.action(workload, "pause")
validate_workload_paused(p_client, workload, True)
con = [{"name": "test1",
"image": TEST_IMAGE_NGINX}]
p_client.update(workload, containers=con)
validate_pod_images(TEST_IMAGE, workload, ns.name)
p_client.action(workload, "resume")
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 2)
validate_workload_paused(p_client, workload, False)
validate_pod_images(TEST_IMAGE_NGINX, workload, ns.name)
# Windows does not support host ports for now.
@skip_test_windows_os
@skip_host_node_port
def test_wl_with_hostPort():
p_client = namespace["p_client"]
ns = namespace["ns"]
source_port = 9999
port = {"containerPort": TEST_IMAGE_PORT,
"type": "containerPort",
"kind": "HostPort",
"protocol": "TCP",
"sourcePort": source_port}
con = [{"name": "test1",
"image": TEST_IMAGE,
"ports": [port]}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
workload = wait_for_wl_to_active(p_client, workload)
validate_hostPort(p_client, workload, source_port, namespace["cluster"])
@skip_host_node_port
def test_wl_with_nodePort():
p_client = namespace["p_client"]
ns = namespace["ns"]
source_port = 30456
port = {"containerPort": TEST_IMAGE_PORT,
"type": "containerPort",
"kind": "NodePort",
"protocol": "TCP",
"sourcePort": source_port}
con = [{"name": "test1",
"image": TEST_IMAGE,
"ports": [port]}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
workload = wait_for_wl_to_active(p_client, workload)
validate_nodePort(p_client, workload, namespace["cluster"], source_port)
def test_wl_with_clusterIp():
p_client = namespace["p_client"]
ns = namespace["ns"]
source_port = 30458
port = {"containerPort": TEST_IMAGE_PORT,
"type": "containerPort",
"kind": "ClusterIP",
"protocol": "TCP",
"sourcePort": source_port}
con = [{"name": "test1",
"image": TEST_IMAGE,
"ports": [port]}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
workload = wait_for_wl_to_active(p_client, workload)
# Get cluster Ip
sd_records = p_client.list_dns_record(name=name).data
assert len(sd_records) == 1
cluster_ip = sd_records[0].clusterIp
    # Deploy test pods used for clusterIp resolution check
wlname = random_test_name("testclusterip-client")
con = [{"name": "test1",
"image": TEST_IMAGE}]
workload_for_test = p_client.create_workload(name=wlname,
containers=con,
namespaceId=ns.id,
scale=2)
wait_for_wl_to_active(p_client, workload_for_test)
test_pods = wait_for_pods_in_workload(p_client, workload_for_test, 2)
validate_clusterIp(p_client, workload, cluster_ip, test_pods, source_port)
@if_check_lb
def test_wl_with_lb():
p_client = namespace["p_client"]
ns = namespace["ns"]
source_port = 9001
port = {"containerPort": TEST_IMAGE_PORT,
"type": "containerPort",
"kind": "LoadBalancer",
"protocol": "TCP",
"sourcePort": source_port}
con = [{"name": "test1",
"image": TEST_IMAGE,
"ports": [port]}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
workload = wait_for_wl_to_active(p_client, workload)
validate_lb(p_client, workload, source_port)
def test_wl_with_clusterIp_scale_and_upgrade():
p_client = namespace["p_client"]
ns = namespace["ns"]
source_port = 30459
port = {"containerPort": TEST_IMAGE_PORT,
"type": "containerPort",
"kind": "ClusterIP",
"protocol": "TCP",
"sourcePort": source_port}
con = [{"name": "test-cluster-ip",
"image": TEST_IMAGE,
"ports": [port]}]
name = random_test_name("cluster-ip-scale-upgrade")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
scale=1)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 1)
sd_records = p_client.list_dns_record(name=name).data
assert len(sd_records) == 1
cluster_ip = sd_records[0].clusterIp
# get test pods
wlname = random_test_name("testclusterip-client")
wl_con = [{"name": "test1", "image": TEST_IMAGE}]
workload_for_test = p_client.create_workload(name=wlname,
containers=wl_con,
namespaceId=ns.id,
scale=2)
wait_for_wl_to_active(p_client, workload_for_test)
test_pods = wait_for_pods_in_workload(p_client, workload_for_test, 2)
validate_clusterIp(p_client, workload, cluster_ip, test_pods, source_port)
# scale up
    p_client.update(workload, scale=3, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 3)
validate_clusterIp(p_client, workload, cluster_ip, test_pods, source_port)
# scale down
p_client.update(workload, scale=2, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 2)
validate_clusterIp(p_client, workload, cluster_ip, test_pods, source_port)
# upgrade
con = [{"name": "test-cluster-ip-upgrade-new",
"image": TEST_IMAGE,
"ports": [port]}]
p_client.update(workload, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 2)
validate_clusterIp(p_client, workload, cluster_ip, test_pods, source_port)
@skip_host_node_port
def test_wl_with_nodePort_scale_and_upgrade():
p_client = namespace["p_client"]
ns = namespace["ns"]
source_port = 30457
port = {"containerPort": TEST_IMAGE_PORT,
"type": "containerPort",
"kind": "NodePort",
"protocol": "TCP",
"sourcePort": source_port}
con = [{"name": "test1",
"image": TEST_IMAGE,
"ports": [port]}]
name = random_test_name("test-node-port-scale-upgrade")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
scale=1)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 1)
validate_nodePort(p_client, workload, namespace["cluster"], source_port)
# scale up
p_client.update(workload, scale=3, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 3)
validate_nodePort(p_client, workload, namespace["cluster"], source_port)
# scale down
p_client.update(workload, scale=2, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 2)
validate_nodePort(p_client, workload, namespace["cluster"], source_port)
# upgrade
con = [{"name": "test-node-port-scale-upgrade-new",
"image": TEST_IMAGE,
"ports": [port]}]
p_client.update(workload, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 2)
validate_nodePort(p_client, workload, namespace["cluster"], source_port)
# Windows does not support host ports for now.
@skip_test_windows_os
@skip_host_node_port
def test_wl_with_hostPort_scale_and_upgrade():
p_client = namespace["p_client"]
ns = namespace["ns"]
source_port = 8888
port = {"containerPort": TEST_IMAGE_PORT,
"type": "containerPort",
"kind": "HostPort",
"protocol": "TCP",
"sourcePort": source_port}
con = [{"name": "test-host-port-upgrade",
"image": TEST_IMAGE,
"ports": [port]}]
name = random_test_name("hostport-scale")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
scale=1)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 1)
validate_hostPort(p_client, workload, source_port, namespace["cluster"])
# scale up
p_client.update(workload, scale=2, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 2)
validate_hostPort(p_client, workload, source_port, namespace["cluster"])
# scale down
p_client.update(workload, scale=1, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 1)
validate_hostPort(p_client, workload, source_port, namespace["cluster"])
    # Note: wait until the pod count matches the expected number; the
    # workload can report 'active' while its pods are still not ready,
    # especially after scaling down and upgrading.
# upgrade
con = [{"name": "test-host-port-upgrade-new",
"image": TEST_IMAGE,
"ports": [port]}]
p_client.update(workload, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 1)
validate_hostPort(p_client, workload, source_port, namespace["cluster"])
@if_check_lb
def test_wl_with_lb_scale_and_upgrade():
p_client = namespace["p_client"]
ns = namespace["ns"]
source_port = 9001
port = {"containerPort": TEST_IMAGE_PORT,
"type": "containerPort",
"kind": "LoadBalancer",
"protocol": "TCP",
"sourcePort": source_port}
con = [{"name": "test1",
"image": TEST_IMAGE,
"ports": [port]}]
name = random_test_name("lb-scale-upgrade")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
scale=1)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 1)
validate_lb(p_client, workload, source_port)
# scale up
p_client.update(workload, scale=3, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 3)
validate_lb(p_client, workload, source_port)
# scale down
p_client.update(workload, scale=2, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 2)
validate_lb(p_client, workload, source_port)
# upgrade
con = [{"name": "test-load-balance-upgrade-new",
"image": TEST_IMAGE,
"ports": [port]}]
p_client.update(workload, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 2)
validate_lb(p_client, workload, source_port)
# --------------------- rbac tests for cluster owner -----------------------
@if_test_rbac
def test_rbac_cluster_owner_wl_create(remove_resource):
# cluster owner can create project and deploy workload in it
p_client, project, ns, workload = setup_project_by_role(CLUSTER_OWNER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
@if_test_rbac
def test_rbac_cluster_owner_wl_create_2(remove_resource):
# cluster owner can deploy workload in any project in the cluster
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
p2 = rbac_get_unshared_project()
p_client2 = get_project_client_for_token(p2, user_token)
ns2 = rbac_get_unshared_ns()
name = random_test_name("default")
con = [{"name": "test1",
"image": TEST_IMAGE}]
wl = p_client2.create_workload(name=name, containers=con,
namespaceId=ns2.id)
validate_workload(p_client2, wl, "deployment", ns2.name)
remove_resource(wl)
@if_test_rbac
def test_rbac_cluster_owner_wl_edit(remove_resource):
p_client, project, ns, workload = setup_project_by_role(CLUSTER_OWNER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
# cluster owner can edit workload in the project
p_client.update(workload, scale=2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
con = [{"name": "test1",
"image": "nginx"}]
p_client.update(workload, containers=con)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_workload_image(p_client, workload, "nginx", ns)
@if_test_rbac
def test_rbac_cluster_owner_wl_delete(remove_resource):
p_client, project, ns, workload = setup_project_by_role(CLUSTER_OWNER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
# cluster owner can delete workload in the project
p_client.delete(workload)
assert len(p_client.list_workload(uuid=workload.uuid).data) == 0
# --------------------- rbac tests for cluster member -----------------------
@if_test_rbac
def test_rbac_cluster_member_wl_create(remove_resource):
# cluster member can create project and deploy workload in it
p_client, project, ns, workload = setup_project_by_role(CLUSTER_MEMBER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
@if_test_rbac
def test_rbac_cluster_member_wl_create_2():
user_token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
name = random_test_name("default")
con = [{"name": "test1", "image": TEST_IMAGE}]
# cluster member can NOT deploy workload in the project he can NOT access
with pytest.raises(ApiError) as e:
p2 = rbac_get_unshared_project()
ns2 = rbac_get_unshared_ns()
new_p_client = get_project_client_for_token(p2, user_token)
new_p_client.create_workload(name=name, containers=con,
namespaceId=ns2.id)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
def test_rbac_cluster_member_wl_edit(remove_resource):
p_client, project, ns, workload = setup_project_by_role(CLUSTER_MEMBER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
# cluster member can edit workload in the project
p_client.update(workload, scale=2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
con = [{"name": "test1", "image": "nginx"}]
p_client.update(workload, containers=con)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_workload_image(p_client, workload, "nginx", ns)
@if_test_rbac
def test_rbac_cluster_member_wl_delete(remove_resource):
p_client, project, ns, workload = setup_project_by_role(CLUSTER_MEMBER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
# cluster member can delete workload in the project
p_client.delete(workload)
assert len(p_client.list_workload(uuid=workload.uuid).data) == 0
# --------------------- rbac tests for project member -----------------------
@if_test_rbac
def test_rbac_project_member_wl_create(remove_resource):
# project member can deploy workload in his project
p_client, project, ns, workload = setup_project_by_role(PROJECT_MEMBER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
@if_test_rbac
def test_rbac_project_member_wl_create_2():
# project member can NOT deploy workload in the project he can NOT access
user_token = rbac_get_user_token_by_role(PROJECT_MEMBER)
name = random_test_name("default")
con = [{"name": "test1", "image": TEST_IMAGE}]
with pytest.raises(ApiError) as e:
p2 = rbac_get_unshared_project()
ns2 = rbac_get_unshared_ns()
new_p_client = get_project_client_for_token(p2, user_token)
new_p_client.create_workload(name=name, containers=con,
namespaceId=ns2.id)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
def test_rbac_project_member_wl_edit(remove_resource):
p_client, project, ns, workload = setup_project_by_role(PROJECT_MEMBER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
# project member can edit workload in the project
p_client.update(workload, scale=2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
con = [{"name": "test1", "image": "nginx"}]
p_client.update(workload, containers=con)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_workload_image(p_client, workload, "nginx", ns)
@if_test_rbac
def test_rbac_project_member_wl_delete(remove_resource):
p_client, project, ns, workload = setup_project_by_role(PROJECT_MEMBER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
# project member can delete workload in the project
p_client.delete(workload)
assert len(p_client.list_workload(uuid=workload.uuid).data) == 0
# --------------------- rbac tests for project owner -----------------------
@if_test_rbac
def test_rbac_project_owner_wl_create(remove_resource):
# project owner can deploy workload in his project
p_client, project, ns, workload = setup_project_by_role(PROJECT_OWNER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
@if_test_rbac
def test_rbac_project_owner_wl_create_2():
# project owner can NOT deploy workload in the project he can NOT access
user_token = rbac_get_user_token_by_role(PROJECT_OWNER)
name = random_test_name("default")
con = [{"name": "test1", "image": TEST_IMAGE}]
with pytest.raises(ApiError) as e:
p2 = rbac_get_unshared_project()
ns2 = rbac_get_unshared_ns()
new_p_client = get_project_client_for_token(p2, user_token)
new_p_client.create_workload(name=name, containers=con,
namespaceId=ns2.id)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
def test_rbac_project_owner_wl_edit(remove_resource):
p_client, project, ns, workload = setup_project_by_role(PROJECT_OWNER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
# project owner can edit workload in his project
p_client.update(workload, scale=2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
con = [{"name": "test1", "image": "nginx"}]
p_client.update(workload, containers=con)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_workload_image(p_client, workload, "nginx", ns)
@if_test_rbac
def test_rbac_project_owner_wl_delete(remove_resource):
p_client, project, ns, workload = setup_project_by_role(PROJECT_OWNER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
# project owner can delete workload in his project
p_client.delete(workload)
assert len(p_client.list_workload(uuid=workload.uuid).data) == 0
# --------------------- rbac tests for project read-only --------------------
@if_test_rbac
def test_rbac_project_read_only_wl_create():
# project read-only can NOT deploy workloads in the project
project = rbac_get_project()
user_token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
p_client = get_project_client_for_token(project, user_token)
ns = rbac_get_namespace()
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
with pytest.raises(ApiError) as e:
p_client.create_workload(name=name, containers=con,
namespaceId=ns.id)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
def test_rbac_project_read_only_wl_edit(remove_resource):
project = rbac_get_project()
user_token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
p_client = get_project_client_for_token(project, user_token)
# deploy a workload as cluster owner
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
cluster_owner_p_client = get_project_client_for_token(project,
cluster_owner_token)
ns = rbac_get_namespace()
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
workload = cluster_owner_p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id)
# project read-only can NOT edit existing workload
with pytest.raises(ApiError) as e:
p_client.update(workload, scale=2)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
remove_resource(workload)
@if_test_rbac
def test_rbac_project_read_only_wl_list():
# project read-only can NOT see workloads in the project he has no access
p2 = rbac_get_unshared_project()
user_token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
p_client = get_project_client_for_token(p2, user_token)
workloads = p_client.list_workload().data
assert len(workloads) == 0
@pytest.fixture(scope='module', autouse="True")
def create_project_client(request):
client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
p, ns = create_project_and_ns(
USER_TOKEN, cluster, random_test_name("testworkload"))
p_client = get_project_client_for_token(p, USER_TOKEN)
namespace["p_client"] = p_client
namespace["ns"] = ns
namespace["cluster"] = cluster
namespace["project"] = p
def fin():
client = get_user_client()
client.delete(namespace["project"])
request.addfinalizer(fin)
def setup_project_by_role(role, remove_resource):
""" set up a project for a specific role used for rbac testing
- for cluster owner or cluster member:
it creates a project and namespace, then deploys a workload
- for project owner or project member:
it deploys a workload to the existing project and namespace
"""
user_token = rbac_get_user_token_by_role(role)
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
if role in [CLUSTER_OWNER, CLUSTER_MEMBER]:
project, ns = create_project_and_ns(user_token, namespace["cluster"],
random_test_name("test-rbac"))
p_client = get_project_client_for_token(project, user_token)
workload = p_client.create_workload(name=name, containers=con,
namespaceId=ns.id)
remove_resource(project)
remove_resource(ns)
remove_resource(workload)
return p_client, project, ns, workload
elif role in [PROJECT_OWNER, PROJECT_MEMBER]:
project = rbac_get_project()
ns = rbac_get_namespace()
p_client = get_project_client_for_token(project, user_token)
workload = p_client.create_workload(name=name, containers=con,
namespaceId=ns.id)
remove_resource(workload)
return p_client, project, ns, workload
else:
return None, None, None, None
# --------------------- rbac tests by workload types -----------------------
@if_test_rbac
@pytest.mark.parametrize("role", RBAC_ROLES)
@pytest.mark.parametrize("config", WORKLOAD_TYPES)
def test_rbac_wl_parametrize_create(role, config, remove_resource):
p_client, project, ns = setup_wl_project_by_role(role)
cluster = namespace["cluster"]
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
if role != PROJECT_READ_ONLY:
workload = create_workload_by_type(p_client, name, con, ns, config)
wait_for_wl_to_active(p_client, workload)
remove_resource(workload)
if role == CLUSTER_MEMBER:
remove_resource(project)
return None
else:
with pytest.raises(ApiError) as e:
workload = create_workload_by_type(p_client, name, con, ns, config)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
@pytest.mark.parametrize("role", RBAC_ROLES)
@pytest.mark.parametrize("config", WORKLOAD_TYPES)
def test_rbac_wl_parametrize_create_negative(role, remove_resource, config):
if role == CLUSTER_OWNER:
# cluster owner can deploy workloads in any project in the cluster
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
p2 = rbac_get_unshared_project()
p_client2 = get_project_client_for_token(p2, user_token)
ns2 = rbac_get_unshared_ns()
name = random_test_name("default")
con = [{"name": "test1", "image": TEST_IMAGE}]
wl = create_workload_by_type(p_client2, name, con, ns2, config)
wait_for_wl_to_active(p_client2, wl)
remove_resource(wl)
else:
# roles cannot deploy workloads in projects they cannot access
user_token = rbac_get_user_token_by_role(role)
name = random_test_name("default")
con = [{"name": "test1", "image": TEST_IMAGE}]
with pytest.raises(ApiError) as e:
p2 = rbac_get_unshared_project()
ns2 = rbac_get_unshared_ns()
new_p_client = get_project_client_for_token(p2, user_token)
workload = create_workload_by_type(new_p_client, name, con, ns2, config)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
@pytest.mark.parametrize("role", RBAC_ROLES)
@pytest.mark.parametrize("config", WORKLOAD_TYPES)
def test_rbac_wl_parametrize_list(role, remove_resource, config):
if role == CLUSTER_MEMBER:
p_client, project, ns = setup_wl_project_by_role(role)
else:
p_client, project, ns = setup_wl_project_by_role(CLUSTER_OWNER)
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
workload = create_workload_by_type(p_client, name, con, ns, config)
wait_for_wl_to_active(p_client, workload)
# switch to rbac role
user_token = rbac_get_user_token_by_role(role)
p_client_rbac = get_project_client_for_token(project, user_token)
assert len(p_client_rbac.list_workload(uuid=workload.uuid).data) == 1
remove_resource(workload)
@if_test_rbac
@pytest.mark.parametrize("role", RBAC_ROLES)
@pytest.mark.parametrize("config", WORKLOAD_TYPES)
def test_rbac_wl_parametrize_list_negative(role, remove_resource, config):
unshared_project = rbac_get_unshared_project()
ns = rbac_get_unshared_ns()
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
p_client = get_project_client_for_token(unshared_project, user_token)
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
wait_for_wl_to_active(p_client, workload)
# switch to rbac role
user_token = rbac_get_user_token_by_role(role)
p_client_rbac = get_project_client_for_token(unshared_project, user_token)
if role != CLUSTER_OWNER:
assert len(p_client_rbac.list_workload(uuid=workload.uuid).data) == 0
else:
assert len(p_client_rbac.list_workload(uuid=workload.uuid).data) == 1
remove_resource(workload)
@if_test_rbac
@pytest.mark.parametrize("role", RBAC_ROLES)
@pytest.mark.parametrize("config", WORKLOAD_TYPES)
def test_rbac_wl_parametrize_update(role, remove_resource, config):
# workloads of type job cannot be edited
if config == "job":
return
p_client, project, ns = setup_wl_project_by_role(role)
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
if role != PROJECT_READ_ONLY:
workload = create_workload_by_type(p_client, name, con, ns, config)
wait_for_wl_to_active(p_client, workload)
con = [{"name": "test1", "image": os.environ.get('RANCHER_TEST_IMAGE',
"nginx")}]
p_client.update(workload, containers=con)
remove_resource(workload)
if role == CLUSTER_MEMBER:
remove_resource(project)
else:
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
p_client = get_project_client_for_token(project, user_token)
ns = rbac_get_namespace()
workload = create_workload_by_type(p_client, name, con, ns, config)
wait_for_wl_to_active(p_client, workload)
with pytest.raises(ApiError) as e:
user_token = rbac_get_user_token_by_role(role)
p_client = get_project_client_for_token(project, user_token)
con = [{"name": "test1", "image": os.environ.get('RANCHER_TEST_IMAGE',
"nginx")}]
p_client.update(workload, containers=con)
wait_for_pods_in_workload(p_client, workload)
validate_workload(p_client, workload, config, ns.name)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
remove_resource(workload)
@if_test_rbac
@pytest.mark.parametrize("role", RBAC_ROLES)
@pytest.mark.parametrize("config", WORKLOAD_TYPES)
def test_rbac_wl_parametrize_update_negative(role, remove_resource, config):
# workloads of type job cannot be edited
if config == "job":
return
if role == CLUSTER_OWNER:
# cluster owner can edit workloads in any project in the cluster
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
p_client, project, ns = setup_wl_project_by_role(role)
name = random_test_name("default")
con = [{"name": "test1", "image": TEST_IMAGE}]
workload = create_workload_by_type(p_client, name, con, ns, config)
wait_for_wl_to_active(p_client, workload)
con = [{"name": "test1", "image": "nginx"}]
p_client.update(workload, containers=con)
remove_resource(workload)
else:
project2 = rbac_get_unshared_project()
user_token = rbac_get_user_token_by_role(role)
# roles cannot edit workloads in projects they cannot access
# deploy a workload as cluster owner
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
cluster_owner_p_client = get_project_client_for_token(
project2, cluster_owner_token)
ns = rbac_get_unshared_ns()
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
workload = create_workload_by_type(cluster_owner_p_client,
name, con, ns, config)
with pytest.raises(ApiError) as e:
p_client = get_project_client_for_token(project2, user_token)
con = [{"name": "test1", "image": "nginx"}]
p_client.update(workload, containers=con)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
remove_resource(workload)
@if_test_rbac
@pytest.mark.parametrize("role", RBAC_ROLES)
@pytest.mark.parametrize("config", WORKLOAD_TYPES)
def test_rbac_wl_parametrize_delete(role, remove_resource, config):
p_client, project, ns = setup_wl_project_by_role(role)
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
if role != PROJECT_READ_ONLY:
workload = create_workload_by_type(p_client, name, con, ns, config)
wait_for_wl_to_active(p_client, workload)
p_client.delete(workload)
assert len(p_client.list_workload(uuid=workload.uuid).data) == 0
remove_resource(workload)
else:
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
p_client = get_project_client_for_token(project, user_token)
ns = rbac_get_namespace()
workload = create_workload_by_type(p_client, name, con, ns, config)
wait_for_wl_to_active(p_client, workload)
user_token = rbac_get_user_token_by_role(role)
p_client = get_project_client_for_token(project, user_token)
with pytest.raises(ApiError) as e:
p_client.delete(workload)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
remove_resource(workload)
if role == CLUSTER_MEMBER:
remove_resource(project)
@if_test_rbac
@pytest.mark.parametrize("role", RBAC_ROLES)
@pytest.mark.parametrize("config", WORKLOAD_TYPES)
def test_rbac_wl_parametrize_delete_negative(role, remove_resource, config):
if role == CLUSTER_OWNER:
# cluster owner can delete workloads in any project in the cluster
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
project = rbac_get_unshared_project()
p_client = get_project_client_for_token(project, user_token)
ns = rbac_get_namespace()
name = random_test_name("default")
con = [{"name": "test1", "image": TEST_IMAGE}]
workload = create_workload_by_type(p_client, name, con, ns, config)
p_client.delete(workload)
else:
project = rbac_get_unshared_project()
user_token = rbac_get_user_token_by_role(role)
# roles cannot delete workloads in projects they cannot access
# deploy a workload as cluster owner
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
cluster_owner_p_client = get_project_client_for_token(
project, cluster_owner_token)
ns = rbac_get_unshared_ns()
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
workload = create_workload_by_type(cluster_owner_p_client,
name, con, ns, config)
p_client = get_project_client_for_token(project, user_token)
with pytest.raises(ApiError) as e:
p_client.delete(workload)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
remove_resource(workload)
def setup_wl_project_by_role(role):
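    """
    Return a project client, project and namespace for the given role:
    cluster members get a fresh project and namespace of their own, while
    the other roles reuse the shared rbac project and namespace.
    """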
if role == CLUSTER_MEMBER:
user_token = rbac_get_user_token_by_role(role)
project, ns = create_project_and_ns(user_token, namespace["cluster"],
random_test_name("test-rbac"))
p_client = get_project_client_for_token(project, user_token)
return p_client, project, ns
else:
project = rbac_get_project()
user_token = rbac_get_user_token_by_role(role)
p_client = get_project_client_for_token(project, user_token)
ns = rbac_get_namespace()
return p_client, project, ns
def create_workload_by_type(client, name, con, ns, config):
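    """
    Create a workload of the requested type (daemonSet, statefulSet, cronJob
    or job); any other value falls through and returns None.
    """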
if config == "daemonSet":
return client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
elif config == "statefulSet":
return client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
statefulSetConfig={})
elif config == "cronJob":
return client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
cronJobConfig={
"concurrencyPolicy": "Allow",
"failedJobsHistoryLimit": 10,
"schedule": "*/1 * * * *",
"successfulJobsHistoryLimit": 10})
elif config == "job":
return client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
jobConfig={})
| 48,386 | 41.333333 | 84 |
py
|
rancher
|
rancher-master/tests/validation/tests/v3_api/test_bkp_restore_local_with_snapshot_check.py
|
import pytest
from .common import * # NOQA
from .test_rke_cluster_provisioning import create_and_validate_custom_host
namespace = {"p_client": None, "ns": None, "cluster": None, "project": None,
"nodes": []}
backup_info = {"backupname": None, "backup_id": None, "workload": None,
"backupfilename": None, "etcdbackupdata": None}
@if_test_all_snapshot
def test_bkp_restore_local_with_snapshot_check_create():
validate_backup_create(namespace, backup_info, "filesystem")
@if_test_all_snapshot
def test_bkp_restore_local_with_snapshot_check_restore():
ns, binfo = validate_backup_create(namespace, backup_info, "filesystem")
validate_backup_restore(ns, binfo)
@if_test_all_snapshot
def test_bkp_restore_local_with_snapshot_check_delete():
ns, binfo = validate_backup_create(namespace, backup_info, "filesystem")
ns, binfo = validate_backup_restore(ns, binfo)
validate_backup_delete(ns, binfo, "filesystem")
@pytest.fixture(scope='module', autouse="True")
def create_project_client_ec2(request):
node_roles = [["controlplane"], ["etcd"],
["worker"], ["worker"], ["worker"]]
cluster, aws_nodes = create_and_validate_custom_host(node_roles, True)
client = get_user_client()
p, ns = create_project_and_ns(USER_TOKEN, cluster, "testsecret")
p_client = get_project_client_for_token(p, USER_TOKEN)
c_client = get_cluster_client_for_token(cluster, USER_TOKEN)
namespace["p_client"] = p_client
namespace["ns"] = ns
namespace["cluster"] = cluster
namespace["project"] = p
namespace["c_client"] = c_client
namespace["nodes"] = aws_nodes.copy()
def fin():
client.delete(p)
cluster_cleanup(client, cluster, aws_nodes)
request.addfinalizer(fin)
| 1,780 | 35.346939 | 76 |
py
|